id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
3416556 | import torch
from torch.distributed.shard.sharded_tensor import (
sharded_op_impl,
)
def validate_param(param, param_name):
    """Ensure a required sharded-op argument was supplied.

    Args:
        param: the argument value to check.
        param_name: the argument name used in the error message.

    Raises:
        ValueError: if ``param`` is None.
    """
    if param is not None:
        return
    raise ValueError(f"param: {param_name} shouldn't be None!")
@sharded_op_impl(torch.nn.init.uniform_)
def uniform_(types, args=(), kwargs=None, pg=None):
    r"""
    Fills the Tensor in sharded_tensor.local_shards with values drawn from the uniform
    distribution :math:`\mathcal{U}(a, b)`.
    Args:
        sharded_tensor: tensor sharded across devices
        a: the lower bound of the uniform distribution
        b: the upper bound of the uniform distribution
    """
    validate_param(kwargs, "kwargs")
    sharded_tensor = kwargs["tensor"]
    validate_param(sharded_tensor, "sharded_tensor")
    lower = kwargs["a"]
    validate_param(lower, "a")
    upper = kwargs["b"]
    validate_param(upper, "b")
    # Each local shard is initialized independently with the same distribution.
    for local_shard in sharded_tensor.local_shards():
        torch.nn.init.uniform_(local_shard.tensor, a=lower, b=upper)
    return sharded_tensor
@sharded_op_impl(torch.nn.init.normal_)
def normal_(types, args=(), kwargs=None, pg=None):
    r"""
    Fills the Tensors in sharded_tensor.local_shards with values drawn from the normal
    distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`.
    Args:
        sharded_tensor: tensor sharded across devices
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
    """
    validate_param(kwargs, "kwargs")
    sharded_tensor = kwargs["tensor"]
    validate_param(sharded_tensor, "sharded_tensor")
    mu = kwargs["mean"]
    validate_param(mu, "mean")
    sigma = kwargs["std"]
    validate_param(sigma, "std")
    # Each local shard is initialized independently with the same distribution.
    for local_shard in sharded_tensor.local_shards():
        torch.nn.init.normal_(local_shard.tensor, mean=mu, std=sigma)
    return sharded_tensor
@sharded_op_impl(torch.nn.init.kaiming_uniform_)
def kaiming_uniform_(types, args=(), kwargs=None, pg=None):
    r"""
    Fills the Tensors in sharded_tensor.local_shards with values according to the method
    described in `Delving deep into rectifiers: Surpassing human-level
    performance on ImageNet classification` - <NAME>. et al. (2015), using a
    uniform distribution. The resulting tensor will have values sampled from
    :math:`\mathcal{U}(-\text{bound}, \text{bound})` where
    .. math::
        \text{bound} = \text{gain} \times \sqrt{\frac{3}{\text{fan\_mode}}}
    Also known as He initialization.
    Args:
        sharded_tensor: tensor sharded across devices
        a: the negative slope of the rectifier used after this layer (only
            used with ``'leaky_relu'``)
        mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
            preserves the magnitude of the variance of the weights in the
            forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
            backwards pass.
        nonlinearity: the non-linear function (`nn.functional` name),
            recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
    """
    validate_param(kwargs, "kwargs")
    sharded_tensor = kwargs["tensor"]
    validate_param(sharded_tensor, "sharded_tensor")
    neg_slope = kwargs["a"]
    validate_param(neg_slope, "a")
    fan_mode = kwargs["mode"]
    validate_param(fan_mode, "mode")
    nonlinearity = kwargs["nonlinearity"]
    validate_param(nonlinearity, "nonlinearity")
    # Each local shard is initialized independently with the same scheme.
    for local_shard in sharded_tensor.local_shards():
        torch.nn.init.kaiming_uniform_(
            local_shard.tensor, a=neg_slope, mode=fan_mode, nonlinearity=nonlinearity
        )
    return sharded_tensor
| StarcoderdataPython |
5191056 | <filename>Code/Python/DataStructures/class_practice2.py
class person:
    """Practice class holding only class-level attribute defaults.

    Note: these attributes are shared by all instances until shadowed
    by an instance attribute of the same name.
    """
    age = 0
    initialAge = 24
    gender = "male"
    height = "6 foot 0 inches"
# Instantiate `person` and show the class-level attribute defaults.
newPerson = person()
print(newPerson.age)
print(newPerson.height)
class people:
    """A simple record of a person's name and age."""

    def __init__(self, name, age):
        # Store both constructor arguments on the instance.
        self.name, self.age = name, age
# Exercise the `people` class with two instances.
new_people = people("Katia",22)
print(new_people.name)
print(new_people.age)
myself = people("Darius",24)
print(myself.name)
print(myself.age)
print("Hi my name is " + str(myself.name) + " and I am " + str(myself.age) + " years old.") | StarcoderdataPython |
3553988 | <gh_stars>0
"""
The following description is taken from the official website:
https://www.robots.ox.ac.uk/~vgg/data/voxceleb/
VoxCeleb is an audio-visual dataset consisting of short clips of human speech, extracted
from interview videos uploaded to YouTube. VoxCeleb contains speech from speakers spanning
a wide range of different ethnicities, accents, professions and ages. There are a total of
7000+ speakers and 1 million utterances.
All speaking face-tracks are captured "in the wild", with background chatter, laughter,
overlapping speech, pose variation and different lighting conditions. VoxCeleb consists
of both audio and video, comprising over 2000 hours of speech. Each segment is at least
3 seconds long.
The dataset consists of two versions, VoxCeleb1 and VoxCeleb2. Each version has its own
train/test split. For each version, the YouTube URLs, face detections and tracks, audio files,
cropped face videos and speaker meta-data are provided. There is no overlap between the
two versions.
- VoxCeleb1: VoxCeleb1 contains over 100,000 utterances for 1,251 celebrities.
http://www.robots.ox.ac.uk/~vgg/data/voxceleb/
- VoxCeleb2: VoxCeleb2 contains over a million utterances for 6,112 identities.
http://www.robots.ox.ac.uk/~vgg/data/voxceleb2/
LICENSE: The VoxCeleb dataset is available to download for commercial/research purposes
under a Creative Commons Attribution 4.0 International License. The copyright remains with
the original owners of the video.
This Lhotse recipe prepares the VoxCeleb1 and VoxCeleb2 datasets.
"""
import logging
import zipfile
import shutil
from pathlib import Path
from typing import Dict, Optional, Tuple, Union
from collections import defaultdict, namedtuple
from concurrent.futures.process import ProcessPoolExecutor
from concurrent.futures import as_completed
from tqdm.auto import tqdm
from lhotse import (
MonoCut,
CutSet,
Recording,
RecordingSet,
SupervisionSegment,
SupervisionSet,
)
from lhotse.utils import Pathlike, urlretrieve_progress
from lhotse.qa import validate_recordings_and_supervisions
from lhotse.manipulation import combine
# Download URLs for the VoxCeleb1 archives: four multi-part dev archives plus
# the test-set zip. The dev parts must be concatenated into a single zip.
VOXCELEB1_PARTS_URL = [
    "https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox1_dev_wav_partaa",
    "https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox1_dev_wav_partab",
    "https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox1_dev_wav_partac",
    "https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox1_dev_wav_partad",
    "https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox1_test_wav.zip",
]
# Download URLs for the VoxCeleb2 archives: eight multi-part dev archives plus
# the test-set zip. NOTE(review): these also live under the "vox1a" directory
# on the mirror — presumably intentional hosting layout; verify if downloads fail.
VOXCELEB2_PARTS_URL = [
    "https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox2_dev_aac_partaa",
    "https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox2_dev_aac_partab",
    "https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox2_dev_aac_partac",
    "https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox2_dev_aac_partad",
    "https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox2_dev_aac_partae",
    "https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox2_dev_aac_partaf",
    "https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox2_dev_aac_partag",
    "https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox2_dev_aac_partah",
    "https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox2_test_aac.zip",
]
# Kaldi-style speaker-verification trials list for VoxCeleb1.
VOXCELEB1_TRIALS_URL = "http://www.openslr.org/resources/49/voxceleb1_test_v2.txt"
# Per-speaker metadata parsed from the corpus meta CSV files.
SpeakerMetadata = namedtuple(
    "SpeakerMetadata", ["id", "name", "gender", "nationality", "split"]
)
def download_voxceleb1(
    target_dir: Pathlike = ".",
    force_download: Optional[bool] = False,
) -> Path:
    """
    Download and unzip the VoxCeleb1 data.
    .. note:: A "connection refused" error may occur if you are downloading without a password.
    :param target_dir: Pathlike, the path of the dir to store the dataset.
    :param force_download: bool, if True, download the archive even if it already exists.
    :return: the path to downloaded and extracted directory with data.
    """
    target_dir = Path(target_dir)
    target_dir.mkdir(parents=True, exist_ok=True)
    zip_name = "vox1_dev_wav.zip"
    zip_path = target_dir / zip_name
    if zip_path.exists() and not force_download:
        logging.info(f"Skipping {zip_name} because file exists.")
    else:
        # Download each part into the target directory. The original code did
        # not pass ``filename=``, so the parts were saved to a temporary
        # location and the glob below never found them.
        for url in VOXCELEB1_PARTS_URL:
            part_name = url.split("/")[-1]
            urlretrieve_progress(
                url,
                filename=target_dir / part_name,
                desc=f"Downloading VoxCeleb1 {part_name}",
            )
        # Concatenate the dev parts (partaa, partab, ...) into the final zip.
        # They must be joined in lexicographic order, and the output must go to
        # ``zip_path`` (the original wrote to ``zip_name`` in the CWD).
        with open(zip_path, "wb") as out_file:
            for part in sorted(target_dir.glob("vox1_dev_wav_part*")):
                with open(part, "rb") as in_file:
                    shutil.copyfileobj(in_file, out_file)
    logging.info("Unzipping dev...")
    with zipfile.ZipFile(zip_path) as zf:
        zf.extractall(target_dir)
    logging.info("Unzipping test...")
    with zipfile.ZipFile(target_dir / "vox1_test_wav.zip") as zf:
        zf.extractall(target_dir)
    return target_dir
def download_voxceleb2(
    target_dir: Pathlike = ".",
    force_download: Optional[bool] = False,
) -> Path:
    """
    Download and unzip the VoxCeleb2 data.
    .. note:: A "connection refused" error may occur if you are downloading without a password.
    :param target_dir: Pathlike, the path of the dir to store the dataset.
    :param force_download: bool, if True, download the archive even if it already exists.
    :return: the path to downloaded and extracted directory with data.
    """
    target_dir = Path(target_dir)
    target_dir.mkdir(parents=True, exist_ok=True)
    zip_name = "vox2_aac.zip"
    zip_path = target_dir / zip_name
    if zip_path.exists() and not force_download:
        logging.info(f"Skipping {zip_name} because file exists.")
    else:
        # Download each part into the target directory. The original code did
        # not pass ``filename=``, so the parts were saved to a temporary
        # location and the glob below never found them.
        for url in VOXCELEB2_PARTS_URL:
            part_name = url.split("/")[-1]
            urlretrieve_progress(
                url,
                filename=target_dir / part_name,
                desc=f"Downloading VoxCeleb2 {part_name}",
            )
        # Concatenate the dev parts (partaa..partah) into the final zip.
        # They must be joined in lexicographic order, and the output must go to
        # ``zip_path`` (the original wrote to ``zip_name`` in the CWD).
        with open(zip_path, "wb") as out_file:
            for part in sorted(target_dir.glob("vox2_dev_aac_part*")):
                with open(part, "rb") as in_file:
                    shutil.copyfileobj(in_file, out_file)
    logging.info("Unzipping dev...")
    with zipfile.ZipFile(zip_path) as zf:
        zf.extractall(target_dir)
    logging.info("Unzipping test...")
    with zipfile.ZipFile(target_dir / "vox2_test_aac.zip") as zf:
        zf.extractall(target_dir)
    return target_dir
def prepare_voxceleb(
    voxceleb1_root: Optional[Pathlike] = None,
    voxceleb2_root: Optional[Pathlike] = None,
    output_dir: Optional[Pathlike] = None,
    num_jobs: int = 1,
) -> Dict[str, Dict[str, Union[RecordingSet, SupervisionSet]]]:
    """
    Prepare manifests for the VoxCeleb v1 and v2 corpora.
    Each split contains a RecordingSet and SupervisionSet in a dict under keys
    'recordings' and 'supervisions'.
    :param voxceleb1_root: Path to the VoxCeleb v1 dataset.
    :param voxceleb2_root: Path to the VoxCeleb v2 dataset.
    :param output_dir: Path to the output directory.
    :param num_jobs: Number of parallel jobs to run.
    :return: A dict with standard corpus splits ("train" and "test") containing the manifests.
    NOTE: We prepare the data using the Kaldi style split, i.e., the whole VoxCeleb2
    ("dev" and "test") and the training portion ("dev") of VoxCeleb1 are put into the
    "train" split. The "test" split contains the "test" portion of VoxCeleb1. So if
    VoxCeleb1 is not provided, no "test" split is created in the output manifests.
    Example usage:
    .. code-block:: python
        >>> from lhotse.recipes.voxceleb import prepare_voxceleb
        >>> manifests = prepare_voxceleb(voxceleb1_root='/path/to/voxceleb1',
        ...                              voxceleb2_root='/path/to/voxceleb2',
        ...                              output_dir='/path/to/output',
        ...                              num_jobs=4)
    NOTE: If VoxCeleb1 is provided, we also prepare the trials file using the list provided
    in http://www.openslr.org/resources/49/voxceleb1_test_v2.txt. This file is used in the
    Kaldi recipes for VoxCeleb speaker verification. This is prepared as 2 tuples of the form
    (CutSet, CutSet) with identical id's, one for each of positive pairs and negative pairs.
    These are stored in the dict under keys 'pos_trials' and 'neg_trials', respectively.
    For evaluation purpose, the :class:`lhotse.dataset.sampling.CutPairsSampler`
    can be used to sample from this tuple.
    """
    voxceleb1_root = Path(voxceleb1_root) if voxceleb1_root else None
    voxceleb2_root = Path(voxceleb2_root) if voxceleb2_root else None
    if not (voxceleb1_root or voxceleb2_root):
        raise ValueError("Either VoxCeleb1 or VoxCeleb2 path must be provided.")
    output_dir = Path(output_dir) if output_dir is not None else None
    if output_dir is not None:
        output_dir.mkdir(parents=True, exist_ok=True)
    manifests = defaultdict(dict)
    if voxceleb1_root:
        logging.info("Preparing VoxCeleb1...")
        manifests.update(_prepare_voxceleb_v1(voxceleb1_root, num_jobs))
        manifests.update(_prepare_voxceleb_trials(manifests["test"]))
    else:
        logging.info(
            "VoxCeleb1 not provided, no test split or trials file will be created..."
        )
    if voxceleb2_root:
        logging.info("Preparing VoxCeleb2...")
        v2_manifests = _prepare_voxceleb_v2(voxceleb2_root, num_jobs)
        if "train" in manifests:
            # VoxCeleb1 "dev" is already in "train"; append the whole of v2.
            manifests["train"]["recordings"] = combine(
                manifests["train"]["recordings"], v2_manifests["recordings"]
            )
            manifests["train"]["supervisions"] = combine(
                manifests["train"]["supervisions"], v2_manifests["supervisions"]
            )
        else:
            manifests["train"] = v2_manifests
    # Validate and (optionally) persist only the splits that actually exist:
    # "test" exists only when VoxCeleb1 was provided. The original iterated over
    # both splits unconditionally and raised a KeyError when it was absent.
    for split in ("train", "test"):
        if split not in manifests:
            continue
        recordings = manifests[split]["recordings"]
        supervisions = manifests[split]["supervisions"]
        validate_recordings_and_supervisions(recordings, supervisions)
        if output_dir is not None:
            recordings.to_file(output_dir / f"recordings_voxceleb_{split}.jsonl.gz")
            supervisions.to_file(output_dir / f"supervisions_voxceleb_{split}.jsonl.gz")
    # Write the trials cut sets to the output directory.
    if output_dir is not None:
        if "pos_trials" in manifests:
            for i, cuts in enumerate(manifests["pos_trials"]):
                cuts.to_file(output_dir / f"pos_trials_voxceleb_utt{i+1}.jsonl.gz")
        if "neg_trials" in manifests:
            for i, cuts in enumerate(manifests["neg_trials"]):
                cuts.to_file(output_dir / f"neg_trials_voxceleb_utt{i+1}.jsonl.gz")
    return manifests
def _prepare_voxceleb_v1(
    corpus_path: Pathlike,
    num_jobs: int,
) -> Dict[str, Dict[str, Union[RecordingSet, SupervisionSet]]]:
    """
    Prepare manifests for the VoxCeleb1 corpus. The manifests are created in a dict with
    2 splits: train ("dev") and test.
    """
    # Read the tab-separated speaker metadata: id, name, gender, nationality, split.
    speaker_metadata = {}
    with open(corpus_path / "vox1_meta.csv", "r") as f:
        next(f)  # skip the header row
        for line in f:
            spkid, name, gender, nationality, split = line.strip().split("\t")
            speaker_metadata[spkid] = SpeakerMetadata(
                id=spkid, name=name, gender=gender, nationality=nationality, split=split
            )
    # Scan all wav files in parallel and build one Recording + SupervisionSegment each.
    with ProcessPoolExecutor(num_jobs) as ex:
        recordings = []
        supervisions = []
        futures = []
        for p in (corpus_path / "wav").rglob("*.wav"):
            futures.append(ex.submit(_process_file, p, speaker_metadata))
        for future in tqdm(
            as_completed(futures),
            total=len(futures),
            desc="Processing VoxCeleb1",
            leave=False,
        ):
            recording, supervision = future.result()
            recordings.append(recording)
            supervisions.append(supervision)
    recording_set = RecordingSet.from_recordings(recordings)
    supervision_set = SupervisionSet.from_segments(supervisions)
    manifests = defaultdict(dict)
    # Split into dev and test sets based on the split of the speakers.
    for split in ("dev", "test"):
        # Bind the loop variable as a default argument: ``filter`` may evaluate
        # the predicate lazily, and a late-binding closure would then see only
        # the final value of ``split`` ("test") for both iterations.
        manifests[split]["supervisions"] = supervision_set.filter(
            lambda s, split=split: s.custom["split"] == split
        )
        # A set makes the per-recording membership test O(1) instead of O(n).
        split_ids = {s.recording_id for s in manifests[split]["supervisions"]}
        manifests[split]["recordings"] = recording_set.filter(
            lambda r, split_ids=split_ids: r.id in split_ids
        )
    # Kaldi-style naming: VoxCeleb1 "dev" becomes the "train" split.
    manifests["train"] = manifests.pop("dev")
    return manifests
def _prepare_voxceleb_trials(
    manifests: Dict[str, Dict[str, Union[RecordingSet, SupervisionSet]]]
) -> Dict[str, Tuple[CutSet, CutSet]]:
    """
    Prepare the trials file for the VoxCeleb1 corpus.

    Downloads the Kaldi-style trials list and returns two (CutSet, CutSet)
    pairs — one for the positive (same-speaker) trials and one for the
    negative trials — with matching cut ids within each pair.
    """
    recordings = manifests["recordings"]
    supervisions = manifests["supervisions"]

    def _trial_cut(idx: int, utt: str) -> MonoCut:
        # One single-channel cut spanning the whole recording of ``utt``.
        # NOTE(review): ``supervisions[utt]`` presumably yields the supervision
        # segment(s) for that recording id — confirm SupervisionSet indexing.
        return MonoCut(
            id=f"trial-{idx}",
            recording=recordings[utt],
            start=0,
            duration=recordings[utt].duration,
            supervisions=supervisions[utt],
            channel=0,
        )

    cuts_utt1_pos, cuts_utt2_pos, cuts_utt1_neg, cuts_utt2_neg = [], [], [], []
    urlretrieve_progress(VOXCELEB1_TRIALS_URL, filename="voxceleb_trials.txt")
    with open("voxceleb_trials.txt", "r") as f:
        for idx, line in enumerate(f):
            target, utt1, utt2 = line.strip().split(" ")
            # id10270/x6uYqmx31kE/00001.wav -> id10270-x6uYqmx31kE-00001
            utt1 = "-".join(utt1.split(".")[0].split("/"))
            utt2 = "-".join(utt2.split(".")[0].split("/"))
            if utt1 not in recordings or utt2 not in recordings:
                logging.warning(
                    f"Trial {idx} contains unknown recording: {utt1} or {utt2}"
                )
                continue
            # "1" marks a positive (same-speaker) trial.
            if target == "1":
                cuts_utt1_pos.append(_trial_cut(idx, utt1))
                cuts_utt2_pos.append(_trial_cut(idx, utt2))
            else:
                cuts_utt1_neg.append(_trial_cut(idx, utt1))
                cuts_utt2_neg.append(_trial_cut(idx, utt2))
    return {
        "pos_trials": (
            CutSet.from_cuts(cuts_utt1_pos),
            CutSet.from_cuts(cuts_utt2_pos),
        ),
        "neg_trials": (
            CutSet.from_cuts(cuts_utt1_neg),
            CutSet.from_cuts(cuts_utt2_neg),
        ),
    }
def _prepare_voxceleb_v2(
    corpus_path: Pathlike,
    num_jobs: int,
) -> Dict[str, Dict[str, Union[RecordingSet, SupervisionSet]]]:
    """
    Prepare manifests for the VoxCeleb2 corpus. The manifests are created the same dict
    without any splits since the whole data ("dev" and "test") is used in the
    final "train" split.
    """
    # Read the comma-separated speaker metadata (no name/nationality columns).
    speaker_metadata = {}
    with open(corpus_path / "vox2_meta.csv", "r") as f:
        next(f)  # skip the header row
        for line in f:
            spkid, _, gender, split = map(str.strip, line.split(","))
            speaker_metadata[spkid] = SpeakerMetadata(
                id=spkid, name="", gender=gender, nationality="", split=split
            )
    # Scan both portions of the corpus. The original code relied on ``split``
    # leaking out of the metadata loop above (so it always held the *last*
    # speaker's split), used a non-recursive glob that could not reach the
    # speaker/session subdirectories, and passed an unsupported
    # ``type="command"`` keyword to _process_file (a TypeError).
    recordings = []
    supervisions = []
    with ProcessPoolExecutor(num_jobs) as ex:
        futures = []
        for split in ("dev", "test"):
            # The distributed archives contain AAC (.m4a) audio; also accept
            # .wav for locally converted copies. TODO confirm directory layout.
            for pattern in ("*.m4a", "*.wav"):
                for p in (corpus_path / split).rglob(pattern):
                    futures.append(ex.submit(_process_file, p, speaker_metadata))
        for future in tqdm(
            as_completed(futures),
            total=len(futures),
            desc="Processing VoxCeleb2",
            leave=False,
        ):
            recording, supervision = future.result()
            recordings.append(recording)
            supervisions.append(supervision)
    return {
        "recordings": RecordingSet.from_recordings(recordings),
        "supervisions": SupervisionSet.from_segments(supervisions),
    }
def _process_file(
    file_path: Pathlike,
    speaker_metadata: Dict[str, SpeakerMetadata],
) -> Tuple[Recording, SupervisionSegment]:
    """
    Build a Recording and a matching SupervisionSegment for one audio file.

    The recording id is derived from the on-disk layout
    ``<speaker>/<session>/<utterance>.<ext>``.
    """
    utterance = file_path.stem
    session = file_path.parent.stem
    speaker = file_path.parent.parent.stem
    rec_id = f"{speaker}-{session}-{utterance}"
    meta = speaker_metadata[speaker]
    recording = Recording.from_file(file_path, recording_id=rec_id)
    supervision = SupervisionSegment(
        id=rec_id,
        recording_id=rec_id,
        speaker=speaker,
        gender=meta.gender,
        start=0.0,
        duration=recording.duration,
        custom={
            "speaker_name": meta.name,
            "nationality": meta.nationality,
            "split": meta.split,
        },
    )
    return recording, supervision
| StarcoderdataPython |
8060313 | <gh_stars>0
#!/usr/bin/env python3
from typing import List
def merge_sort(A: List) -> List:
    """Sort ``A`` in place with recursive merge sort and return it.

    Stable: equal elements keep their relative order.
    """
    if len(A) <= 1:
        return A
    middle = len(A) // 2
    # Sort copies of the two halves, then merge them back into A.
    left = merge_sort(A[:middle])
    right = merge_sort(A[middle:])
    li = ri = wi = 0
    while li < len(left) and ri < len(right):
        if left[li] <= right[ri]:
            A[wi] = left[li]
            li += 1
        else:
            A[wi] = right[ri]
            ri += 1
        wi += 1
    # Exactly one of the two tails is non-empty; copy it over.
    A[wi:] = left[li:] if li < len(left) else right[ri:]
    return A
if __name__ == "__main__":
    # Quick smoke test: sort a small list in place and show the result.
    ints = [-2, 99, 0, -743, 2, 3, 4]
    merge_sort(ints)
    print(ints)
| StarcoderdataPython |
3310932 | <filename>SimG4CMS/HcalTestBeam/test/python/run2002_cfg.py
import FWCore.ParameterSet.Config as cms
# CMSSW configuration for the 2002 HCAL test-beam simulation.
process = cms.Process("PROD")
# Particle data table, vertex smearing, test-beam geometry, event content,
# and the Geant4 simulation producer.
process.load("SimGeneral.HepPDTESSource.pdt_cfi")
process.load("IOMC.EventVertexGenerators.VtxSmearedGauss_cfi")
process.load("SimG4CMS.HcalTestBeam.TB2002GeometryXML_cfi")
process.load("Configuration.EventContent.EventContent_cff")
process.load("SimG4Core.Application.g4SimHits_cfi")
# ROOT output for the analysis histograms.
process.TFileService = cms.Service("TFileService",
    fileName = cms.string('hcaltb02.root')
)
# Message logging: INFO threshold on stdout; most categories silenced (limit 0),
# FwkJob/VertexGenerator/HcalTBSim unlimited (limit -1).
process.MessageLogger = cms.Service("MessageLogger",
    destinations = cms.untracked.vstring('cout'),
    categories = cms.untracked.vstring('CaloSim',
        'EcalGeom',
        'EcalSim',
        'HCalGeom',
        'HcalSim',
        'HcalTBSim',
        'FwkJob',
        'VertexGenerator'),
    debugModules = cms.untracked.vstring('*'),
    cout = cms.untracked.PSet(
        threshold = cms.untracked.string('INFO'),
        INFO = cms.untracked.PSet(
            limit = cms.untracked.int32(0)
        ),
        DEBUG = cms.untracked.PSet(
            limit = cms.untracked.int32(0)
        ),
        FwkJob = cms.untracked.PSet(
            limit = cms.untracked.int32(-1)
        ),
        VertexGenerator = cms.untracked.PSet(
            limit = cms.untracked.int32(-1)
        ),
        EcalGeom = cms.untracked.PSet(
            limit = cms.untracked.int32(0)
        ),
        HCalGeom = cms.untracked.PSet(
            limit = cms.untracked.int32(0)
        ),
        CaloSim = cms.untracked.PSet(
            limit = cms.untracked.int32(0)
        ),
        EcalSim = cms.untracked.PSet(
            limit = cms.untracked.int32(0)
        ),
        HcalSim = cms.untracked.PSet(
            limit = cms.untracked.int32(0)
        ),
        HcalTBSim = cms.untracked.PSet(
            limit = cms.untracked.int32(-1)
        )
    )
)
# Fixed random seeds for reproducible generation, simulation, and smearing.
process.load("IOMC.RandomEngine.IOMC_cff")
process.RandomNumberGeneratorService.generator.initialSeed = 456789
process.RandomNumberGeneratorService.g4SimHits.initialSeed = 9876
process.RandomNumberGeneratorService.VtxSmeared.initialSeed = 123456789
# Fixed beam direction (single eta/phi point) shared by generator and CaloSD.
process.common_beam_direction_parameters = cms.PSet(
    MinEta = cms.double(0.7397),
    MaxEta = cms.double(0.7397),
    MinPhi = cms.double(6.23955),
    MaxPhi = cms.double(6.23955),
    BeamPosition = cms.double(0.0)
)
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(100)
)
process.source = cms.Source("EmptySource",
    firstRun = cms.untracked.uint32(1),
    firstEvent = cms.untracked.uint32(1)
)
# Single-particle gun: ~20 GeV pions (PDG 211) along the fixed beam direction.
process.generator = cms.EDProducer("FlatRandomEGunProducer",
    PGunParameters = cms.PSet(
        process.common_beam_direction_parameters,
        MinE = cms.double(19.99),
        MaxE = cms.double(20.01),
        PartID = cms.vint32(211)
    ),
    Verbosity = cms.untracked.int32(0),
    AddAntiParticle = cms.bool(False)
)
process.o1 = cms.OutputModule("PoolOutputModule",
    process.FEVTSIMEventContent,
    fileName = cms.untracked.string('sim2002.root')
)
process.Tracer = cms.Service("Tracer")
process.Timing = cms.Service("Timing")
process.p1 = cms.Path(process.generator*process.VtxSmeared*process.g4SimHits)
process.outpath = cms.EndPath(process.o1)
# Beam spot: essentially point-like vertex upstream of the test-beam setup.
process.VtxSmeared.MeanX = -420.0
process.VtxSmeared.MeanY = 18.338
process.VtxSmeared.MeanZ = -340.11
process.VtxSmeared.SigmaX = 0.000001
process.VtxSmeared.SigmaY = 0.000001
process.VtxSmeared.SigmaZ = 0.000001
# Test-beam specific Geant4 settings: no magnetic field, off-axis beam.
process.g4SimHits.NonBeamEvent = True
process.g4SimHits.UseMagneticField = False
process.g4SimHits.Physics.type = 'SimG4Core/Physics/QGSP_FTFP_BERT_EML'
# NOTE(review): process.common_heavy_suppression is not defined in this file —
# presumably provided by one of the loaded cfi/cff configs; confirm.
process.g4SimHits.CaloSD = cms.PSet(
    process.common_beam_direction_parameters,
    process.common_heavy_suppression,
    EminTrack = cms.double(1.0),
    TmaxHit = cms.double(1000.0),
    EminHits = cms.vdouble(0.0,0.0,0.0,0.0),
    EminHitsDepth = cms.vdouble(0.0,0.0,0.0,0.0),
    TmaxHits = cms.vdouble(1000.0,1000.0,1000.0,1000.0),
    HCNames = cms.vstring('EcalHitsEB','EcalHitsEE','EcalHitsES','HcalHits'),
    UseResponseTables = cms.vint32(0,0,0,0),
    SuppressHeavy = cms.bool(False),
    UseFineCaloID = cms.bool(False),
    CheckHits = cms.untracked.int32(25),
    UseMap = cms.untracked.bool(True),
    Verbosity = cms.untracked.int32(0),
    DetailedTiming = cms.untracked.bool(False),
    CorrectTOFBeam = cms.bool(False)
)
process.g4SimHits.HCalSD.ForTBH2 = True
process.g4SimHits.CaloTrkProcessing.TestBeam = True
# Attach the HcalTB02Analysis watcher to fill the test-beam histograms.
process.g4SimHits.Watchers = cms.VPSet(cms.PSet(
    type = cms.string('HcalTB02Analysis'),
    HcalTB02Analysis = cms.PSet(
        Names = cms.vstring('HcalHits', 'EcalHitsEB'),
        HcalClusterOnly = cms.untracked.bool(False),
        Verbose = cms.untracked.bool(True)
    )
))
| StarcoderdataPython |
3369832 | input = """
"""
output = """
{x(2)}
"""
| StarcoderdataPython |
9649998 | <gh_stars>1-10
import collections
import collections.abc
import io
import logging

import ujson
class Settings(collections.abc.MutableMapping):
    """Dictionary-like settings container backed by a JSON file.

    Behaves as a standard mutable mapping over an internal dict. If a
    ``path`` keyword is supplied, additional settings are merged in from
    the JSON file at that path during construction.

    Note: derives from ``collections.abc.MutableMapping`` — the alias
    ``collections.MutableMapping`` was removed in Python 3.10.
    """

    def __init__(self, *args, **kwargs):
        self._logger = logging.getLogger('Settings')
        self.store = dict()
        self.update(dict(*args, **kwargs))
        if 'path' in kwargs:
            self.load(kwargs['path'])

    def __getitem__(self, key):
        return self.store[key]

    def __setitem__(self, key, value):
        self.store[key] = value

    def __delitem__(self, key):
        del self.store[key]

    def __iter__(self):
        return iter(self.store)

    def __len__(self):
        return len(self.store)

    def load(self, path):
        """Merge settings from the JSON file at ``path``.

        I/O errors are logged and swallowed (best-effort load), matching
        the original behavior.
        """
        try:
            # ``with`` guarantees the handle is closed (the original leaked it).
            with io.open(path, mode='rb', buffering=io.DEFAULT_BUFFER_SIZE) as js:
                self.update(ujson.load(js))
            # logging uses %-style formatting; the original passed '{}'
            # placeholders, so the message was never rendered correctly.
            self._logger.info('Loaded from %s', path)
        except IOError as e:
            # str(e) instead of e.message: the ``message`` attribute is
            # Python-2-only and raised AttributeError here on Python 3.
            self._logger.warning('Failed to open path=%s; %s', path, e)

    def save(self, path=None):
        """Serialize the settings to JSON at ``path``.

        :param path: destination file; defaults to ``self['path']``.
        :raises KeyError: if no path is given and none is stored.
        """
        if path is None:
            if 'path' in self.store:
                path = self.store['path']
            else:
                raise KeyError('path must be specified')
        with io.open(path, mode='w', encoding='UTF-8') as js:
            ujson.dump(self, js)
            js.flush()
11295559 | <filename>tcp_client.py
#-*- coding: utf-8 -*-
import socket
import sys
# Simple interactive TCP echo client: prompts for a server IP, then sends
# user-typed messages and prints each reply.
tcp_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    host = input("INFORME O IP COM O QUAL DESEJA SE COMUNICAR: ")
    tcp_client.connect((host, 5555))
    while True:
        msg = input("Informe uma mensagem p/ enviar ao servidor: ")
        tcp_client.send(msg.encode('utf-8'))
        data = tcp_client.recv(1024)
        # recv() returns b'' when the server closes the connection. The
        # original tested ``len(str(data)) >= 0`` — always true — so the
        # "no data" branch was unreachable and a closed peer busy-looped.
        if data:
            print("RECEBIDO DO : " + data.decode('utf-8'))
        else:
            print("NENHUM DADO RECEBIDO....")
            break
except (ValueError, OSError):
    # connect()/send()/recv() raise OSError subclasses (e.g. connection
    # refused); the original caught only ValueError and crashed on them.
    print("Erro na conexão!")
finally:
    # Always release the socket, whatever happened above.
    tcp_client.close()
| StarcoderdataPython |
139957 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-10 10:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Change the default of ``Config.latest_value`` to 20."""

    # Builds on the migration that introduced the ``latest_value`` field.
    dependencies = [
        ('dashboard2', '0003_config_latest_value'),
    ]
    operations = [
        migrations.AlterField(
            model_name='config',
            name='latest_value',
            # Same IntegerField, now with default=20.
            field=models.IntegerField(default=20),
        ),
    ]
| StarcoderdataPython |
12824460 | """trna_generate_bed.py - generate a bed file from a fasta file of pre-tRNAs
================================================================
Purpose
-------
This script takes as an input a fasta file of pre-tRNAs and will
generate a bed file using the name of each fasta read as the
chromosome name and will output the coordinates of the mature
tRNAs as a bed file.
Usage
-----
Options
-------
-m, --merge-pairs
Output one region per fragment rather than one region per read,
thus a single region is create stretching from the start of the
frist read in pair to the end of the second.
Read pairs that meet the following criteria are removed:
* Reads where one of the pair is unmapped
* Reads that are not paired
* Reads where the pairs are mapped to different chromosomes
* Reads where the the insert size is not between the max and
min (see below)
Type::
python trna_generate_bed.py --help
for command line help.
Command line options
--------------------
"""
import sys
import re
import cgat.FastaIterator as FastaIterator
import cgatcore.iotools as IOTools
import cgatcore.experiment as E
def main(argv=None):
    """script main.

    parses command line options in sys.argv, unless *argv* is given.

    Reads a fasta file of pre-tRNAs from stdin and writes a BED-like record
    for every lower-case run of each sequence (the mature tRNA segments),
    using the fasta title as the chromosome name.
    """
    import itertools

    if not argv:
        argv = sys.argv
    # setup command line parser
    parser = E.OptionParser(
        version="%prog version: $Id$", usage=globals()["__doc__"])
    (options, args) = E.start(parser, argv=argv)
    if len(args) == 0:
        args.append("-")
    E.info(options.stdin)
    # ``with`` ensures the input handle is closed (the original leaked it).
    with IOTools.open_file(options.stdin.name) as fastafile:
        for entry in FastaIterator.FastaIterator(fastafile):
            chrom = entry.title
            # Partition the sequence into maximal runs of consistent case:
            # upper-case = precursor/flanking, lower-case = mature tRNA.
            # groupby also handles an empty sequence, which crashed the
            # original (it appended None and then called .islower() on it).
            # Assumes sequences are purely alphabetic — TODO confirm.
            segments = [
                "".join(run)
                for _, run in itertools.groupby(entry.sequence, key=str.isupper)
            ]
            # Coordinates follow the original script: 1-based start, each
            # segment spanning [start, start + len).
            start = 1
            end = 1
            for segment in segments:
                start = end
                end = start + len(segment)
                if segment.islower():
                    # Strand is encoded in the fasta title, e.g. "name(+)".
                    strand = chrom.split("(")[1].split(")")[0]
                    options.stdout.write(
                        ("%s\t%s\t%s\t%s\t%s\t%s\n") % (chrom, start, end, chrom, ".", strand))
    E.stop()
# Run the command-line entry point when executed as a script.
if __name__ == "__main__":
    sys.exit(main(sys.argv))
| StarcoderdataPython |
37082 | <filename>tests/archive/test_archive_value.py
# This file is part of the History Store (histore).
#
# Copyright (C) 2018-2021 New York University.
#
# The History Store (histore) is released under the Revised BSD License. See
# file LICENSE for full license details.
"""Unit test for archived cell values."""
import pytest
from histore.archive.value import MultiVersionValue, SingleVersionValue
from histore.archive.timestamp import SingleVersion, Timestamp, TimeInterval
def test_cell_history():
    """Test adding values to the history of a dataset row cell."""
    # A fresh single-version value only exists at its creation version.
    cell = SingleVersionValue(value=1, timestamp=SingleVersion(version=1))
    assert cell.at_version(version=1) == 1
    assert cell.is_single_version()
    assert not cell.is_multi_version()
    with pytest.raises(ValueError):
        cell.at_version(version=2)
    assert cell.at_version(version=2, raise_error=False) is None
    # Merging an identical value extends the timestamp without branching.
    cell = cell.merge(value=1, version=2)
    assert cell.at_version(version=1) == 1
    assert cell.at_version(version=2) == 1
    assert cell.diff(original_version=1, new_version=2) is None
    assert cell.at_version(version=3, raise_error=False) is None
    # Diff against a version outside the timestamp reports a deletion.
    prov = cell.diff(original_version=2, new_version=3)
    assert prov is not None
    assert prov.old_value == 1
    assert prov.new_value is None
    # Merging a *different* value turns the cell into a multi-version value.
    cell = SingleVersionValue(value=1, timestamp=SingleVersion(version=1))
    cell = cell.merge(value='1', version=2)
    assert len(cell.values) == 2
    assert cell.at_version(version=1) == 1
    assert cell.at_version(version=2) == '1'
    prov = cell.diff(original_version=1, new_version=2)
    assert prov is not None
    assert prov.old_value == 1
    assert prov.new_value == '1'
    with pytest.raises(ValueError):
        cell.at_version(version=3)
    # Re-merging an existing value reuses its entry (still two entries).
    cell = cell.merge(value=1, version=3)
    assert len(cell.values) == 2
    assert cell.at_version(version=1) == 1
    assert cell.at_version(version=2) == '1'
    assert cell.at_version(version=3) == 1
    assert not cell.is_single_version()
    assert cell.is_multi_version()
def test_extend_cell_value_timestamp():
    """Test extending the timestamp of a cell value."""
    cell = SingleVersionValue(value=1, timestamp=SingleVersion(version=1))
    # Extending from a version inside the timestamp adds the new version.
    cell = cell.extend(version=2, origin=1)
    assert not cell.timestamp.contains(0)
    assert cell.timestamp.contains(1)
    assert cell.timestamp.contains(2)
    assert not cell.timestamp.contains(3)
    # Extending from a version *outside* the timestamp is a no-op.
    cell = cell.extend(version=4, origin=0)
    assert not cell.timestamp.contains(0)
    assert cell.timestamp.contains(1)
    assert cell.timestamp.contains(2)
    assert not cell.timestamp.contains(3)
    assert not cell.timestamp.contains(4)
    # Extend individual values of a multi-version cell from their origins.
    cell = cell.merge(value='1', version=3)
    cell = cell.merge(value=1, version=4)
    cell = cell.extend(version=5, origin=4)
    cell = cell.extend(version=6, origin=3)
    assert cell.at_version(1) == 1
    assert cell.at_version(2) == 1
    assert cell.at_version(3) == '1'
    assert cell.at_version(4) == 1
    assert cell.at_version(5) == 1
    assert cell.at_version(6) == '1'
    with pytest.raises(ValueError):
        cell.at_version(0)
def test_rollback_multi_value():
    """Test rollback for single version values."""
    value = MultiVersionValue([
        SingleVersionValue(
            value=1,
            timestamp=Timestamp(intervals=[TimeInterval(start=2, end=3)])
        ),
        SingleVersionValue(
            value=2,
            timestamp=Timestamp(intervals=[TimeInterval(start=4, end=5)])
        )
    ])
    # Rolling back to a version where both values still exist keeps the
    # multi-version representation.
    value = value.rollback(4)
    assert isinstance(value, MultiVersionValue)
    assert len(value.values) == 2
    assert value.at_version(3) == 1
    assert value.at_version(4) == 2
    # Rolling back past the second value collapses to a single version.
    value = value.rollback(2)
    assert isinstance(value, SingleVersionValue)
    assert value.value == 1
    # -- Rollback to version that did not contain the value -------------------
    value = MultiVersionValue([
        SingleVersionValue(
            value=1,
            timestamp=Timestamp(intervals=[TimeInterval(start=2, end=3)])
        ),
        SingleVersionValue(
            value=2,
            timestamp=Timestamp(intervals=[TimeInterval(start=4, end=5)])
        )
    ])
    assert value.rollback(1) is None
def test_rollback_single_value():
    """Test rollback for single version values."""
    value = SingleVersionValue(
        value=1,
        timestamp=Timestamp(intervals=[TimeInterval(start=1, end=3)])
    )
    # Rolling back to version 2 truncates the timestamp: 3 is gone.
    value = value.rollback(2)
    assert value.value == 1
    assert value.timestamp.contains(1)
    assert value.timestamp.contains(2)
    assert not value.timestamp.contains(3)
    # Rolling back before the value existed yields None.
    assert value.rollback(0) is None
def test_value_repr():
    """Test string representations for archive values."""
    value = SingleVersionValue(
        value=1,
        timestamp=Timestamp(intervals=[TimeInterval(start=1, end=3)])
    )
    # Single-version repr: "(value [intervals])".
    assert str(value) == '(1 [[1, 3]])'
    value = MultiVersionValue([
        SingleVersionValue(
            value=1,
            timestamp=Timestamp(intervals=[TimeInterval(start=2, end=3)])
        ),
        SingleVersionValue(
            value=2,
            timestamp=Timestamp(intervals=[TimeInterval(start=4, end=5)])
        )
    ])
    # Multi-version repr: comma-joined tuple of the single-version reprs.
    assert str(value) == '((1 [[2, 3]]), (2 [[4, 5]]))'
| StarcoderdataPython |
9775787 | <filename>dsfinterp/dsfsave.py
'''
Created on Jan 28, 2014
@author: <NAME>
'''
from logger import vlog
from abc import ABCMeta, abstractmethod
class DsfSave(object):
    '''
    Abstract class for dynamic structure factor savers
    '''
    # NOTE(review): `__metaclass__` is the Python 2 way of setting a
    # metaclass; under Python 3 it is silently ignored, so the abstract
    # machinery would not be enforced there — confirm the target runtime.
    __metaclass__ = ABCMeta

    def __init__(self):
        '''
        Constructor
        '''
        # Concrete subclasses overwrite this with the type tag they handle.
        self.datatype = None

    @abstractmethod
    def Save(self, dsf, *args):
        ''' This method must be implemented on every subclass '''
        pass
class DsfSaveMantidWorkspace2D(DsfSave):
    ''' This class implements a saver into a Mantid Workspace2D '''

    def __init__(self):
        self.datatype = 'mantid::Workspace2D'

    def Save(self, dsf, ws):
        ''' Save the dynamics structure factor into the workspace.

        dsf: the dynamics structure factor to save; must be two-dimensional
             with shape (nhist, size) matching the workspace layout.
        ws: workspace whose Y and E arrays are overwritten with dsf contents.

        Errors are logged through vlog and the method returns without
        raising, leaving the workspace untouched.
        '''
        dimension = len(dsf.shape)
        if dimension != 2:
            vlog.error('Dimension of the dynamics structure factor is not 2')
            return
        nhist = dsf.shape[0]
        size = dsf.shape[1]
        try:
            mhist = ws.getNumberHistograms()
            if nhist != mhist:
                # Typo fix: 'worskpace' -> 'workspace' in the logged message.
                vlog.error('Number of histograms in the workspace does not match the dynamics structure factor first dimension')
                return
            for ihist in range(nhist):
                # Every histogram must have the same length as the second
                # dimension of the structure factor.
                if ws.dataY(ihist).size != size:
                    vlog.error('second dimension of the dynamics structure factor has different size than that of histogram with index ' + str(ihist))
                    return
                ws.dataY(ihist)[:] = dsf.intensities[ihist]
                ws.dataE(ihist)[:] = dsf.errors[ihist]
        except TypeError:
            # ws lacks the Workspace2D interface (e.g. wrong object passed).
            vlog.error('the workspace is not of type ' + self.datatype)
            return
class DsfSaveFactory(object):
    ''' Factory producing dynamic structure factor savers by data type. '''

    # Registry mapping a data-type tag to its saver class.
    savers = {'mantid::Workspace2D': DsfSaveMantidWorkspace2D,
              }

    def __init__(self):
        pass

    @property
    def datatypes(self):
        ''' Handy property returning the supported data types '''
        return DsfSaveFactory.savers.keys()

    def Instantiate(self, datatype):
        ''' Instantiate a dynamic structure factor saver of appropriate type.

        An unsupported datatype is logged and then raises KeyError on lookup.
        '''
        if datatype not in self.datatypes:
            # Message fix: this factory builds *savers*, not loaders.
            vlog.error('No dynamic structure factor saver for type {0}'.format(datatype))
        return DsfSaveFactory.savers[datatype]()
1763673 | import json
from pprint import pprint
from itertools import combinations
from gensim.models import TfidfModel
from gensim.corpora import Dictionary
from nltk import ngrams
from scipy.spatial.distance import cosine
from sklearn.cluster import DBSCAN
import numpy as np
from settings import *
def preprocess_blob(blob):
    """Normalise a text blob: lowercase it and strip all spaces and dots."""
    cleaned = blob.lower()
    for unwanted in (" ", "."):
        cleaned = cleaned.replace(unwanted, "")
    return cleaned
def character_n_gram_tokenizer(blob, n_grams=(3, 7)):
    """Return all character n-grams of the preprocessed blob.

    n-gram sizes run from n_grams[0] to n_grams[1] inclusive; larger sizes
    end up earlier in the returned list (matching the original ordering).
    """
    cleaned = preprocess_blob(blob)
    lo, hi = n_grams
    tokens = []
    for size in range(lo, hi + 1):
        grams = [''.join(gram) for gram in ngrams(list(cleaned), size)]
        tokens = grams + tokens
    return tokens
def entity_ngrams_generator(entities):
    """Lazily yield the character-n-gram token list for each entity."""
    yield from map(character_n_gram_tokenizer, entities)
def entity_bow_generator(entities, dictionary):
    """Lazily yield the gensim bag-of-words for each entity's n-grams."""
    yield from (dictionary.doc2bow(tokens)
                for tokens in entity_ngrams_generator(entities))
def get_tfidf_vector(tfidf, dictionary_length):
    """Expand a sparse (index, score) tf-idf list into a dense numpy vector."""
    dense = np.zeros(dictionary_length)
    if tfidf:
        indices, scores = zip(*tfidf)
        dense[list(indices)] = scores
    return dense
def create_tfidf_model(entities, n_grams_dictionary):
    """Fit a gensim TfidfModel over the entities' n-gram bags-of-words."""
    # The original computed len(n_grams_dictionary) here but never used it.
    return TfidfModel(entity_bow_generator(entities, n_grams_dictionary))
def vectorize_entities(entities, tfidf_model, n_grams_dictionary):
    """Turn every entity into a dense tf-idf vector.

    Returns a numpy array of shape (len(entities), len(n_grams_dictionary)).
    """
    dictionary_length = len(n_grams_dictionary)
    # Comprehension replaces the original append loop; the enumerate index
    # in the original was unused.
    vectors = [
        get_tfidf_vector(tfidf_model[entity_bow], dictionary_length)
        for entity_bow in entity_bow_generator(entities, n_grams_dictionary)
    ]
    return np.array(vectors)
def get_cluster_map(vectors, entities, eps, min_samples):
    """Cluster tf-idf vectors with cosine DBSCAN; map entity -> cluster label."""
    model = DBSCAN(eps=eps, min_samples=min_samples, metric='cosine')
    model.fit(vectors)
    return {entity: label for entity, label in zip(entities, model.labels_)}
def invert_cluster(cluster_map):
    """Invert an entity -> label mapping into label -> set of entities.

    Uses dict.setdefault instead of the original truthiness check on
    .get(), which would have re-created the set for any falsy stored value.
    """
    inverse_cluster_map = {}
    for entity, label in cluster_map.items():
        inverse_cluster_map.setdefault(label, set()).add(entity)
    return inverse_cluster_map
def clustering(entities, eps=0.9, min_samples=2, return_inverse=False):
    """Cluster entity strings by character-n-gram tf-idf similarity.

    Returns entity -> label, or (entity -> label, label -> entities) when
    return_inverse is True. With no vectors the map is empty.
    """
    n_grams_dictionary = Dictionary(entity_ngrams_generator(entities))
    tfidf_model = create_tfidf_model(entities, n_grams_dictionary)
    vectors = vectorize_entities(entities, tfidf_model, n_grams_dictionary)
    if len(vectors):
        cluster_map = get_cluster_map(vectors, entities, eps, min_samples)
    else:
        cluster_map = {}
    if not return_inverse:
        return cluster_map
    return cluster_map, invert_cluster(cluster_map)
if __name__ == '__main__':
    # Cluster the "talker" entity strings of every labelled record and
    # print the resulting entity -> cluster-label mapping.
    labelled_data = json.load(open(PREPROCESSED_PATH))
    for label in labelled_data:
        entities = [entity["entity"] for entity in label["talker"]]
        cluster_map = clustering(entities)
        pprint(cluster_map)
| StarcoderdataPython |
3518420 |
from flask import Flask, jsonify
from flask_restful import Resource, marshal_with
from .. import api
class Home(Resource):
    """Root resource acting as a simple health check."""

    def get(self):
        """Return a plain "OK" so clients can verify the API is up."""
        return "OK"
# Mount the health-check resource at the API root.
api.add_resource(Home, '/')
| StarcoderdataPython |
8044675 | <reponame>jasperro/core
"""Support for switching devices via Pilight to on and off."""
import logging
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchDevice
from homeassistant.const import CONF_SWITCHES
import homeassistant.helpers.config_validation as cv
from .base_class import SWITCHES_SCHEMA, PilightBaseDevice
_LOGGER = logging.getLogger(__name__)
# Extend the base switch schema: a mapping of switch name -> switch config.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {vol.Required(CONF_SWITCHES): vol.Schema({cv.string: SWITCHES_SCHEMA})}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Pilight platform."""
    switches = config.get(CONF_SWITCHES)
    # One entity per configured switch; the comprehension replaces the
    # original append loop.
    devices = [
        PilightSwitch(hass, dev_name, dev_config)
        for dev_name, dev_config in switches.items()
    ]
    add_entities(devices)
class PilightSwitch(PilightBaseDevice, SwitchDevice):
    """Representation of a Pilight switch.

    All on/off behavior is inherited from PilightBaseDevice / SwitchDevice;
    no extra state is needed here.
    """
| StarcoderdataPython |
4907475 | <reponame>dee6600/invpend_experiment
#! /usr/bin/env python
import rospy
import random
import math
from cartpole_v0 import CartPole
class Testbed(CartPole):
    """Exercise the cart-pole environment with scripted velocity commands."""

    def __init__(self):
        CartPole.__init__(self)
        # Reference time used as phase origin for the sinusoidal command.
        self.start = rospy.Time.now()

    def random_move(self):
        """ Control cart with random velocity command """
        rate = rospy.Rate(self.freq)
        while not rospy.is_shutdown():
            # NOTE(review): the returned `ob`, `reward`, `out` are unused;
            # the range check below relies on self.out_range instead —
            # confirm this is intentional.
            ob, reward, out = self.observe_env()
            if self.out_range:
                self.reset_env()
            else:
                print("=== Within range, exert random vel command ===")
                # Uniform random velocity in [-10, 10].
                vel_cmd = random.uniform(-10, 10)
                self.take_action(vel_cmd)
            rate.sleep()

    def sin_move(self):
        """ Control cart with sinusoidal velocity command """
        rate = rospy.Rate(self.freq)
        # Period scaling (Hz multiplier) and velocity amplitude.
        period_factor = 1
        amplitude_factor = 25
        while not rospy.is_shutdown():
            ob, reward, out = self.observe_env()
            if self.out_range:
                self.reset_env()
            else:
                print("=== Within range, exert sinusoidal vel command ===")
                # Phase grows with elapsed wall-clock time since start.
                elapsed = rospy.Time.now() - self.start
                w = period_factor * elapsed.to_sec()
                vel_cmd = amplitude_factor * math.cos(w*2*math.pi)
                self.take_action(vel_cmd)
            rate.sleep()
def main():
    """Start the ROS node and drive the cart with random velocity commands."""
    print("Initializing node... ")
    rospy.init_node('cart_random_move')
    test_agent = Testbed()
    # Ensure a clean stop when the node is shut down.
    rospy.on_shutdown(test_agent.clean_shutdown)
    # test_agent.random_move()
    # NOTE(review): the duplicated commented call above suggests sin_move()
    # may have been intended as the alternative here — confirm which mode.
    test_agent.random_move()
    rospy.spin()

if __name__ == '__main__':
    main()
| StarcoderdataPython |
3590508 | # 15/15
# Read the (square) table size, then one row of flower heights per line.
num_of_flowers = int(input())
table = [list(map(int, input().split())) for _ in range(num_of_flowers)]
def rotate_table(tb):
    """Rotate *tb* 90 degrees clockwise until it reaches the orientation in
    which every row is strictly increasing and the rows' first elements are
    strictly increasing too, then return that table.

    Generalized: the size is derived from len(tb) instead of the module-level
    num_of_flowers global, so the function works on any square table.
    Note: recursion does not terminate if no orientation is valid (same as
    the original behavior).
    """
    size = len(tb)
    rotated = [[] for _ in range(size)]
    # Clockwise rotation: the reversed rows become the new columns.
    for row in reversed(tb):
        for col, flower in enumerate(row):
            rotated[col].append(flower)

    def _strictly_increasing(values):
        # Matches the original check exactly: each value must exceed its
        # predecessor, and the first value must be positive (> 0).
        last = 0
        for value in values:
            if value <= last:
                return False
            last = value
        return True

    # Every row of the candidate orientation must be strictly increasing...
    for row in rotated:
        if not _strictly_increasing(row):
            return rotate_table(rotated)
    # ...and so must the first column (the rows' leading flowers).
    if not _strictly_increasing([row[0] for row in rotated]):
        return rotate_table(rotated)
    return rotated
# Rotate the table back to its canonical (sorted) orientation and print it.
initial_table = rotate_table(table)
for row in initial_table:
    print(" ".join(map(str, row)))
| StarcoderdataPython |
9762601 | # -*- coding: utf-8 -*-
"""
Homework: Calibrate the Camera with ZhangZhengyou Method.
Picture File Folder: ".\pic\RGB_camera_calib_img", Without Distort.
By YouZhiyuan 2019.11.18
"""
import os
import numpy as np
import cv2
import glob
def calib(inter_corner_shape, size_per_grid, img_dir, img_type):
    """Calibrate a camera with Zhang's chessboard method.

    inter_corner_shape: (w, h) count of inner chessboard corners.
    size_per_grid: physical edge length of one square (world units).
    img_dir, img_type: folder and file extension of the calibration images.
    Returns (mat_inter, coff_dis): intrinsic matrix and distortion
    coefficients; also prints the extrinsics and mean reprojection error.
    """
    # criteria: only for subpix calibration, which is not used here.
    # criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    w, h = inter_corner_shape
    # cp_int: corner point in int form, save the coordinate of corner points in world space in 'int' form
    # like (0,0,0), (1,0,0), (2,0,0) ....,(10,7,0).
    cp_int = np.zeros((w * h, 3), np.float32)
    cp_int[:, :2] = np.mgrid[0:w, 0:h].T.reshape(-1, 2)
    # cp_world: corner point in world space, save the coordinate of corner points in world space.
    cp_world = cp_int * size_per_grid
    obj_points = []  # the points in world space
    img_points = []  # the points in image space (relevant to obj_points)
    # NOTE(review): without recursive=True, '**' behaves like '*' here, so
    # this matches files directly inside img_dir only — confirm intent.
    images = glob.glob(img_dir + os.sep + '**.' + img_type)
    for fname in images:
        img = cv2.imread(fname)
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # find the corners, cp_img: corner points in pixel space.
        ret, cp_img = cv2.findChessboardCorners(gray_img, (w, h), None)
        # if ret is True, save.
        if ret == True:
            # cv2.cornerSubPix(gray_img,cp_img,(11,11),(-1,-1),criteria)
            obj_points.append(cp_world)
            img_points.append(cp_img)
            # view the corners
            cv2.drawChessboardCorners(img, (w, h), cp_img, ret)
            cv2.imshow('FoundCorners', img)
            cv2.waitKey(1)
    cv2.destroyAllWindows()
    # calibrate the camera
    # NOTE(review): gray_img here is the *last* image processed; this assumes
    # at least one image matched the glob, otherwise NameError.
    ret, mat_inter, coff_dis, v_rot, v_trans = cv2.calibrateCamera(obj_points, img_points, gray_img.shape[::-1], None,
                                                                   None)
    print(("ret:"), ret)
    print(("internal matrix:\n"), mat_inter)
    # in the form of (k_1,k_2,p_1,p_2,k_3)
    print(("distortion cofficients:\n"), coff_dis)
    print(("rotation vectors:\n"), len(v_rot), v_rot)
    print(("translation vectors:\n"), v_trans)
    # calculate the error of reproject
    total_error = 0
    for i in range(len(obj_points)):
        img_points_repro, _ = cv2.projectPoints(obj_points[i], v_rot[i], v_trans[i], mat_inter, coff_dis)
        error = cv2.norm(img_points[i], img_points_repro, cv2.NORM_L2) / len(img_points_repro)
        total_error += error
    print(("Average Error of Reproject: "), total_error / len(obj_points))
    return mat_inter, coff_dis
if __name__ == '__main__':
    # 11x8 inner corners on the chessboard, 2 cm per square side.
    inter_corner_shape = (11, 8)
    size_per_grid = 0.02
    img_dir = ".\\pic\\M300_zoom_camera_2_img"
    img_type = "jpg"
    calib(inter_corner_shape, size_per_grid, img_dir, img_type)
| StarcoderdataPython |
3574922 | <reponame>vincent-lg/levantine<filename>src/context/character/complete.py
# Copyright (c) 2020, <NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
"""Ghost context to complete a new character.
This context will attempt to create a character and will then move to
another. The user has no chance to input. It's more a "responsible"
context than an active context.
"""
from pony.orm import commit, OrmError
from context.base import BaseContext
from data.character import Character
from data.room import Room
import settings
class Complete(BaseContext):
    """Ghost context to create a character."""

    async def refresh(self):
        """Try to create a character.

        Reads the name collected by a previous context, creates the
        Character row, attaches account and starting room, then moves on.
        On any failure the user is sent back to the name-selection context.
        """
        name = self.session.storage.get("character_name")
        # Check that all data are filled
        if name is None:
            await self.msg(
                "Hmmm... something went wrong. What was your character name again?"
            )
            await self.move("character.name")
            return
        # Attempt to create the character
        try:
            character = Character(name=name)
            # commit() forces the INSERT now so constraint violations
            # surface here rather than later.
            commit()
        except OrmError:
            await self.msg("Some error occurred. We'll have to try again.")
            await self.move("character.name")
            return
        character.account = self.session.account
        self.session.storage["character"] = character
        # Spawn the new character in the configured starting room.
        character.storage["saved_location"] = Room.get(
            barcode=settings.START_ROOM)
        await self.msg(f"The character named {name} was created successfully.")
        await self.move("connection.login")
| StarcoderdataPython |
1644027 | <gh_stars>0
# Static ARP table: (IP address, MAC in Cisco dotted-hex notation).
arp_table = [('10.220.88.1', '0062.ec29.70fe'),
             ('10.220.88.20', 'c89c.1dea.0eb6'),
             ('10.220.88.21', '1c6a.7aaf.576c'),
             ('10.220.88.28', '5254.aba8.9aea'),
             ('10.220.88.29', '5254.abbe.5b7b'),
             ('10.220.88.30', '5254.ab71.e119'),
             ('10.220.88.32', '5254.abc7.26aa'),
             ('10.220.88.33', '5254.ab3a.8d26'),
             ('10.220.88.35', '5254.abfb.af12'),
             ('10.220.88.37', '0001.00ff.0001'),
             ('10.220.88.38', '0002.00ff.0001'),
             ('10.220.88.39', '6464.9be8.08c8'),
             ('10.220.88.40', '001c.c4bf.826a'),
             ('10.220.88.41', '001b.7873.5634')]
# Convert each dotted-hex MAC (aaaa.bbbb.cccc) to uppercase
# colon-separated form (AA:AA:BB:BB:CC:CC) and print it.
for ip_addr, mac_addr in arp_table:
    hex_digits = mac_addr.upper().replace(".", "")
    octets = [hex_digits[i:i + 2] for i in range(0, len(hex_digits), 2)]
    print(":".join(octets))
| StarcoderdataPython |
9663256 | import click
from awsscripter.stack.helpers import catch_exceptions, confirmation
from awsscripter.stack.helpers import get_stack_or_env
from awsscripter.stack.stack_status import StackStatus
@click.command(name="create")
@click.argument("path")
@click.argument("change-set-name", required=False)
@click.option(
"-y", "--yes", is_flag=True, help="Assume yes to all questions."
)
@click.pass_context
@catch_exceptions
def create_command(ctx, path, change_set_name, yes):
"""
Creates a stack or a change set.
Creates a stack for a given config PATH. Or if CHANGE_SET_NAME is specified
creates a change set for stack in PATH.
"""
action = "create"
stack, _ = get_stack_or_env(ctx, path)
if change_set_name:
confirmation(action, yes, change_set=change_set_name, stack=path)
stack.create_change_set(change_set_name)
else:
confirmation(action, yes, stack=path)
response = stack.create()
if response != StackStatus.COMPLETE:
exit(1)
| StarcoderdataPython |
239544 | import sys
import cv2
import random
import numpy as np
import pandas as pd
from Ui_no5_ui import Ui_MainWindow
from matplotlib import pyplot as plt
from PyQt5.QtWidgets import QMainWindow, QApplication
import keras
from keras.datasets import cifar10
from keras.applications.vgg16 import VGG16
from keras.models import Sequential, load_model
from keras.layers import Dense, Activation, Flatten
# CIFAR-10 class names, indexed by label id (0-9).
_LABELS = ['airplane', 'automobile', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck']
class MainWindow(QMainWindow, Ui_MainWindow):
    """Qt main window of the CIFAR-10 / VGG16 demo (buttons 5.1 - 5.5)."""

    # Widget placeholder and dataset caches, populated at runtime.
    label_11 = None
    x_train = None
    y_train = None
    x_test = None
    y_test = None

    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        self.setupUi(self)
        self.onBindingUI()

    def onBindingUI(self):
        """Wire each push button to its click handler."""
        self.pushButton_13.clicked.connect(self.on_btn5_1_click)
        self.pushButton_14.clicked.connect(self.on_btn5_2_click)
        self.pushButton_15.clicked.connect(self.on_btn5_3_click)
        self.pushButton_16.clicked.connect(self.on_btn5_4_click)
        self.pushButton_17.clicked.connect(self.on_btn5_5_click)

    def on_btn5_1_click(self):
        """5.1: load CIFAR-10 and show ten random training images."""
        self.label_11.clear()
        # Import dataset
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()
        self.x_train = x_train.astype('float32')
        self.x_test = x_test.astype('float32')
        # One-hot encode the 10 class labels.
        self.y_train = keras.utils.to_categorical(y_train, 10)
        self.y_test = keras.utils.to_categorical(y_test, 10)
        #print(self.x_train ,self.x_test ,'分隔', self.y_train , self.y_test)
        # plot_images_labels
        fig = plt.figure('5.1 Show Train Images',figsize=(10,5))
        fig.subplots_adjust(hspace=0.0,wspace=0.4)
        for i in range(0, 10): # show 10 sub-plots in sequence
            ax = fig.add_subplot(2, 5, i+1) # create sub-plot i+1 of the 2x5 grid
            temp = random.randint(0,9999)
            x_train_resize=cv2.resize(self.x_train[temp], (128, 128))
            ax.imshow(np.uint8(x_train_resize))
            ax.set_title(_LABELS[list(self.y_train[temp]).index(1)],fontsize=10) # set the title
            ax.set_xticks([]); # hide x-axis ticks
            ax.set_yticks([]); # hide y-axis ticks
        plt.show()

    def on_btn5_2_click(self):
        """5.2: print the (fixed) training hyperparameters."""
        print('hyperparameters:')
        print('batch size:', 32)
        print('learning rate:', 0.001)
        print('optimizer:','SGD')

    def on_btn5_3_click(self):
        """5.3: build the VGG16-based classifier and print its summary."""
        input_shape = (32, 32, 3)
        model = Sequential()
        # Untrained VGG16 backbone without its top, plus two FC layers
        # and a 10-way softmax head.
        model.add(VGG16(weights=None, include_top=False,input_shape=input_shape))
        model.add(Flatten())
        model.add(Dense(4096, activation='relu'))
        model.add(Dense(4096, activation='relu'))
        model.add(Dense(10, activation='softmax'))
        model.summary()

    def on_btn5_4_click(self):
        """5.4: show pre-rendered accuracy/loss curves stacked vertically."""
        Accurancy = cv2.imread('Accurancy.png')
        loss = cv2.imread('loss.png')
        image = cv2.vconcat([Accurancy,loss])
        cv2.imshow('5.4 Show Accuracy', image)

    def on_btn5_5_click(self):
        """5.5: predict the test image whose index was typed in lineEdit_5."""
        self.label_11.clear()
        # Guard: the dataset must be loaded (button 5.1) first...
        if self.x_train is None or self.y_train is None or self.x_test is None or self.y_test is None :
            print('Please run "5.1 Show Train Images" first.')
            self.label_11.setText('Warning!! Please run "5.1 Show Train Images" first.')
            self.label_11.setStyleSheet("color: rgb(255, 0, 0);")
            #print(self.x_train ,self.y_train ,self.x_test , self.y_test)
        # ...and an index must have been entered.
        elif not len(self.lineEdit_5.text()) :
            print('Please enter (0~9999) first.')
            self.label_11.setText('Warning!! Please enter (0~9999) first.')
            self.label_11.setStyleSheet("color: rgb(255, 0, 0);")
        else :
            INDEX = int(self.lineEdit_5.text())
            model = load_model("cifar10_vgg16.h5")
            # Predict over the whole test set, then plot the class
            # probabilities of the chosen sample as a bar chart.
            probabilities = model.predict(self.x_test)
            plt.figure('Estimation result',figsize=(10, 6), dpi=80)
            plt.bar(["airplain", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"], list(probabilities[INDEX]), 0.5)
            plt.title('Estimation result')
            x_test = self.x_test.astype(np.uint8)
            plt.figure('image')
            plt.imshow(cv2.resize(x_test[INDEX], (128, 128)))
            title = _LABELS[list(self.y_test[INDEX]).index(1)]
            plt.title(title, fontsize=12)
            plt.xticks([])
            plt.yticks([])
            plt.show()
if __name__ == "__main__":
app = QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec_()) | StarcoderdataPython |
3581216 | import os;
import sys;
from Npp import *
##
# @brief Root folder whose source files will be cleaned up.
path="D:\\Users\\draap\\Desktop\\cooking"
##
# @brief Run the configured Notepad++ clean-up commands on the active file.
#        Commented-out commands are alternatives kept for quick toggling.
def run_menu_command():
    ##### Space to TAB
    # Edit->Blank Operations
    notepad.runMenuCommand("Blank Operations", "Trim Trailing Space")
    # notepad.runMenuCommand("Blank Operations", "TAB to Space")
    #notepad.runMenuCommand("Blank Operations", "Space to TAB (All)")
    ##### Convert Encoding to UTF-8
    # notepad.runMenuCommand("Encoding", "Encode in ANSI")
    # notepad.runMenuCommand("Encoding", "Convert to UTF-8")
    return
##
# @brief Walk *file_path* recursively and run the Notepad++ clean-up
#        commands on every C source, header and Python file found.
#        NOTE: the default is bound to the module-level `path` at
#        definition time.
def operate_file_in_path(file_path = path):
    for root, dirs, files in os.walk(file_path):
        for fn in files:
            # endswith with a tuple replaces the original slice comparisons
            # (fn[-2:] == '.c' etc.); `files` renamed from the misleading
            # singular `file`, which also shadowed a builtin name.
            if fn.endswith(('.c', '.h', '.py')):
                notepad.open(os.path.join(root, fn))
                run_menu_command()
                notepad.save()
                notepad.close()
# Kick off the batch clean-up using the module-level default path.
operate_file_in_path()
| StarcoderdataPython |
3302536 | <filename>lib_bre/lib_bre/__init__.py<gh_stars>0
from .transformers import *
from .library import * | StarcoderdataPython |
6460456 | """
Схемы graphql.
"""
import graphene
from .querys import RootQuery
# from .mutations import Login
# Root GraphQL schema exposing the project's query type.
schema = graphene.Schema(RootQuery)
"""
<EMAIL>
asgagag
"""
| StarcoderdataPython |
8047200 | from typing import Dict
from Tools import counter as c
from enum import Enum
import copy
class GameError(Exception):
    """Base class for all game-related errors."""
    pass


class CatSearchingError(GameError):
    """Raised when a cat lookup fails."""
    pass


class FrozenError(GameError):
    """Raised when attempting to modify a frozen cat or clan."""

    def __init__(self, type_of):
        # Bug fix: the original passed `self` as an extra positional
        # argument to Exception.__init__, so args became (self, message)
        # and str(exc) was a garbled tuple instead of the message.
        super().__init__(f"This {type_of} is frozen")
class CatGender(Enum):
    """Gender of a cat."""
    male = 0
    female = 1
class Cat:
    """A cat with skills, a name, a gender and a clan membership.

    A cat can be frozen (e.g. on death); every mutating method first calls
    _check_editing and raises FrozenError once frozen.
    """

    def __init__(self, skills: "Skills", name: "Name", gender: CatGender, clan: "Clan"):
        self.skills: "Skills" = skills
        self.name: Name = name
        self.gender: CatGender = gender
        self.clan: "Clan" = clan
        # Once True the cat rejects any further mutation.
        self.is_frozen: bool = False

    def _check_editing(self):
        """Raise FrozenError if this cat may no longer be modified."""
        if self.is_frozen:
            raise FrozenError("cat")

    def freeze(self):
        """Permanently lock the cat. Freezing twice raises FrozenError."""
        self._check_editing()
        self.is_frozen = True

    def join_clan(self, clan: "Clan"):
        """Leave the current clan and join *clan* (notifying both sides)."""
        self._check_editing()
        self.clan.on_cat_leaving(self)
        self.clan = clan
        clan.on_cat_joining(self)

    def get_name(self) -> str:
        return str(self.name)

    def take_damage(self):
        """Lose one health step; dying (counter underflow) kills the cat."""
        self._check_editing()
        try:
            self.skills.take_damage()
        except c.CounterOutOfRangeError:
            self.kill()

    def kill(self):
        # Death is modeled as freezing the cat.
        self.freeze()

    def __hash__(self):
        return super(Cat, self).__hash__()

    def __eq__(self, other):
        # Cats compare by identity: distinct objects are never equal.
        return self is other
class Kitten(Cat):
    """A kitten: only 2 HP, unable to move, and not yet able to learn."""

    def __init__(self, name: "Name", gender: CatGender, clan: "Clan"):
        kitten_skills = Skills(
            max_health=2,
            can_learn=False,
            can_learn_code=False,
            can_move=False,
        )
        super().__init__(kitten_skills, name, gender, clan)
class Paw(Cat):
    """An apprentice: 3 HP and eligible to learn the warrior code."""

    def __init__(self, name: "Name", gender: CatGender, clan: "Clan"):
        paw_skills = Skills(
            max_health=3,
            can_learn=False,
            can_learn_code=True,
        )
        super().__init__(paw_skills, name, gender, clan)
class MedicPaw(Cat):
    """A medicine apprentice: minimal stats but able to learn healing."""

    def __init__(self, name: "Name", gender: CatGender, clan: "Clan"):
        medic_skills = Skills(
            max_health=3,
            can_learn=False,
            can_learn_code=True,
            max_stat_points=1,
            max_hunting=1,
            max_fighting=1,
            can_move=True,
            can_heal=False,
            can_learn_healing=True,
        )
        super().__init__(medic_skills, name, gender, clan)
class Name:
    """A cat's name: a first name plus an optional prefix."""

    def __init__(self, first_name: str, prefix: str = None):
        self.first_name = first_name
        self.prefix = prefix

    def __str__(self):
        # Bug fix: the original rendered a missing prefix as the literal
        # string "None" (e.g. "Fire None").
        if self.prefix is None:
            return self.first_name
        return f"{self.first_name} {self.prefix}"

    def __hash__(self):
        return super(Name, self).__hash__()

    def __eq__(self, other):
        # Names compare by identity: two distinct Name objects never match.
        return self is other
class Skills:
    """Skill sheet for a cat: health, capability flags and stat counters."""

    def __init__(self, max_health: int = 3, can_learn: bool = False, can_learn_code: bool = False,
                 max_stat_points: int = 4, max_hunting: int = 3, max_fighting: int = 3, can_move: bool = True,
                 can_heal: bool = False, can_learn_healing: bool = False):
        # Static configuration and capability flags.
        self.can_learn_healing = can_learn_healing
        self.can_heal = can_heal
        self.learned_healing: bool = False
        self.max_stat_points: int = max_stat_points
        self.max_fighting = max_fighting
        self.max_hunting = max_hunting
        self.max_health: int = max_health
        self.can_learn: bool = can_learn
        self.can_learn_code: bool = can_learn_code
        self.learned_code: bool = False
        self.can_move: bool = can_move
        # Counters are built in reset() so construction and reset can never
        # drift apart (the original duplicated this block verbatim).
        self.reset()

    def reset(self):
        """(Re)build the health and stat counters from the configured maxima."""
        self.health = c.Counter(0, self.max_health)
        self.stats = c.LinkedCounters(0, self.max_stat_points)
        self.fighting = self.stats.set_counter("fighting", c.Counter(0, self.max_fighting))
        self.hunting = self.stats.set_counter("hunting", c.Counter(0, self.max_hunting))

    def take_damage(self):
        """Step health down; the counter raises when it cannot go lower."""
        self.health.back()

    def learn_code(self):
        """Learning the warrior code also unlocks general learning."""
        if self.can_learn_code:
            self.learned_code = True
            self.can_learn = True

    def learn_hunting(self):
        """Advance the hunting stat, if learning is unlocked."""
        if self.can_learn:
            self.hunting.step()

    def learn_healing(self):
        """Mark healing as learned, if this sheet allows it."""
        if self.can_learn_healing:
            self.learned_healing = True

    def clone(self) -> "Skills":
        """Return an independent deep copy of this skill sheet."""
        return copy.deepcopy(self)
class Clan:
    """A named group of cats with shared herb and prey stores.

    Like cats, a clan can be frozen; freezing a clan freezes its members.
    """

    def __init__(self, name: str):
        self.name = name
        # NOTE(review): -1 presumably means "no upper bound" for these
        # counters — confirm against Tools.counter.
        self.herbs = c.Counter(0, -1)
        self.prey = c.Counter(0, -1)
        self.cats: Dict[Name, Cat] = {}
        self.is_frozen: bool = False

    def _check_editing(self):
        """Raise FrozenError if this clan may no longer be modified."""
        if self.is_frozen:
            raise FrozenError("clan")

    def freeze(self):
        """Lock the clan and every cat currently in it."""
        self.is_frozen = True
        for cat_name in self.cats:
            self.cats[cat_name].freeze()

    def add_cat(self, cat: Cat):
        """Register *cat*; if it still belongs elsewhere, move it here.

        join_clan calls back into on_cat_joining -> add_cat, which is then a
        no-op because cat.clan is already self.
        """
        self._check_editing()
        self.cats[cat.name] = cat
        if cat.clan is not self:
            cat.join_clan(self)

    def get_cat(self, cat_name: Name) -> Cat:
        """Look a cat up by its Name object; raise CatSearchingError if absent."""
        if cat_name in self.cats:
            return self.cats[cat_name]
        raise CatSearchingError("Cat not found")

    def get_cat_by_name(self, cat_str_name: str) -> Cat:
        """Look a cat up by its rendered string name (linear scan)."""
        for e in self.cats:
            if str(e) == cat_str_name:
                return self.cats[e]
        raise CatSearchingError("Cat not found")

    def on_cat_joining(self, cat: Cat):
        """Callback from Cat.join_clan: register the arriving cat."""
        self._check_editing()
        self.add_cat(cat)

    def on_cat_leaving(self, cat: Cat):
        """Callback from Cat.join_clan: forget the departing cat."""
        self._check_editing()
        if cat.name in self.cats:
            del self.cats[cat.name]

    def register(self):
        """Add this clan to the global Game registry."""
        Game.instance.add_clan(self)
class Game:
    """Global game registry holding all clans.

    The first Game() constructed becomes the singleton reachable through
    Game.instance (used by Clan.register).
    """

    instance: "Game" = None

    def __init__(self):
        # Bug fix: the original did `self.instance = self.instance or self`,
        # which only created an *instance* attribute and left the class
        # attribute Game.instance as None, so Clan.register() (which reads
        # Game.instance) always failed with AttributeError.
        if Game.instance is None:
            Game.instance = self
        self.clans: Dict[str, "Clan"] = {}

    def add_clan(self, clan: "Clan"):
        """Register *clan* under its name."""
        self.clans[clan.name] = clan

    def get_clan(self, clan_name: str) -> "Clan":
        """Return the clan registered as *clan_name*; raise GameError if absent."""
        if clan_name in self.clans:
            return self.clans[clan_name]
        raise GameError("Couldn't find clan.")
| StarcoderdataPython |
1998654 | import json
import pathlib
import pytest
import znjson
@pytest.fixture
def simple_dict():
    """Plain JSON-serializable dict used by the round-trip tests below."""
    return {"a": 10, "b": 20}
def test_encoder_serializable(simple_dict):
    # Encoding a plain dict with ZnEncoder must not raise.
    _ = json.dumps(simple_dict, cls=znjson.ZnEncoder)

def test_decoder_serializable(simple_dict):
    # Round trip: encode with ZnEncoder, decode back with ZnDecoder.
    data_str = json.dumps(simple_dict, cls=znjson.ZnEncoder)
    assert simple_dict == json.loads(data_str, cls=znjson.ZnDecoder)

def test_decode_pathlib():
    # A "_type"-tagged payload decodes into the matching pathlib.Path.
    data_str = '{"_type": "pathlib.Path", "value": "test_path.txt"}'
    assert json.loads(data_str, cls=znjson.ZnDecoder) == pathlib.Path("test_path.txt")

def test_decode_pathlib_wo__type():
    # Without a "_type" key the payload stays a plain dict.
    data_str = '{"value": "test_path.txt"}'
    assert json.loads(data_str, cls=znjson.ZnDecoder) == {"value": "test_path.txt"}

def test_decode_pathlib_wo_value():
    # Without a "value" key the payload also stays a plain dict.
    data_str = '{"_type": "pathlib.Path"}'
    assert json.loads(data_str, cls=znjson.ZnDecoder) == {"_type": "pathlib.Path"}

def test_not_encodeable():
    # Lambdas have no registered converter, so encoding must fail.
    function = lambda x: x
    with pytest.raises(TypeError):
        json.dumps(function, cls=znjson.ZnEncoder)

def test_not_decodeable():
    # An unknown "_type" tag must raise rather than pass through silently.
    data_str = '{"_type": "unknown", "value": ""}'
    with pytest.raises(TypeError):
        json.loads(data_str, cls=znjson.ZnDecoder)
| StarcoderdataPython |
3303622 | import json
import numpy as np
from scipy.special import wofz
class spec(object):
    '''
    Voigt-broadening of a raw RIXS spectrum.

    Loads the raw (energy, intensity) sticks written by an earlier run and
    convolves each one with a Voigt line shape, saving the broadened
    spectrum both with and without the elastic (zero-energy) line.
    '''

    def __init__(self, dict, nruns=1, nmodel=1, out_dir='./_output/',
                 npoints=1000,
                 spec_max=-0.1,
                 spec_min=1.):
        '''
        dict: configuration mapping; must provide 'gamma_ph' (Lorentzian
              width) and 'alpha_exp' (experimental Gaussian width).
        Note: the first parameter shadows the builtin ``dict``; the name is
        kept for backward compatibility with existing callers.
        NOTE(review): the defaults spec_max=-0.1 and spec_min=1. look
        swapped (max < min) — confirm against callers.
        '''
        super(spec, self).__init__()
        self.nruns = str(nruns)
        self.out_dir = out_dir
        self.npoints = npoints
        self.xmax = spec_max
        self.xmin = spec_min
        # The templates only use {nr}; the original also passed an unused
        # nm=nmodel keyword to format(), dropped here.
        self.spec_raw = out_dir + '/{nr}_rixs_raw.csv'.format(nr=nruns)
        # Raw sticks: energies in self.x, intensities in self.y.
        self.x, self.y = np.transpose(np.loadtxt(self.spec_raw))
        self.gamma_ph = dict['gamma_ph']
        self.alpha_exp = dict['alpha_exp']
        self.save_noel = \
            self.out_dir + '/{nr}_rixs_phonons.csv'.format(nr=nruns)
        self.save_full = \
            self.out_dir + '/{nr}_rixs_full.csv'.format(nr=nruns)

    def run_broad(self):
        '''Broaden the loaded sticks on a uniform grid and save the results.'''
        x = np.linspace(self.xmin, self.xmax, (self.npoints))
        full, noelastic = np.zeros_like(x), np.zeros_like(x)
        for en, inten in zip(self.x, self.y):
            shape = self.voigt(x - en, self.alpha_exp, self.gamma_ph)
            # Normalize so each stick contributes exactly its intensity.
            norm = np.sum(shape) * abs(x[0] - x[1])
            y = shape * inten / norm
            if en != 0:
                noelastic = noelastic + y
            full = full + y
        np.savetxt(self.save_full, np.column_stack([x, full]))
        np.savetxt(self.save_noel, np.column_stack([x, noelastic]))

    def run_broad_fit(self, x=[]):
        '''Broaden onto a caller-supplied grid; returns (x, noelastic, full).

        NOTE(review): the mutable default x=[] is kept for interface
        compatibility; it is never mutated here.
        '''
        full, noelastic = np.zeros_like(x), np.zeros_like(x)
        # Loop variable renamed from `int`, which shadowed the builtin.
        for en, inten in zip(self.x, self.y):
            shape = self.voigt(x - en, self.alpha_exp, self.gamma_ph)
            norm = np.sum(shape) * abs(x[0] - x[1])
            y = shape * inten / norm
            if en != 0:
                noelastic = noelastic + y
            full = full + y
        return x, noelastic, full

    def voigt(self, x, alpha, gamma):
        '''Normalized Voigt profile evaluated via the Faddeeva function wofz.

        alpha sets the Gaussian width, gamma the Lorentzian width.
        '''
        sigma = alpha / np.sqrt(2 * np.log(2))
        return np.real(wofz((x + 1j * gamma) / sigma / np.sqrt(2))) / sigma \
            / np.sqrt(2 * np.pi)
| StarcoderdataPython |
210241 | <gh_stars>10-100
#!/usr/bin/env python
COPY_GOOGLE_DOC_KEY = '<KEY>'
| StarcoderdataPython |
3237828 | #Aula 10 do Curso Python em Video!
#By Rafabr
import time,sys,subprocess
# Clear the terminal and print the lesson banner.
subprocess.run(['clear'])
print('\n'+'*'*80)
print('Aula 10 - Exemplos e Testes'.center(80)+'\n')
print('Questionário sobre carros:')
# Ask whether the user owns a car: 's' = yes, 'n' = no.
tem_carro = str(input('Voçe possui carro? (s/n) : ')).strip().lower()
if tem_carro == 's':
    pass
else:
    if tem_carro == 'n':
        # Non-owners cannot take the survey: say so and quit.
        print('Voçe não está apto a participar da pesquisa!')
        time.sleep(2)
        sys.exit()
    else:
        # Any other answer is invalid input: say so and quit.
        print('Voçe digitou uma informação inválida!')
        time.sleep(2)
        sys.exit()
# Collect make/model and years of ownership, then derive the purchase year.
carro = str(input('Informe a marca e modelo do seu carro (Ex. Ford Ká): '))
carro_tempo = int(input('Há quantos anos voçe comprou o carro? '))
ano_compra = int(time.strftime('%Y')) - carro_tempo
print(f"\nVoçe possui o seguinte carro: {carro}")
print(f'Seu Carro tem {carro_tempo} anos, logo voçe comprou ele em {ano_compra}')
# A car 5+ years old is teased as "old"; otherwise it is "new".
if carro_tempo >= 5:
    print("Seu carro já tá meio velinho né!")
else:
    print('Seu carro ainda é novinho!')
print('Obrigado pelas informações!')
print('\nFim da execução\n')
print('\n'+'*'*80)
time.sleep(2)
| StarcoderdataPython |
4861468 | <reponame>llduyll10/film_project<gh_stars>0
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import TokenAuthentication
from rest_framework.pagination import PageNumberPagination
from rest_framework.generics import ListAPIView
from rest_framework.filters import SearchFilter, OrderingFilter
from account.models import Account
from film.models import FilmPost
from .serializers import FilmPostCreateSerializers, FilmPostUpdateSerializers, FilmPostSerializers
# Response status strings shared by the film API views below.
SUCCESS = 'success'
ERROR = 'error'
DELETE_SUCCESS = 'deleted'
UPDATE_SUCCESS = 'updated'
CREATE_SUCCESS = 'created'
# Url: https://<your-domain>/api/film/create
# Headers: Authorization: Token <token>
@api_view(['POST', ])
@permission_classes((IsAuthenticated,))
def api_create_film(request):
    """Create a film post owned by the requesting user.

    Rejects duplicate titles with 400; on success returns the created
    post's fields plus an absolute image URL.
    """
    if request.method == 'POST':
        data = request.data
        # The authenticated user becomes the author.
        data['author'] = request.user.pk
        checkExists = FilmPost.objects.filter(
            title=request.data['title']).count()
        # NOTE(review): the message 'Title is available' reads inverted —
        # this branch fires when the title is already taken; confirm the
        # intended wording.
        if checkExists > 0:
            return Response({'status': 'error', 'msg': 'Title is available'}, status=status.HTTP_400_BAD_REQUEST)
        serializer = FilmPostCreateSerializers(data=data)
        # `data` is reused here as the response payload.
        data = {}
        if serializer.is_valid():
            film_post = serializer.save()
            data['response'] = CREATE_SUCCESS
            data['pk'] = film_post.pk
            data['title'] = film_post.title
            data['directors'] = film_post.directors
            data['urlFilm'] = film_post.urlFilm
            data['date_published'] = film_post.date_published
            data['contentFilm'] = film_post.contentFilm
            data['slug'] = film_post.slug
            data['typeFilm'] = film_post.typeFilm
            # Build an absolute URL so clients can fetch the image directly.
            image_url = str(request.build_absolute_uri(film_post.image.url))
            data['image'] = image_url
            data['author'] = film_post.author.username
            return Response(data=data, status=status.HTTP_200_OK)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# Url: https://<your-domain>/api/film/<slug>/update
# Headers: Authorization: Token <token>
@api_view(['PUT', ])
@permission_classes((IsAuthenticated,))
def api_update_film(request, slug):
    """Partially update a film post; only its author may edit it.

    Returns the updated representation on success, 404 for an unknown slug,
    and 400 with the validation errors when the payload is invalid.
    """
    try:
        film_post = FilmPost.objects.get(slug=slug)
    except FilmPost.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    user = request.user
    if film_post.author != user:
        return Response({"msg": "You don't have permission to edit that."})
    if request.method == 'PUT':
        serializer = FilmPostUpdateSerializers(
            film_post, data=request.data, partial=True)
        data = {}
        if serializer.is_valid():
            serializer.save()
            data['response'] = UPDATE_SUCCESS
            data['pk'] = film_post.pk
            data['title'] = film_post.title
            data['directors'] = film_post.directors
            data['urlFilm'] = film_post.urlFilm
            data['date_published'] = film_post.date_published
            data['contentFilm'] = film_post.contentFilm
            data['slug'] = film_post.slug
            data['typeFilm'] = film_post.typeFilm
            # Build an absolute URL so clients can fetch the image directly.
            image_url = str(request.build_absolute_uri(film_post.image.url))
            data['image'] = image_url
            data['author'] = film_post.author.username
            return Response(data=data, status=status.HTTP_200_OK)
        # Bug fix: the original passed serializer.errors as the second
        # positional argument of Response (which is `status`) while also
        # passing status= by keyword, raising TypeError ("got multiple
        # values for argument 'status'"). Fold the errors into the payload.
        return Response(
            {'status': 'error', 'errors': serializer.errors},
            status=status.HTTP_400_BAD_REQUEST,
        )
# Url: https://<your-domain>/api/film/<slug>/
# Headers: Authorization: Token <token>
@api_view(['GET',])
@permission_classes((IsAuthenticated,))
def api_film_post_detail(request,slug):
    """Return the serialized FilmPost for *slug*, or 404 when it is absent."""
    try:
        post = FilmPost.objects.get(slug=slug)
    except FilmPost.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        return Response(FilmPostSerializers(post).data)
# Url: https://<your-domain>/api/film/<slug>/delete
# Headers: Authorization: Token <token>
@api_view(['DELETE',])
@permission_classes((IsAuthenticated,))
def api_film_post_delete(request, slug):
    """Delete the FilmPost for *slug*; only its author is allowed to."""
    try:
        film_post = FilmPost.objects.get(slug=slug)
    except FilmPost.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if film_post.author != request.user:
        # BUG FIX: previously returned an implicit 200 for a denied delete.
        return Response({"msg": "You don't have permission to delete that."},
                        status=status.HTTP_403_FORBIDDEN)
    if request.method == 'DELETE':
        # Model.delete() returns (count, {model: count}); truthy when rows
        # were actually removed.
        deleted = film_post.delete()
        data = {}
        if deleted:
            data['response'] = DELETE_SUCCESS
        return Response(data=data, status=status.HTTP_200_OK)
# Url:
# 1) list: https://<your-domain>/api/film/list
# 2) pagination: http://<your-domain>/api/film/list?page=2
# 3) search: http://<your-domain>/api/film/list?search=mitch
# 4) ordering: http://<your-domain>/api/film/list?ordering=-date_updated
# 4) search + pagination + ordering: <your-domain>/api/film/list?search=mitch&page=2&ordering=-date_updated
# Headers: Authorization: Token <token>
class ApiListFilm(ListAPIView):
    """Paginated, searchable, orderable list of all FilmPost objects."""
    queryset = FilmPost.objects.all()
    serializer_class = FilmPostSerializers
    # Token auth only; anonymous requests are rejected.
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)
    pagination_class = PageNumberPagination
    # ?search=<term> matches any of search_fields; ?ordering=<field> sorts.
    filter_backends = (SearchFilter, OrderingFilter)
    search_fields = ('title','author__username','directors','typeFilm')
| StarcoderdataPython |
""" Prisma Cloud Compute API Credentials Endpoints Class """
import urllib.parse
# Credentials (Manage > Authentication > Credentials store)
class CredentialsPrismaCloudAPIComputeMixin:
    """ Prisma Cloud Compute API Credentials Endpoints Class """
    def credential_list_read(self):
        # List every credential in the central credentials store.
        return self.execute_compute('GET', 'api/v1/credentials')
    def credential_list_create(self, body):
        # Create a credential, scoped to the Central Console project.
        return self.execute_compute(
            'POST', 'api/v1/credentials?project=Central+Console',
            body_params=body
        )
    def credential_list_delete(self, cred):
        # Delete credential *cred*; quote() makes special characters URL-safe.
        # NOTE(review): this endpoint (and usages below) uses a leading '/',
        # the two above do not -- confirm execute_compute normalizes paths.
        return self.execute_compute(
            'DELETE', '/api/v1/credentials/%s' % urllib.parse.quote(cred)
        )
    def credential_list_usages_read(self, cred):
        # List the resources that currently use credential *cred*.
        return self.execute_compute(
            'GET', '/api/v1/credentials/%s/usages' % urllib.parse.quote(cred)
        )
| StarcoderdataPython |
3503753 | <reponame>ramavarjah/flask-booking<gh_stars>0
from .utils import thingworx
| StarcoderdataPython |
1906327 |
import requests
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import common.functions as functions
from common.xml_validator import xml_validator
class heredicare_interface:
    """Thin HTTP client for the HerediCare REST API (variant IDs and variants).

    All responses are XML and validated against local XSD schemas before use.
    """
    def __init__(self):
        # Base endpoint; all requests are built relative to this URL.
        self.base_url = "https://portal.img.med.uni-tuebingen.de/ahdoebm1/HerediCareAPI/v1/"
        # Validator for single-variant responses, shared by get_variant().
        self.variant_validator = xml_validator('/mnt/users/ahdoebm1/HerediVar/doc/api/variant.xsd')
    def get_heredicare_vid_list(self):
        """Fetch all HerediCare variant IDs.

        Returns (execution_code, vids): code 0 on success, 1 when the
        response XML fails XSD validation (then vids is empty).
        """
        execution_code = 0 # everything worked well
        list_validator = xml_validator('/mnt/users/ahdoebm1/HerediVar/doc/api/vid_list.xsd')
        v_id_list_request = requests.get(self.base_url + 'vid_list.php')
        #res = validator.validate('/mnt/users/ahdoebm1/HerediVar/src/tools/mock-api/vid_list.xml')
        v_id_list_xml = v_id_list_request.text
        is_valid = list_validator.validate(v_id_list_xml)
        if not is_valid:
            execution_code = 1 # fatal error: returned xml is not valid
            return execution_code, []
        vid_obj = list_validator.object_from_string(v_id_list_xml)
        vids_heredicare = []
        for vid in vid_obj.VId:
            # Each <VId id="..."> element carries the variant ID as attribute.
            vids_heredicare.append(vid.get('id'))
        return execution_code, vids_heredicare
    def get_variant(self, vid):
        """Fetch one variant by *vid*.

        Returns (execution_code, chr, pos, ref, alt, genome_build, raw_obj):
        0 = ok, 1 = XML failed XSD validation, 2 = chromosome not parseable,
        3 = a required attribute was missing from the response.
        """
        execution_code = 0
        # NOTE(review): the local name ``chr`` shadows the builtin inside
        # this method; harmless here, but worth renaming eventually.
        chr = ''
        pos = ''
        ref = ''
        alt = ''
        reference_genome_build = ''
        heredicare_variant = None
        variant_request = requests.get(self.base_url + 'variant.php?id=' + str(vid))
        variant_xml = variant_request.text
        # get orig variant
        heredicare_variant = self.variant_validator.object_from_string(variant_xml)
        chr = heredicare_variant.get('chr', '')
        pos = heredicare_variant.get('pos', '')
        ref = heredicare_variant.get('ref', '')
        alt = heredicare_variant.get('alt', '')
        reference_genome_build = heredicare_variant.get('genome_build', '')
        # errors
        if chr == '' or pos == '' or alt == '' or ref == '' or reference_genome_build == '':
            execution_code = 3
            return execution_code, chr, pos, ref, alt, reference_genome_build, heredicare_variant
        chr_num = functions.validate_chr(chr)
        if not chr_num:
            execution_code = 2
            return execution_code, chr, pos, ref, alt, reference_genome_build, heredicare_variant
        # Normalize to UCSC-style naming ("chr1", "chrX", ...).
        chr = 'chr' + chr_num
        is_valid_variant = self.variant_validator.validate(variant_xml)
        #print(variant_xml)
        if not is_valid_variant:
            execution_code = 1
            return execution_code, chr, pos, ref, alt, reference_genome_build, heredicare_variant
        # everything fine!
        return execution_code, chr, pos, ref, alt, reference_genome_build, heredicare_variant
1961977 | <filename>openvariant/annotation/annotation.py
"""
Annotation
====================================
A core class to represent the schema which files will be parsed.
"""
import logging
import re
from typing import List
from yaml import safe_load, YAMLError
from openvariant.annotation.builder import AnnotationTypesBuilders
from openvariant.config.config_annotation import (AnnotationGeneralKeys, AnnotationKeys, AnnotationTypes,
ExcludesKeys, DEFAULT_FORMAT, DEFAULT_DELIMITER,
AnnotationFormat, AnnotationDelimiter)
def _check_general_keys(annot: dict) -> None:
    """Check if general annotations are writen in a proper format

    Raises KeyError when a top-level annotation key is missing or has the
    wrong shape; does not return anything on success.
    """
    # Pattern key
    # NOTE(review): because ``and`` binds tighter than ``or``, a plain string
    # pattern passes this check (each character is a str), and a list of
    # non-strings also passes -- confirm that is intended; __init__ later
    # wraps a bare str into a list, so strings do appear to be allowed.
    if AnnotationGeneralKeys.PATTERN.value not in annot or not isinstance(
            annot[AnnotationGeneralKeys.PATTERN.value], list) \
            and not all(isinstance(x, str) for x in annot[AnnotationGeneralKeys.PATTERN.value]):
        raise KeyError(f"'{AnnotationGeneralKeys.PATTERN.value}' key not found or is not a str.")
    # Recursive key
    if AnnotationGeneralKeys.RECURSIVE.value in annot and \
            not isinstance(annot[AnnotationGeneralKeys.RECURSIVE.value], bool):
        raise KeyError(f"'{AnnotationGeneralKeys.RECURSIVE.value}' key is not a boolean.")
    # Format key: must be a string naming a member of AnnotationFormat.
    if AnnotationGeneralKeys.FORMAT.value in annot and \
            (not isinstance(annot[AnnotationGeneralKeys.FORMAT.value], str) or
             annot[AnnotationGeneralKeys.FORMAT.value].upper() not in [e.name for e in AnnotationFormat]):
        raise KeyError(f"'{AnnotationGeneralKeys.FORMAT.value}' key is not a string.")
    # Delimiter key: must be a string naming a member of AnnotationDelimiter.
    if AnnotationGeneralKeys.DELIMITER.value in annot and \
            (not isinstance(annot[AnnotationGeneralKeys.DELIMITER.value], str) or
             annot[AnnotationGeneralKeys.DELIMITER.value].upper() not in [e.name for e in AnnotationDelimiter]):
        raise KeyError(f"'{AnnotationGeneralKeys.DELIMITER.value}' key is not valid or is not a string.")
    # Columns key
    if AnnotationGeneralKeys.COLUMNS.value in annot and \
            not isinstance(annot[AnnotationGeneralKeys.COLUMNS.value], list):
        raise KeyError(f"'{AnnotationGeneralKeys.COLUMNS.value}' key is not a list.")
    # Annotations key
    if AnnotationGeneralKeys.ANNOTATION.value in annot and \
            not isinstance(annot[AnnotationGeneralKeys.ANNOTATION.value], list):
        raise KeyError(f"'{AnnotationGeneralKeys.ANNOTATION.value}' key is not a list.")
    # Excludes key: a list of dicts, each having both 'field' and 'value'.
    if AnnotationGeneralKeys.EXCLUDE.value in annot and \
            (not isinstance(annot[AnnotationGeneralKeys.EXCLUDE.value], list) or
             not all([ExcludesKeys.FIELD.value in x and ExcludesKeys.VALUE.value in x
                      for x in annot[AnnotationGeneralKeys.EXCLUDE.value]])):
        raise KeyError(f"'{AnnotationGeneralKeys.EXCLUDE.value}' key in bad format.")
def _check_annotation_keys(annot: dict) -> None:
    """Check if annotation keys are writen in a proper format

    Validates one entry of the 'annotation' list; raises KeyError/ValueError
    on a malformed entry, returns nothing on success.
    """
    # Type key: mandatory, must name a member of AnnotationTypes.
    if AnnotationKeys.TYPE.value not in annot or not isinstance(annot[AnnotationKeys.TYPE.value], str):
        raise KeyError(f"'{AnnotationKeys.TYPE.value}' key not found or is not a str.")
    if annot[AnnotationKeys.TYPE.value] not in [e.value for e in AnnotationTypes]:
        raise ValueError(f"'{AnnotationKeys.TYPE.value}' value is wrong.")
    # Field key: mandatory output column name for this annotation.
    if AnnotationKeys.FIELD.value not in annot or not isinstance(annot[AnnotationKeys.FIELD.value], str):
        raise KeyError(f"'{AnnotationKeys.FIELD.value}' key not found or is not a str.")
    # Value key (static annotations only).
    # NOTE(review): if a STATIC entry omits 'value' entirely, the subscript
    # below raises a bare KeyError instead of this tailored message -- confirm
    # whether a .get() guard was intended.
    if (annot[AnnotationKeys.TYPE.value] == AnnotationTypes.STATIC.value) and \
            not isinstance(annot[AnnotationKeys.VALUE.value], str):
        raise KeyError(f"'{AnnotationKeys.VALUE.value}' key not found or is not a str.")
    # Field source key: for internal/plugin/mapping types it must be a list.
    if (annot[AnnotationKeys.TYPE.value] == AnnotationTypes.INTERNAL.value or
        annot[AnnotationKeys.TYPE.value] == AnnotationTypes.PLUGIN.value or
        annot[AnnotationKeys.TYPE.value] == AnnotationTypes.MAPPING.value) and \
            AnnotationKeys.FIELD_SOURCE.value in annot and \
            not isinstance(annot[AnnotationKeys.FIELD_SOURCE.value], list):
        raise KeyError(f"'{AnnotationKeys.FIELD_SOURCE.value}' key not found or is not a list.")
    # Dirname and filename key: optional transform must look like a lambda.
    if (annot[AnnotationKeys.TYPE.value] == AnnotationTypes.DIRNAME.value or
        annot[AnnotationKeys.TYPE.value] == AnnotationTypes.FILENAME.value or
        annot[AnnotationKeys.TYPE.value] == AnnotationTypes.INTERNAL.value) and \
            AnnotationKeys.FUNCTION.value in annot and \
            re.compile("lambda[' ']+[a-zA-Z0-9]+[' ']*:[' ']*.*").search(annot[AnnotationKeys.FUNCTION.value]) is None:
        raise ValueError(f"'{AnnotationKeys.FUNCTION.value}' value is not an appropriated lambda function.")
    # Plugin key: mandatory string for plugin-type annotations.
    if annot[AnnotationKeys.TYPE.value] == AnnotationTypes.PLUGIN.value and \
            AnnotationKeys.PLUGIN.value not in annot:
        raise KeyError(f"'{AnnotationKeys.PLUGIN.value}' key not found.")
    if annot[AnnotationKeys.TYPE.value] == AnnotationTypes.PLUGIN.value and \
            (AnnotationKeys.PLUGIN.value in annot and not isinstance(annot[AnnotationKeys.PLUGIN.value], str)):
        raise ValueError(f"'{AnnotationKeys.PLUGIN.value}' is not a str.")
    # Mapping keys: all four mapping fields mandatory with the right types.
    if annot[AnnotationKeys.TYPE.value] == AnnotationTypes.MAPPING.value and \
            (AnnotationKeys.FIELD_SOURCE.value not in annot or
             AnnotationKeys.FIELD_MAPPING.value not in annot or
             AnnotationKeys.FILE_MAPPING.value not in annot or
             AnnotationKeys.FIELD_VALUE.value not in annot or
             not isinstance(annot[AnnotationKeys.FIELD_SOURCE.value], list) or
             not isinstance(annot[AnnotationKeys.FIELD_MAPPING.value], str) or
             not isinstance(annot[AnnotationKeys.FILE_MAPPING.value], str) or
             not isinstance(annot[AnnotationKeys.FIELD_VALUE.value], str)):
        raise KeyError(f"'{AnnotationTypes.MAPPING.value}' not annotated well.")
class Annotation:
    """A representation of the schema that files will be parsed"""
    def _read_annotation_file(self) -> dict:
        """Read annotation file with YAML package

        NOTE(review): on a YAMLError this logs and implicitly returns None,
        which makes __init__ fail later on raw_annotation -- confirm whether
        re-raising was intended.
        """
        with open(self._path, 'r') as stream:
            try:
                return safe_load(stream)
            except YAMLError as exc:
                logging.error(exc)
                stream.close()
    def __init__(self, annotation_path: str) -> None:
        """
        Inits Annotation with annotation file path.
        Parameters
        ---------
        annotation_path : str
            A string path where Annotation file is located.
        """
        self._path = annotation_path
        raw_annotation = self._read_annotation_file()
        # Validate the file's structure up front; raises on malformed input.
        _check_general_keys(raw_annotation)
        for annot in raw_annotation.get(AnnotationGeneralKeys.ANNOTATION.value, []):
            _check_annotation_keys(annot)
        # A bare string pattern is normalized to a one-element list.
        patterns = raw_annotation[AnnotationGeneralKeys.PATTERN.value]
        self._patterns = patterns if isinstance(patterns, List) else [patterns]
        self._recursive = raw_annotation.get(AnnotationGeneralKeys.RECURSIVE.value, True)
        self._delimiter = raw_annotation.get(AnnotationGeneralKeys.DELIMITER.value, DEFAULT_DELIMITER).upper()
        # Format is stored without a leading dot (e.g. "tsv", not ".tsv").
        self._format = raw_annotation.get(AnnotationGeneralKeys.FORMAT.value, DEFAULT_FORMAT).replace('.', '')
        # Group exclude rules by field: {field: [value, ...]}.
        self._excludes: dict = {}
        for k in raw_annotation.get(AnnotationGeneralKeys.EXCLUDE.value, []):
            key_exclude = k[AnnotationKeys.FIELD.value]
            value_exclude = k[AnnotationKeys.VALUE.value]
            if key_exclude in self._excludes:
                self._excludes[key_exclude].append(value_exclude)
            else:
                self._excludes[key_exclude] = [value_exclude]
        # Build one typed builder per annotation, keyed by its output field.
        self._annotations: dict = {}
        for k in raw_annotation.get(AnnotationGeneralKeys.ANNOTATION.value, []):
            self._annotations[k[AnnotationKeys.FIELD.value]] = \
                AnnotationTypesBuilders[k[AnnotationKeys.TYPE.value].upper()].value(k, self._path)
        # Default columns = every annotated field, in declaration order.
        self._columns = raw_annotation.get(AnnotationGeneralKeys.COLUMNS.value, list(self.annotations.keys()))
        self._check_columns()
    def _check_columns(self) -> None:
        """Check if columns exists as annotation fields"""
        for col in self._columns:
            if col not in self._annotations:
                raise KeyError(f"'{col}' column unable to find.")
    @property
    def path(self) -> str:
        """str: path where annotation file is located"""
        return self._path
    @property
    def patterns(self) -> List[str]:
        """List[str]: files patterns that annotation will match"""
        return self._patterns
    @property
    def format(self) -> str:
        """str: output format that will have parsed files"""
        return self._format
    @property
    def delimiter(self) -> str:
        """str: delimiter that annotation will read on files"""
        return self._delimiter
    @property
    def columns(self) -> List:
        """List: columns that will appear on parsed output files"""
        return self._columns
    @property
    def annotations(self) -> dict:
        """dict: annotation that will cover Annotation object"""
        return self._annotations
    @property
    def excludes(self) -> dict:
        """List: values that will be excluded after the parsing"""
        return self._excludes
    @property
    def structure(self) -> dict:
        """dict: general structure of Annotation schema"""
        # Every pattern maps to the same annotation/exclude structure.
        structure_aux = {AnnotationGeneralKeys.ANNOTATION.name: self._annotations,
                         AnnotationGeneralKeys.EXCLUDE.name: self._excludes}
        return {e: structure_aux for e in self._patterns}
| StarcoderdataPython |
6466844 | from kafka import KafkaConsumer
import logging
import argparse
import os
def run_job(broker):
    """Consume the 'example' topic from *broker* forever, echoing each
    message's value decoded as UTF-8 to stdout."""
    for record in KafkaConsumer('example', bootstrap_servers=broker):
        print(str(record.value, 'utf-8'))
def get_arg(env, default):
    """Return the value of environment variable *env*, or *default* when it
    is unset or empty.

    Fix: the original compared strings with ``is not ''`` (identity), which
    relies on CPython string interning and emits a SyntaxWarning on
    Python 3.8+; use an equality check instead.
    """
    value = os.getenv(env, '')
    return value if value != '' else default
def parse_args(parser):
    """Parse CLI arguments and let environment variables override them.

    Fix: the original wrote the PORT override to ``args.conf``, but ``main``
    reads ``args.port`` -- so a PORT environment variable was silently
    ignored. Write it to ``args.port`` instead.
    """
    args = parser.parse_args()
    args.brokers = get_arg('KAFKA_BROKERS', args.brokers)
    args.topic = get_arg('KAFKA_TOPIC', args.topic)
    args.port = get_arg('PORT', args.port)
    return args
def main(args):
    """Log the effective configuration, then block consuming from Kafka."""
    logging.info('brokers={}'.format(args.brokers))
    logging.info('topic={}'.format(args.topic))
    logging.info('port={}'.format(args.port))
    # run_job never returns under normal operation (infinite consumer loop).
    run_job(args.brokers)
if __name__ == '__main__':
    # Configure logging, declare CLI flags (each overridable via an env
    # variable -- see parse_args), then hand off to main().
    logging.basicConfig(level=logging.INFO)
    logging.info('starting timeseries-mock consumer')
    parser = argparse.ArgumentParser(
        description='timeseries-mock consumer example for Kafka')
    parser.add_argument(
        '--brokers',
        help='The bootstrap servers, env variable KAFKA_BROKERS',
        default='localhost:9092')
    parser.add_argument(
        '--topic',
        help='Topic to publish to, env variable KAFKA_TOPIC',
        default='data')
    parser.add_argument(
        '--port',
        type=int,
        help='Web server port',
        default=8080)
    args = parse_args(parser)
    main(args)
    logging.info('exiting')
| StarcoderdataPython |
46910 | <reponame>anhinga/2019-python-drafts<filename>dash-cytoscape/cyto-edit-graph.py
# based on cyto-multiselect-callback.py
# added ability to add nodes and edges
# + experiments with styling
# + logging (but not enough to conveniently store changes between sessions)
#import json
import datetime
print("START")
# Append a session-start timestamp to the log file so later node/edge log
# entries can be attributed to a session.
with open("logfile.txt", "a") as file:
    print(str(datetime.datetime.now()), file=file)
import dash
import dash_cytoscape as cyto
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output, State
app = dash.Dash(__name__)
# Seed graph: North American cities positioned by (lon, lat) scaled so the
# layout roughly resembles a map (y is negated because screen y grows down).
nodes = [
    {
        'data': {'id': short, 'label': label},
        'position': {'x': 20*lon, 'y': -20*lat}
    }
    for short, label, lat, lon in (
        ('la', 'Los Angeles', 34.03, -118.25),
        ('nyc', 'New York', 40.71, -74),
        ('to', 'Toronto', 43.65, -79.38),
        ('mtl', 'Montreal', 45.50, -73.57),
        ('van', 'Vancouver', 49.28, -123.12),
        ('chi', 'Chicago', 41.88, -87.63),
        ('bos', 'Boston', 42.36, -71.06),
        ('hou', 'Houston', 29.76, -95.37)
    )
]
# Seed edges between the cities above (directed source -> target).
edges = [
    {'data': {'source': source, 'target': target}}
    for source, target in (
        ('van', 'la'),
        ('la', 'chi'),
        ('hou', 'chi'),
        ('to', 'mtl'),
        ('mtl', 'bos'),
        ('nyc', 'bos'),
        ('to', 'hou'),
        ('to', 'nyc'),
        ('la', 'nyc'),
        ('nyc', 'bos')
    )
]
# Module-level state remembering the last selected source/target node ids;
# shared between the selection callback and the "Add Edge" inputs.
selected_source = ""
selected_target = ""
# Cytoscape stylesheet: base node/edge appearance plus a distinct look for
# the current selection.
default_stylesheet = [
    {
        'selector': 'node',
        'style': {
            'background-color': '#BFD7B5',
            'label': 'data(label)'
        }
    },
    {
        # Directed edges drawn as curves with an arrowhead at the target.
        'selector': 'edge',
        'style': {
            'curve-style': 'bezier',
            'target-arrow-shape': 'vee',
        }
    },
    {
        'selector': ':selected',
        'style': {
            'background-color': 'purple',
            'shape': 'octagon',
            #'width': '35px' # have not figured out a way to do it only for nodes, but not for edges
            #'height': '25px'
            #'line-color': 'blue' # commented out: let's keep the default, some different shade of blue
        }
    }
]
# Page layout: a node-creation row, an edge-creation row, the graph itself,
# and a markdown area echoing the current selection.
app.layout = html.Div([
    # Row 1: inputs for a new node (label, id, map coordinates).
    html.Div([
        html.Div(style={'width': '20%', 'display': 'inline'}, children=[
            'Label:',
            dcc.Input(id='input-label', type='text')
        ]),
        html.Div(style={'width': '20%', 'display': 'inline'}, children=[
            'Id:',
            dcc.Input(id='input-node-id', type='text')
        ]),
        html.Div(style={'width': '20%', 'display': 'inline'}, children=[
            'Lat:',
            dcc.Input(id='input-lat', type='text')
        ]),
        html.Div(style={'width': '20%', 'display': 'inline'}, children=[
            'Lon:',
            dcc.Input(id='input-lon', type='text')
        ]),
        html.Button('Add Node', id='btn-add-node', n_clicks_timestamp=0),
    ]),
    # Row 2: source/target ids for a new edge (auto-filled by selection).
    html.Div([
        html.Div(style={'width': '20%', 'display': 'inline'}, children=[
            'Source Id:',
            dcc.Input(id='source-id', type='text')
        ]),
        html.Div(style={'width': '20%', 'display': 'inline'}, children=[
            'Target Id:',
            dcc.Input(id='target-id', type='text')
        ]),
        html.Button('Add Edge', id='btn-add-edge', n_clicks_timestamp=0),
    ]),
    # The editable graph; 'preset' layout keeps our explicit positions.
    cyto.Cytoscape(
        id='cytoscape-graph',
        layout={'name': 'preset'},
        elements=edges+nodes,
        stylesheet=default_stylesheet,
        boxSelectionEnabled=True,
        style={'width': '100%', 'height': '450px'}
    ),
    dcc.Markdown(id='cytoscape-selectedNodeData-markdown')
])
@app.callback([Output('cytoscape-selectedNodeData-markdown', 'children'),
               Output('source-id', 'value'),
               Output('target-id', 'value')],
              [Input('cytoscape-graph', 'selectedNodeData'),
               Input('cytoscape-graph', 'selectedEdgeData')])
def displaySelectedNodeData(node_data_list, edge_data_list):
    """Render the current selection as markdown and mirror the first two
    selected nodes into the edge source/target inputs.

    NOTE(review): selected_source/selected_target are module globals, so a
    previously chosen value survives later selections -- this appears
    deliberate (pick source, then target, then press Add Edge), but confirm;
    it also means state is shared across browser sessions.
    """
    global selected_source
    global selected_target
    if node_data_list:
        cities_list = [data['label']+' ('+data['id']+')' for data in node_data_list]
        cities_string = "\nYou selected the following cities:\n* " + "\n* ".join(cities_list)
        if len(node_data_list) > 0: selected_source = node_data_list[0]['id']
        if len(node_data_list) > 1: selected_target = node_data_list[1]['id']
    else:
        cities_string = ""
    if edge_data_list:
        routes_list = [data['source']+"->"+data['target'] for data in edge_data_list]
        routes_string = "\n\nYou selected the following routes:\n* " + "\n* ".join(routes_list)
    else:
        routes_string = ""
    return cities_string + routes_string, selected_source, selected_target
@app.callback(Output('cytoscape-graph', 'elements'),
              [Input('btn-add-node','n_clicks_timestamp'),
               Input('btn-add-edge','n_clicks_timestamp')],
              [State('input-label', 'value'),
               State('input-node-id', 'value'),
               State('input-lat', 'value'),
               State('input-lon', 'value'),
               State('source-id', 'value'),
               State('target-id', 'value')])
def addNodeOrEdge(timestamp_add_node, timestamp_add_edge, input_label, input_id, input_lat, input_lon, source_id, target_id):
    """Append a node or an edge depending on which button was pressed last
    (determined by comparing the buttons' click timestamps), log the new
    element to logfile.txt, and return the updated element list.

    NOTE(review): float(input_lat/lon) raises on non-numeric input and node
    ids are not checked for uniqueness -- confirm whether validation is
    intentionally left to the user.
    """
    print("timestamp_add_node = ", timestamp_add_node)
    print("timestamp_add_edge = ", timestamp_add_edge)
    # good only for single user
    global nodes
    global edges
    if timestamp_add_node > timestamp_add_edge:
        # Default coordinates roughly center the new node on the map.
        if not input_lat: input_lat = '38.5'
        if not input_lon: input_lon = '-100.0'
        new_node = {
            'data': {'id': input_id, 'label': input_label},
            'position': {'x': 20*float(input_lon), 'y': -20*float(input_lat)}
        }
        with open("logfile.txt", "a") as file:
            print(new_node, file=file)
        nodes = nodes + [new_node]
    if timestamp_add_edge > timestamp_add_node:
        new_edge = {
            'data': {'source': source_id, 'target': target_id},
        }
        with open("logfile.txt", "a") as file:
            print(new_edge, file=file)
        edges = edges + [new_edge]
    return edges+nodes
if __name__ == '__main__':
    print("ABOUT TO RUN SERVER")
    # debug=True enables hot reload and the Dash debugger; not for production.
    app.run_server(debug=True)
| StarcoderdataPython |
6528852 | <reponame>LudovicRousseau/pycryptoki<filename>pycryptoki/hsm_management.py
"""
Methods responsible for pycryptoki 'hsm management' set of commands.
"""
from _ctypes import pointer
from ctypes import byref, create_string_buffer, cast
from .attributes import Attributes, to_byte_array
from .common_utils import AutoCArray, refresh_c_arrays
from .cryptoki import (
CK_SLOT_ID,
CK_USER_TYPE,
CA_SetTokenCertificateSignature,
CA_HAInit,
CA_HAInitExtended,
CA_CreateLoginChallenge,
CA_InitializeRemotePEDVector,
CA_DeleteRemotePEDVector,
CA_MTKRestore,
CA_MTKResplit,
CA_MTKZeroize,
CK_ULONG,
CK_BYTE_PTR,
CK_BYTE,
CK_CHAR_PTR,
CK_CHAR,
CA_SetHSMPolicy,
CK_SESSION_HANDLE,
CA_SetHSMPolicies,
CA_SetDestructiveHSMPolicy,
CA_SetDestructiveHSMPolicies,
CA_GetHSMCapabilitySet,
CA_GetHSMCapabilitySetting,
CA_GetHSMPolicySet,
CA_GetHSMPolicySetting,
)
from .exceptions import make_error_handle_function
def c_performselftest(slot, test_type, input_data, input_data_len):
    """Test: Performs a self test for specified test type on a given slot.
    :param slot: slot number
    :param test_type: type of test CK_ULONG
    :param input_data: pointer to input data CK_BYTE_PTR
    :param input_data_len: input data length CK_ULONG
    :returns: the result code
    [CK_SLOT_ID, CK_ULONG, CK_BYTE_PTR, CK_ULONG, CK_BYTE_PTR, CK_ULONG_PTR]
    """
    test_type = CK_ULONG(test_type)
    input_length = CK_ULONG(input_data_len)
    # Marshal the Python sequence into a fixed-size ctypes byte array.
    input_data = (CK_BYTE * input_data_len)(*input_data)
    # Output buffer is sized like the input; actual length is written by the
    # library into output_data_len.
    output_data = cast(create_string_buffer(b"", input_data_len), CK_BYTE_PTR)
    output_data_len = CK_ULONG()
    # The binding's name differs between library versions, hence the fallback.
    try:
        from .cryptoki import CA_PerformSelfTest as selftest
    except ImportError:
        from .cryptoki import C_PerformSelftest as selftest
    ret = selftest(slot, test_type, input_data, input_length, output_data, byref(output_data_len))
    return ret, output_data
c_performselftest_ex = make_error_handle_function(c_performselftest)
def ca_settokencertificatesignature(
    h_session, access_level, customer_id, pub_template, signature, signature_len
):
    """Completes the installation of a certificate on a token.
    The caller must supply a public key and a signature for token certificate.
    The public key is provided through the template; it must contain a key
    type, a modulus and a public exponent.
    :param int h_session: Session handle
    :param access_level: the access level
    :param customer_id: the customer ID
    :param pub_template: the public template
    :param signature: the signature
    :param signature_len: the length in bytes of the signature
    :returns: the result code
    """
    access_level = CK_ULONG(access_level)
    customer_id = CK_ULONG(customer_id)
    # Convert the Python attribute dict into the C attribute-array struct.
    key_attributes = Attributes(pub_template)
    pub_template_len = CK_ULONG(len(pub_template))
    # Marshal the signature bytes into a fixed-size ctypes array.
    signature = (CK_BYTE * signature_len)(*signature)
    signature_length = CK_ULONG(signature_len)
    ret = CA_SetTokenCertificateSignature(
        h_session,
        access_level,
        customer_id,
        key_attributes.get_c_struct(),
        pub_template_len,
        signature,
        signature_length,
    )
    return ret
ca_settokencertificatesignature_ex = make_error_handle_function(ca_settokencertificatesignature)
def ca_hainit(h_session, h_key):
    """Create a login key pair on the primary token.

    :param int h_session: Session handle
    :param h_key: the login private key
    :returns: the result code
    """
    return CA_HAInit(h_session, h_key)
ca_hainit_ex = make_error_handle_function(ca_hainit)
def ca_hainitextended(h_session, h_key, pkc, user_types):
    """Creates a login key pair on the primary token.
    :param int h_session: Session handle
    :param h_key: the login private key or 0
    :param pkc: private key PKC or None
    :param user_types: list of pairs (user, tokenType) i.e.
        [(CKU_SO, CKF_ADMIN_TOKEN)]
        [(CKU_USER, 0)]
        or None if revoke is issued
    :returns: the result code
    """
    # A None PKC is passed to C as a NULL pointer with zero length.
    if pkc is not None:
        pkc_ptr, pkc_len = to_byte_array(pkc)
    else:
        pkc_ptr = None
        pkc_len = 0
    # Split the (user, tokenType) pairs into two parallel C arrays; None
    # (revoke) becomes NULL pointers with zero roles.
    if user_types is not None:
        users_ptr = (CK_ULONG * len(user_types))(*[x[0] for x in user_types])
        tokens_ptr = (CK_ULONG * len(user_types))(*[x[1] for x in user_types])
        number_roles = CK_ULONG(len(user_types))
    else:
        users_ptr = None
        tokens_ptr = None
        number_roles = CK_ULONG(0)
    ret = CA_HAInitExtended(h_session, h_key, pkc_ptr, pkc_len, users_ptr, tokens_ptr, number_roles)
    return ret
ca_hainitextended_ex = make_error_handle_function(ca_hainitextended)
def ca_createloginchallenge(h_session, user_type, challenge):
    """Creates a login challenge for the given user.
    :param int h_session: Session handle
    :param user_type: user type
    :param challenge: challenge
    :returns: the result code

    NOTE(review): the output length and output buffer are passed by value
    (no byref), and output_data is a single CK_CHAR rather than a buffer --
    verify against the CA_CreateLoginChallenge prototype; the other wrappers
    in this module pass output parameters with byref/pointer.
    """
    challenge_length = CK_ULONG(len(challenge))
    challenge = cast(create_string_buffer(challenge), CK_CHAR_PTR)
    output_data_length = CK_ULONG()
    output_data = CK_CHAR()
    ret = CA_CreateLoginChallenge(
        h_session,
        CK_USER_TYPE(user_type),
        challenge_length,
        challenge,
        output_data_length,
        output_data,
    )
    return ret, output_data
ca_createloginchallenge_ex = make_error_handle_function(ca_createloginchallenge)
def ca_initializeremotepedvector(h_session):
    """Initialize a remote PED vector.

    :param int h_session: Session handle
    :returns: the result code
    """
    return CA_InitializeRemotePEDVector(h_session)
ca_initializeremotepedvector_ex = make_error_handle_function(ca_initializeremotepedvector)
def ca_deleteremotepedvector(h_session):
    """Delete a remote PED vector.

    :param int h_session: Session handle
    :returns: the result code
    """
    return CA_DeleteRemotePEDVector(h_session)
ca_deleteremotepedvector_ex = make_error_handle_function(ca_deleteremotepedvector)
def ca_mtkrestore(slot):
    """Restore the MTK.

    :param slot: slot number
    :returns: the result code
    """
    return CA_MTKRestore(CK_SLOT_ID(slot))
ca_mtkrestore_ex = make_error_handle_function(ca_mtkrestore)
def ca_mtkresplit(slot):
    """Resplit the MTK.

    :param slot: slot number
    :returns: the result code
    """
    return CA_MTKResplit(CK_SLOT_ID(slot))
ca_mtkresplit_ex = make_error_handle_function(ca_mtkresplit)
def ca_mtkzeroize(slot):
    """Zeroize the MTK.

    :param slot: slot number
    :returns: the result code
    """
    return CA_MTKZeroize(CK_SLOT_ID(slot))
ca_mtkzeroize_ex = make_error_handle_function(ca_mtkzeroize)
def ca_set_hsm_policy(h_session, policy_id, policy_val):
    """Set a single HSM policy via CA_SetHSMPolicy.

    :param int h_session: Session handle
    :param policy_id: The ID of the policy being set
    :param policy_val: The value of the policy being set
    :returns: The result code
    """
    return CA_SetHSMPolicy(h_session, CK_ULONG(policy_id), CK_ULONG(policy_val))
ca_set_hsm_policy_ex = make_error_handle_function(ca_set_hsm_policy)
def ca_set_hsm_policies(h_session, policies):
    """
    Set multiple HSM policies in one call.

    :param int h_session: Session handle
    :param policies: dict of policy ID ints and value ints
    :return: result code
    """
    session = CK_SESSION_HANDLE(h_session)
    # Marshal the dict into two parallel C arrays of ids and values.
    ids = AutoCArray(data=list(policies.keys()), ctype=CK_ULONG)
    vals = AutoCArray(data=list(policies.values()), ctype=CK_ULONG)
    return CA_SetHSMPolicies(session, ids.size.contents, ids.array, vals.array)
ca_set_hsm_policies_ex = make_error_handle_function(ca_set_hsm_policies)
def ca_set_destructive_hsm_policy(h_session, policy_id, policy_val):
    """Set a single destructive HSM policy via CA_SetDestructiveHSMPolicy.

    :param int h_session: Session handle
    :param policy_id: The ID of the policy being set
    :param policy_val: The value of the policy being set
    :returns: The result code
    """
    return CA_SetDestructiveHSMPolicy(h_session, CK_ULONG(policy_id), CK_ULONG(policy_val))
ca_set_destructive_hsm_policy_ex = make_error_handle_function(ca_set_destructive_hsm_policy)
def ca_set_destructive_hsm_policies(h_session, policies):
    """
    Set multiple destructive HSM policies in one call.

    :param int h_session: Session handle
    :param policies: dict of policy ID ints and value ints
    :return: result code
    """
    session = CK_SESSION_HANDLE(h_session)
    # Marshal the dict into two parallel C arrays of ids and values.
    ids = AutoCArray(data=list(policies.keys()), ctype=CK_ULONG)
    vals = AutoCArray(data=list(policies.values()), ctype=CK_ULONG)
    return CA_SetDestructiveHSMPolicies(session, ids.size.contents, ids.array, vals.array)
ca_set_destructive_hsm_policies_ex = make_error_handle_function(ca_set_destructive_hsm_policies)
def ca_get_hsm_capability_set(slot):
    """
    Get the capabilities of the given slot.
    :param int slot: Target slot number
    :return: retcode, {id: val} dict of capabilities (None if command failed)
    """
    slot_id = CK_SLOT_ID(slot)
    # Empty AutoCArrays: the first call discovers required sizes, the retry
    # (driven by @refresh_c_arrays) fetches the actual data.
    cap_ids = AutoCArray()
    cap_vals = AutoCArray()
    @refresh_c_arrays(1)
    def _get_hsm_caps():
        """Closer for retries to work w/ properties"""
        return CA_GetHSMCapabilitySet(
            slot_id, cap_ids.array, cap_ids.size, cap_vals.array, cap_vals.size
        )
    ret = _get_hsm_caps()
    # Pair each capability id with its value.
    return ret, dict(list(zip(cap_ids, cap_vals)))
ca_get_hsm_capability_set_ex = make_error_handle_function(ca_get_hsm_capability_set)
def ca_get_hsm_capability_setting(slot, capability_id):
    """
    Get the value of a single capability.

    :param slot: slot ID of slot to query
    :param capability_id: capability ID
    :return: result code, CK_ULONG representing capability active or not
    """
    setting = CK_ULONG()
    ret = CA_GetHSMCapabilitySetting(
        CK_SLOT_ID(slot), CK_ULONG(capability_id), pointer(setting)
    )
    return ret, setting.value
ca_get_hsm_capability_setting_ex = make_error_handle_function(ca_get_hsm_capability_setting)
def ca_get_hsm_policy_set(slot):
    """
    Get the policies of the given slot.
    :param int slot: Target slot number
    :return: retcode, {id: val} dict of policies (None if command failed)
    """
    slot_id = CK_SLOT_ID(slot)
    # Empty AutoCArrays: first call discovers sizes, the decorated retry
    # fetches the actual data.
    pol_ids = AutoCArray()
    pol_vals = AutoCArray()
    @refresh_c_arrays(1)
    def _ca_get_hsm_policy_set():
        """Closure for retries."""
        return CA_GetHSMPolicySet(
            slot_id, pol_ids.array, pol_ids.size, pol_vals.array, pol_vals.size
        )
    ret = _ca_get_hsm_policy_set()
    # Pair each policy id with its value.
    return ret, dict(list(zip(pol_ids, pol_vals)))
ca_get_hsm_policy_set_ex = make_error_handle_function(ca_get_hsm_policy_set)
def ca_get_hsm_policy_setting(slot, policy_id):
    """
    Get the value of a single policy.

    :param slot: slot ID of slot to query
    :param policy_id: policy ID
    :return: result code, CK_ULONG representing policy active or not
    """
    setting = CK_ULONG()
    ret = CA_GetHSMPolicySetting(CK_SLOT_ID(slot), CK_ULONG(policy_id), pointer(setting))
    return ret, setting.value
ca_get_hsm_policy_setting_ex = make_error_handle_function(ca_get_hsm_policy_setting)
| StarcoderdataPython |
4874712 | <reponame>CIS-560/pokemon_breeding_django<filename>web/urls.py
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from apps.pokemon_app import views
from django.contrib.auth import views as auth_views
admin.autodiscover()
urlpatterns = []
# Debug Toolbar
if settings.DEBUG:
    # Development-only wiring: debug toolbar, static files and media serving.
    import debug_toolbar
    from django.contrib.staticfiles import views as staticviews
    from django.conf.urls.static import static
    urlpatterns += [
        url(r'^__debug__/', include(debug_toolbar.urls)),
        url(r'^static/(?P<path>.*)$', staticviews.serve),
    ]
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Routes available in all environments.
normalpatterns = [
    url(r'^admin/', admin.site.urls),
    # NOTE(review): auth_views.login is the function-based view removed in
    # Django 2.1, so this file targets Django < 2.1 -- confirm before upgrade.
    url(r'^login/', auth_views.login,{'template_name': 'login.html'}, name ='login'),
    url(r'^register/', views.register,name ='register' ),
    url(r'^$', views.app_entry, name='app_entry'),
    url(r'^egg_moves/$', views.egg_moves, name='egg_moves'),
    url(r'^add_to_favorites/$', views.add_to_favorites, name='add_to_favorites'),
    url(r'^results/$', views.results, name='results'),
    url(r'^simple_upload/', views.simple_upload, name='simple_upload'),
    url(r'^upload/', views.parse, name='parse'),
    url(r'^favorites/', views.favorites, name='favorites'),
]
urlpatterns += normalpatterns
| StarcoderdataPython |
3582393 | <reponame>Acidburn0zzz/pontoon
from silme.core import EntityList, Entity, Comment
from .structure import GettextStructure
import re
class GettextParser():
    """Parser that turns gettext .po text into a GettextStructure / EntityList.

    The text is split in stages: msgctxt blocks are stripped, then comment
    lines are split off, then msgid/msgstr pairs become Entity objects.
    NOTE: the `object` parameter name below shadows the builtin; it is the
    silme element list being appended to.
    """
    patterns = {}
    # msgid "..." / msgstr "..." (possibly multi-line msgstr) pair.
    patterns['entity'] = re.compile('^msgid "([^"]*)"\nmsgstr ((?:"[^"]*"\n?)*(?:"[^"]*"))$',re.M|re.S)
    # A full '#...' comment line; group(1) is the text after '#'.
    patterns['comment'] = re.compile('^#([^\n]*)$',re.M)
    patterns['msgctxt'] = re.compile('^msgctxt [^\n]*\n',re.M|re.S)
    @classmethod
    def parse(cls, text, code='default'):
        """Parse *text* into a GettextStructure with fallback locale *code*."""
        po = GettextStructure()
        cls.build_element_list(text, po, code=code)
        po.fallback = code
        return po
    @classmethod
    def parse_to_entitylist(cls, text, code='default'):
        """Parse *text* into a flat EntityList, ignoring comments."""
        entityList = EntityList()
        text = cls.patterns['comment'].sub('', text)
        matchlist = cls.patterns['entity'].findall(text)
        for match in matchlist:
            # Skip entries with an empty msgid (the .po header block).
            if match[0]:
                entityList.add_entity(Entity(match[0], match[1], code))
        return entityList
    @classmethod
    def build_element_list (cls, text, object, type='comment', code='default', pointer=0, end=None):
        """Entry point of the staged split; delegates to split_msgctxt."""
        cls.split_msgctxt(text, object, code)
    @classmethod
    def split_msgctxt(cls, text, object, code='default', pointer=0, end=None):
        '''
        this method removes all msgctxt for now (we don't know how to parse them anyway)
        '''
        pattern = cls.patterns['msgctxt']
        text = re.sub(pattern, '', text)
        cls.split_comments(text, object, code=code, pointer=pointer, end=end)
    @classmethod
    def split_comments (cls, text, object, code='default', pointer=0, end=None):
        """Split comment lines off *text*, merging consecutive comments."""
        pattern = cls.patterns['comment']
        if end:
            match = pattern.search(text, pointer, end)
        else:
            match = pattern.search(text, pointer)
        while match:
            st0 = match.start(0)
            # Anything before the comment is entity/whitespace content.
            if st0 > pointer:
                cls.split_entities(text, object, code=code, pointer=pointer, end=st0)
            comment = Comment()
            cls.split_entities(match.group(1), comment, code=code)
            # Merge with a directly preceding Comment (possibly separated by
            # a single newline string element).
            if len(object) and isinstance(object[-1], Comment):
                object[-1].add_elements(comment)
            elif len(object)>1 and (isinstance(object[-2], Comment) and
                    object[-1]=='\n'):
                object[-2].add_string(object[-1])
                object[-2].add_elements(comment)
                del object[-1]
            else:
                object.append(comment)
            pointer = match.end(0)
            if end:
                match = pattern.search(text, pointer, end)
            else:
                match = pattern.search(text, pointer)
        if len(text) > pointer:
            cls.split_entities(text, object, code=code, pointer=pointer)
    @classmethod
    def split_entities (cls, text, object, code='default', pointer=0, end=None):
        """Split msgid/msgstr pairs into Entity objects; keep the raw source
        string and the value offset in entity.params['source'] for later
        round-tripping."""
        pattern = cls.patterns['entity']
        if end:
            match = pattern.search(text, pointer, end)
        else:
            match = pattern.search(text, pointer)
        while match:
            # Preserve raw text between entities as plain strings.
            if match.start(0) > pointer:
                object.append(text[pointer:match.start(0)])
            entity = Entity(match.group(1), cls._clean_value(match.group(2)))
            entity.params['source'] = {'type': 'gettext',
                                       'string': match.group(0),
                                       'valpos':match.start(2)-match.start(0)}
            object.append(entity)
            pointer = match.end(0)
            if end:
                match = pattern.search(text, pointer, end)
            else:
                match = pattern.search(text, pointer)
        # Append any trailing text after the last entity.
        if (not end or (end > pointer)) and len(text) > pointer:
            if end:
                object.append(text[pointer:end])
            else:
                object.append(text[pointer:])
        return object
    @classmethod
    def _clean_value(cls, text):
        # Strip all quote characters from a (possibly multi-line) msgstr.
        return text.replace('"', '')
| StarcoderdataPython |
5168756 | <filename>library/icinga2_ca.py<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) 2020, <NAME> <<EMAIL>>
# BSD 2-clause (see LICENSE or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
# import json
import os
from ansible.module_utils.basic import AnsibleModule
# import urllib3
# from requests import Session
class Icinga2CaHelper(object):
    """
    Helper around the ``icinga2 pki`` command line tool.

    Creates (in order, each step skipped when its output already exists):
    a new CA, a certificate signing request for ``hostname`` and a signed
    certificate, all underneath /var/lib/icinga2.  ``force`` wipes the
    existing ca/ and certs/ directory contents first.
    """
    module = None

    def __init__(self, module):
        """
        Initialize all needed Variables

        :param module: the AnsibleModule instance (provides params, logging
            and command execution).
        """
        self.module = module
        # Second argument True: fail the module early when the icinga2
        # binary cannot be found on the target host.
        self._icinga2 = module.get_bin_path('icinga2', True)
        self.lib_directory = "/var/lib/icinga2"
        self.hostname = module.params.get("hostname")
        self.common_name = module.params.get("common_name")
        self.key_file = module.params.get("key_file")
        self.csr_file = module.params.get("csr_file")
        self.cert_file = module.params.get("cert_file")
        self.force = module.params.get("force")
        # Debug logging of all effective parameters.
        module.log(msg="icinga2 : {} ({})".format(self._icinga2, type(self._icinga2)))
        module.log(msg="hostname : {} ({})".format(self.hostname, type(self.hostname)))
        module.log(msg="common_name : {} ({})".format(self.common_name, type(self.common_name)))
        module.log(msg="key_file : {} ({})".format(self.key_file, type(self.key_file)))
        module.log(msg="csr_file : {} ({})".format(self.csr_file, type(self.csr_file)))
        module.log(msg="cert_file : {} ({})".format(self.cert_file, type(self.cert_file)))
        module.log(msg="force : {} ({})".format(self.force, type(self.force)))

    def run(self):
        ''' Create CA, CSR and signed certificate as needed; return an
        Ansible result dict with failed/changed/ansible_module_results. '''
        result = dict(
            failed=False,
            changed=False,
            ansible_module_results="none"
        )
        if(self.force):
            self.module.log(msg="force mode ...")
            self._remove_directory(os.path.join(self.lib_directory, 'ca'))
            self._remove_directory(os.path.join(self.lib_directory, 'certs'))
        # Sets up a new Certificate Authority.
        # icinga2 pki new-ca
        self.module.log(msg="Sets up a new Certificate Authority.")
        key = os.path.join(self.lib_directory, 'ca', 'ca.key')
        # NOTE(review): icinga2 itself typically writes 'ca.crt'; confirm
        # 'ca.cert' is really the file name produced on this installation.
        cert = os.path.join(self.lib_directory, 'ca', 'ca.cert')
        self.module.log(msg=" key : '{}'".format(key))
        self.module.log(msg=" cert : '{}'".format(cert))
        # BUG FIX: the original tested os.path.isfile(key) twice, so the
        # presence of the CA certificate was never checked.
        if(not os.path.isfile(key) and not os.path.isfile(cert)):
            rc, out = self._exec(["new-ca"])
            self.module.log(msg=" rc : '{}'".format(rc))
            self.module.log(msg=" out: '{}'".format(out))
            result['ansible_module_results'] = "Command returns {}".format(out)
            if(rc == 0):
                result['changed'] = True
            else:
                result['failed'] = True
        else:
            result['ansible_module_results'] = "CA already exists"
            self.module.log(msg="skip, CA already exists")
        # Creates a new Certificate Signing Request, a self-signed X509 certificate or both.
        # icinga2 pki new-cert
        #   --cn {{ icinga2_certificate_cn }}
        #   --key {{ icinga2_pki_dir }}/{{ inventory_hostname }}.key
        #   --csr {{ icinga2_pki_dir }}/{{ inventory_hostname }}.csr
        self.module.log(msg="Creates a new Certificate Signing Request, a self-signed X509 certificate or both.")
        key = os.path.join(self.lib_directory, 'certs', '{}.key'.format(self.hostname))
        csr = os.path.join(self.lib_directory, 'certs', '{}.csr'.format(self.hostname))
        cert = os.path.join(self.lib_directory, 'certs', '{}.crt'.format(self.hostname))
        self.module.log(msg=" key : '{}'".format(key))
        self.module.log(msg=" csr : '{}'".format(csr))
        self.module.log(msg=" cert : '{}'".format(cert))
        if(not os.path.isfile(csr)):
            rc, out = self._exec([
                "new-cert",
                "--cn",
                self.common_name,
                "--key",
                key,
                "--csr",
                csr
            ])
            self.module.log(msg=" rc : '{}'".format(rc))
            self.module.log(msg=" out: '{}'".format(out))
            result['ansible_module_results'] = "Command returns {}".format(out)
            if(rc == 0):
                result['changed'] = True
            else:
                result['failed'] = True
        else:
            result['ansible_module_results'] = "skip, csr already created"
            self.module.log(msg="skip, csr already created")
        # Reads a Certificate Signing Request from stdin and prints a signed certificate on stdout.
        # icinga2 pki sign-csr
        #   --csr {{ icinga2_pki_dir }}/{{ inventory_hostname }}.csr
        #   --cert {{ icinga2_pki_dir }}/{{ inventory_hostname }}.crt
        self.module.log(msg="Reads a Certificate Signing Request from stdin and prints a signed certificate on stdout.")
        if(not os.path.isfile(cert)):
            rc, out = self._exec([
                "sign-csr",
                "--csr",
                csr,
                "--cert",
                cert
            ])
            self.module.log(msg=" rc : '{}'".format(rc))
            self.module.log(msg=" out: '{}'".format(out))
            result['ansible_module_results'] = "Command returns {}".format(out)
            if(rc == 0):
                result['changed'] = True
            else:
                result['failed'] = True
        else:
            result['ansible_module_results'] = "skip, cert already created."
            self.module.log(msg="skip, cert already created.")
        return result

    def _exec(self, args):
        ''' Run ``icinga2 pki`` with *args*; return (rc, stdout).
        check_rc=True makes Ansible fail the task on a non-zero rc. '''
        cmd = [self._icinga2, 'pki'] + args
        self.module.log(msg="cmd: {}".format(cmd))
        rc, out, err = self.module.run_command(cmd, check_rc=True)
        return rc, out

    def _remove_directory(self, directory):
        ''' Recursively delete the *contents* of *directory*;
        the top-level directory itself is kept. '''
        self.module.log(msg="remove directory {}".format(directory))
        for root, dirs, files in os.walk(directory, topdown=False):
            for name in files:
                os.remove(os.path.join(root, name))
            for name in dirs:
                os.rmdir(os.path.join(root, name))
# ===========================================
# Module execution.
#
def main():
    """Ansible entry point: build the module spec, run the CA helper and
    report the result back to Ansible."""
    argument_spec = dict(
        state=dict(default="present", choices=["absent", "present"]),
        hostname=dict(required=True),
        common_name=dict(required=True),
        key_file=dict(required=False),
        csr_file=dict(required=False),
        cert_file=dict(required=False),
        force=dict(required=False, default=False, type='bool'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    helper = Icinga2CaHelper(module)
    module.exit_json(**helper.run())
# import module snippets
# Run main() only when executed directly (how Ansible invokes modules).
if __name__ == '__main__':
    main()
| StarcoderdataPython |
288355 | #!/usr/bin/env python
# -*- coding:utf8 -*-
#
class htmlfind:
    """Python 2 helper that positions a cursor inside an HTML string by
    chaining regex searches, then delegates extraction to the `cutil`
    C-extension helpers."""
    def __init__(self, html, reg, which):
        self.s = ''
        self.start = 0
        self.which = 0
        self._begin(html, reg, which)
    def _begin(self, s, reg, which):
        """Advance self.start past each pattern in *reg* (a pattern or a
        list of patterns); on a failed match the cursor jumps to the end."""
        if isinstance(s, unicode):
            s = s.encode('utf-8')
        regtype = type(re.compile(''))
        if isinstance(reg, unicode) or isinstance(reg, str) or isinstance(reg, regtype):
            reg = [reg]
        if not isinstance(reg, list):
            raise RuntimeError("unknown type")
        start = 0
        for r in reg:
            if isinstance(r, unicode):
                r = r.encode('utf-8')
            if isinstance(r, str):
                # NOTE(review): re.search's third positional argument is
                # *flags*, not a start offset -- `start` is being passed as
                # flags here; confirm whether pos-based search was intended
                # (compiled patterns below do use pos correctly).
                m = re.search(r, s, start)
            elif isinstance(r, regtype):
                m = r.search(s, start)
            else:
                raise RuntimeError("unknown type")
            if m is not None:
                start = m.end(0)
            else:
                start = len(s)
                break
        self.s = s
        self.start = start
        self.which = which
    def process_form(self):
        # Delegates to the cutil extension with the positioned cursor.
        return cutil.process_form(self.s, self.start, self.which)
    def get_node(self):
        return cutil.get_html_node(self.s, self.start, self.which)
    def get_text(self):
        return cutil.get_html_text(self.s, self.start, self.which)
    def get_text_hash(self):
        return cutil.get_html_text_hash(self.s, self.start, self.which)
    @staticmethod
    def findTag(doc, tag, attr=None, text_pattern=None):
        """Return all matches of <tag ...>...</tag>, optionally constrained
        by an attribute substring and/or an inner-text regex."""
        pat = None
        if not attr and not text_pattern:
            pat = ur'<{}[^<>]*>(.*?)</{}>'.format(tag, tag)
        elif not attr and text_pattern:
            pat = ur'<{}[^>]*?>{}</{}>'.format(tag, text_pattern, tag)
        elif attr and not text_pattern:
            pat = ur'<{}[^>]*{}[^>]*>(.*?)</{}>'.format(tag, attr, tag)
        elif attr and text_pattern:
            pat = ur'<{}[^>]*{}[^>]*>{}</{}>'.format(tag, attr, text_pattern, tag)
        els = re.findall(pat, doc, re.S)
        return els
    @staticmethod
    def remove_tag(s, fmt=False):
        """Strip HTML tags from *s*; with fmt=True also normalise line
        breaks and collapse whitespace."""
        if fmt:
            r = re.sub(r'<br>|<p>|<BR>', '\n', s)
            r = re.sub(r'(<[^>]*>)', '', r)
            # NOTE(review): this pattern looks like an '&nbsp;' entity that
            # was decoded to a literal non-breaking space -- confirm.
            r = re.sub(r' ', ' ', r)
            r = re.sub(r'[\t\r ]+', ' ', r)
            r = re.sub(r'\s+\n+\s+', '\n', r)
            r = re.sub(r'^\s+|\s+$', '', r)
        else:
            r = re.sub(r'(<[^>]*>)', '', s)
            r = re.sub(r' ', ' ', r)
        return r
def runjs(jscode):
    """Run *jscode* through a locally installed node/nodejs binary and
    return its stdout.

    Raises RuntimeError when no interpreter can be located or the script
    exits with a non-zero status.
    """
    jscode = utf8str(jscode)
    # Prefer "node"; fall back to the Debian-style "nodejs" name.
    interpreter = which("node") or which("nodejs")
    if interpreter is None:
        raise RuntimeError("nodejs is NOT found!")
    proc = subprocess.Popen(interpreter, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            close_fds=True, bufsize=len(jscode) + 1)
    # Feed the script on stdin and close it so node starts executing.
    proc.stdin.write(jscode)
    proc.stdin.close()
    chunks = []
    while True:
        chunk = proc.stdout.read(1024)
        if not chunk:
            break
        chunks.append(chunk)
    output = ''.join(chunks)
    proc.wait()
    if proc.returncode != 0:
        raise RuntimeError("execute js failed.", proc.returncode, output)
    return output
def which(program):
    """Locate *program* like the shell `which` command.

    A name containing a directory component is checked directly; a bare
    name is searched for in every entry of $PATH.  Returns the resolved
    path, or None when no executable file is found.
    """
    def _executable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    directory, _ = os.path.split(program)
    if directory:
        if _executable(program):
            return program
    else:
        for entry in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(entry.strip('"'), program)
            if _executable(candidate):
                return candidate
    return None
class TimeHandler(object):
    """Python 2 helpers for decoding obfuscated/relative date strings from
    scraped pages into epoch *milliseconds*."""
    @staticmethod
    def isFontDate(datestr):
        # Matches 58.com's font-obfuscated date form: four hex entities,
        # then two, then two, separated by '-'.
        return re.match(r'(&#x[\d\w]{4};){4}-(&#x[\d\w]{4};){2}-(&#x[\d\w]{4};){2}', datestr)
    @staticmethod
    def cv58fontDateCvt(datestr):
        """Convert a 58.com font-obfuscated date to 'YYYY-MM-DD'; non-font
        strings pass through unchanged."""
        if not TimeHandler.isFontDate(datestr):
            return datestr
        ds = re.sub('-', '', datestr.replace('&#', '0'))
        numstrs = ds.split(';')
        # The first entity encodes the digit '2'; every other digit is
        # recovered relative to it.
        retstr = ['2']
        relInt = int(numstrs[0], 16)
        for numstr in numstrs[1:8]:
            retstr.append(str(int(numstr, 16) - relInt + 2))
        return '%s-%s-%s' % (''.join(retstr[:4]), ''.join(retstr[4:6]), ''.join(retstr[6:]))
    @staticmethod
    def isBeforeNDay(t, day):
        """True when timestamp *t* (a 'Y-m-d H:M:S' string or epoch-ms int)
        is more than *day* days in the past."""
        if isinstance(t, str) or isinstance(t, unicode):
            m = re.search('(\d+)-(\d+)-(\d+).*?(\d+):(\d+):(\d+)', t)
            if m:
                # NOTE(review): time.mktime expects a 9-item struct_time
                # tuple; passing a list may raise on some interpreters --
                # confirm.
                arr = [int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4)), int(m.group(5)),
                       int(m.group(6)), 0, 0, 0]
                t = time.mktime(arr)
                if time.time() - t > 3600 * 24 * day:
                    return True
        if isinstance(t, int):
            # Integer timestamps are assumed to be in milliseconds.
            if int(time.time()) - t / 1000 > 3600 * 24 * day:
                return True
        return False
    @staticmethod
    def getTimeOfNDayBefore(day):
        """Epoch milliseconds of the instant *day* days ago."""
        day = int(day)
        one_day = 24 * 3600
        nday_before = time.time() - day * one_day
        return int(nday_before * 1000)
    @staticmethod
    def fmt_time(tag):
        """Parse absolute ('HH:MM', 'Y-m-d', ...) and relative ('3 小时',
        '2 days ago', ...) date strings into epoch milliseconds, relative
        to the current local time; raises on unknown formats."""
        if isinstance(tag, unicode):
            tag = tag.encode('utf-8')
        now_time = list(time.localtime())
        # --- absolute forms -------------------------------------------------
        t = re.search(r'(\d+):(\d+)', tag)
        if t:
            now_time[3] = int(t.group(1))
            now_time[4] = int(t.group(2))
            return int(time.mktime(now_time) * 1000)
        t = re.search(r'(\d+)-(\d+)-(\d+)', tag)
        if t:
            now_time[0] = int(t.group(1))
            now_time[1] = int(t.group(2))
            now_time[2] = int(t.group(3))
            return int(time.mktime(now_time) * 1000)
        t = re.search(r'(\d+)-(\d+)', tag)
        if t:
            now_time[1] = int(t.group(1))
            now_time[2] = int(t.group(2))
            return int(time.mktime(now_time) * 1000)
        t = re.search(r'(\d+)/(\d+)/(\d+)', tag)
        if t:
            now_time[0] = int(t.group(1))
            now_time[1] = int(t.group(2))
            now_time[2] = int(t.group(3))
            return int(time.mktime(now_time) * 1000)
        # --- relative forms, Chinese ---------------------------------------
        t = re.search(r'(\d+)小时', tag)
        if t:
            hour = int(t.group(1))
            return int(time.time() - hour * 3600) * 1000
        t = re.search(r'(\d+)分钟', tag)
        if t:
            minute = int(t.group(1))
            return int(time.time() - minute * 60) * 1000
        t = re.search(r'(\d+).*?天', tag)
        if t:
            day = t.group(1)
            return TimeHandler.getTimeOfNDayBefore(day)
        t = re.search(r'前天', tag)
        if t:
            return TimeHandler.getTimeOfNDayBefore(2)
        t = re.search(r'昨天', tag)
        if t:
            return TimeHandler.getTimeOfNDayBefore(1)
        t = re.search(r'今天', tag)
        if t:
            return TimeHandler.getTimeOfNDayBefore(0)
        t = re.search(r'刚刚', tag)
        if t:
            return int(time.time()) * 1000
        t = re.search(r'(\d+)月内', tag)
        if t:
            day = int(t.group(1)) * 30
            return TimeHandler.getTimeOfNDayBefore(day)
        t = re.search(r'(\d+)周内', tag)
        if t:
            day = int(t.group(1)) * 7
            return TimeHandler.getTimeOfNDayBefore(day)
        # --- relative forms, English ---------------------------------------
        t = re.search(r'(\d+).*?day', tag)
        if t:
            day = t.group(1)
            return TimeHandler.getTimeOfNDayBefore(day)
        t = re.search(r'(\d+).*?hour', tag)
        if t:
            hour = int(t.group(1))
            return int(time.time() - hour * 3600) * 1000
        t = re.search(r'(\d+).*?minute', tag)
        if t:
            minute = int(t.group(1))
            return int(time.time() - minute * 60) * 1000
        t = re.search(r'(\d+)个月前', tag)
        if t:
            day = int(t.group(1)) * 30
            return TimeHandler.getTimeOfNDayBefore(day)
        raise Exception("not copy time pattern: {}".format(tag))
class System:
    """Small host-introspection helpers."""
    @staticmethod
    def hostname():
        """Best-effort hostname lookup: os.uname, /etc/hostname, then the
        `hostname` command; returns None when every strategy fails.
        NOTE(review): the bare excepts intentionally swallow all errors for
        the fallback chain, but also hide unrelated failures."""
        try:
            return os.uname()[1]
        except:
            pass
        try:
            with open('/etc/hostname') as f:
                return f.readline().strip()
        except:
            pass
        try:
            a = os.popen("hostname")
            return a.read().strip()
        except:
            pass
        return None
    @staticmethod
    def is_osx():
        # `system` is presumably platform.system (imported elsewhere in
        # this module) -- confirm; 'Darwin' is macOS.
        return system() == 'Darwin'
def empty_str(s):
    """Return True when *s* is None or the empty string."""
    if s is None:
        return True
    return s == ''
def chained_regex(s, *regex):
    """Apply each pattern in *regex* in sequence: the matches produced by
    one stage become the inputs of the next stage.

    Returns the matches of the final stage, or [] as soon as any stage
    finds nothing.  Patterns may be strings or pre-compiled regex objects.
    """
    compiled_type = type(re.compile(''))
    stage_input = [s]
    for pattern in regex:
        stage_output = []
        for chunk in stage_input:
            # The `unicode` branch only matters on Python 2; on Python 3
            # the `str` test short-circuits first.
            if isinstance(pattern, str) or isinstance(pattern, unicode):
                found = re.findall(pattern, chunk)
            elif isinstance(pattern, compiled_type):
                found = pattern.findall(chunk)
            else:
                raise RuntimeError('invalid arg')
            if found:
                stage_output.extend(found)
        if len(stage_output) == 0:
            return []
        stage_input = stage_output
    return stage_input
def unique_list(arr, key_fun=None):
    """Return *arr* with duplicates removed, keeping first occurrences.

    Equality is decided by key_fun(item) when given.  Seen keys are kept
    in a plain list, so unhashable keys work too.  Non-list input is
    returned unchanged.
    """
    if not isinstance(arr, list):
        return arr
    key_of = key_fun if key_fun is not None else (lambda item: item)
    seen_keys = []
    result = []
    for item in arr:
        key = key_of(item)
        if key in seen_keys:
            continue
        seen_keys.append(key)
        result.append(item)
    return result
def sendmail(email, title, message, is_html=False):
username = ''
password = ''
smtphost = ''
smtpport = ''
if isinstance(message, unicode):
message = message.encode('utf-8')
if isinstance(title, unicode):
title = message.encode('utf-8')
if is_html:
msg = MIMEText(message, 'html', 'utf-8')
else:
msg = MIMEText(message, 'plain', 'utf-8')
msg['Subject'] = Header(title, 'utf-8')
msg['From'] = username
if isinstance(email, list):
msg['To'] = '; '.join(email)
tolist = email
else:
msg['To'] = email
tolist = [email]
for i in range(0, len(tolist)):
m = re.search('<([a-z0-9_@\-.]*)>\s*$', tolist[i], re.I)
if m:
tolist[i] = m.group(1)
print "sending mail to", tolist
print msg.as_string()
s = smtplib.SMTP_SSL(smtphost, smtpport)
s.login(username, password)
s.sendmail(username, tolist, msg.as_string())
s.quit()
| StarcoderdataPython |
import sys
# Fast input: read whole lines straight from stdin.
input = sys.stdin.readline
# Deep recursion limit (competitive-programming boilerplate; unused here).
sys.setrecursionlimit(10 ** 7)
a, b, c, d = map(int, input().split())
# Closed intervals [a, b] and [c, d] overlap iff a <= d and c <= b.
if a <= d and c <= b:
    print('Yes')
else:
    print('No')
| StarcoderdataPython |
3371700 | <reponame>nikofil/mitmproxy
import mitmproxy
from mitmproxy.net import tcp
from mitmproxy import ctx
class CheckALPN:
    """mitmproxy addon that warns when HTTP/2 is enabled but the local
    OpenSSL build lacks ALPN support."""

    def __init__(self):
        self.failed = False

    def configure(self, options, updated):
        http2_enabled = mitmproxy.ctx.master.options.http2
        self.failed = http2_enabled and not tcp.HAS_ALPN
        if not self.failed:
            return
        ctx.log.warn(
            "HTTP/2 is disabled because ALPN support missing!\n"
            "OpenSSL 1.0.2+ required to support HTTP/2 connections.\n"
            "Use --no-http2 to silence this warning."
        )
| StarcoderdataPython |
1614768 | <reponame>saphid/OMDbCLI<filename>src/client.py
""" This module handles calling OMDbAPI
Usage:
client = OMDbClient(apikey="xxxxxx")
movie = client.get_movie_by_id(id="tt0086190")
print(movie.title)
"""
import logging
import sys
from typing import Dict, List, Any, Optional
import requests
from src.models import Movie, Query
from src.config import ConfigManager
# Module-level root logger; the INFO level is set once at import time.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class OMDbClient():
    """ Handles all the calls to OMDbAPI and provides functions for getting and searching movies"""
    def __init__(self, base_url: Optional[str] = None):
        # ConfigManager owns the API key, including re-prompting/invalidating it.
        self.config = ConfigManager()
        self.base_url = base_url if base_url else 'http://www.omdbapi.com/'
    def get_movie(self, query: Query) -> Movie:
        """ Runs the query provided to return the best match movie
        Args:
            query (Query): Query with all the data needed to run the request
        Returns:
            Movie: Movie obj with all the response data neatly formatted
        """
        resp_json = self._get(params=query.params)
        return self._from_resp_to_movie(movie_resp=resp_json)
    def search_movies(self, query: Query) -> List[Movie]:
        """ Runs the query provided to return a list of movies
        Args:
            query (Query): Query with all the data needed to run the request
        Returns:
            List[Movie]: List of Movie objs with partial data for each movie
        """
        resp_json = self._get(params=query.params)
        return self._from_resp_to_search(search_resp=resp_json)
    def _get(self, params: Dict[str, Optional[str]], retry: int=10) -> Dict[str, Any]:
        """ Handles a get request to OMDbAPI given a dict of parameters
        Args:
            params (Dict[str, str]): Parameters for OMDbAPI in OMDb format (http://www.omdbapi.com/)
            retry (int): retry count
        Returns:
            Dict[str, Any]: Response from request as a dict
        """
        # Retry budget exhausted: the API key never became valid.
        if not retry:
            print('Too many retries, Please check your api key')
            sys.exit(0)
        params['apikey'] = self.config.api_key
        params['type'] = 'movie'
        try:
            resp = requests.get(self.base_url, params=params)
            resp.raise_for_status()
        except requests.exceptions.HTTPError as err:
            logging.debug('HTTPError: %s', err)
            print('Your api key was invalid or not yet activated, Please check email and make sure to type your key correctly')
            # Drop the stored key so ConfigManager asks for a fresh one,
            # then recurse with one fewer retry.
            self.config.invalidate_key()
            return self._get(params=params, retry=retry-1)
        logging.debug('Get Resp: %s', resp.json())
        return resp.json()
    def _from_resp_to_movie(self, movie_resp: Dict[str, Any]) -> Movie:
        """ Converts the JSON response received from OMDbAPI into a Movie object
        Args:
            movie_resp (Dict): Dict from OMDbAPI JSON movie response
        Returns:
            Movie: Object with all the movies properties completely populated
        """
        # Lower-case all keys and rename 'type' (reserved-ish) to item_type
        # to match the Movie model's field names.
        movie_resp = {key.lower(): value for key, value in movie_resp.items()}
        movie_resp['item_type'] = movie_resp['type']
        del movie_resp['type']
        return Movie(**movie_resp)
    def _from_resp_to_search(self, search_resp: Dict[str, Any]) -> List[Movie]:
        """ Converts the JSON response received from OMDbAPI into a list of Movie objects
        Args:
            search_resp (Dict[str, Any]): Dict from OMDb JSON search response
        Returns:
            List[Movie]: List of objects with movie properties partially populated
                         (only: Title, Year, imdbID, Type, Poster)
        Raises:
            KeyError: when the response carries no 'Search' results
        """
        resp_list: Dict[str, Any] = search_resp.get('Search', {})
        if resp_list:
            results_list = [self._from_resp_to_movie(item) for item in search_resp['Search']]
            return results_list
        else:
            raise KeyError("No search results found")
| StarcoderdataPython |
3284699 | <reponame>changgoo/pyathena-1
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as au
import astropy.constants as ac
# from radps_sp.mass_to_lum import _mass_to_lum
# import hii
class Cloud(object):
    """
    Simple class for spherical clouds

    Initialize by giving exactly two of M, R, Sigma (in units of M_sun,
    pc, M_sun/pc^2, or as astropy Quantities); the third quantity is
    derived from the other two.
    """
    mH = ac.u*1.008 # Mass of a hydrogen atom
    # Particle mass per hydrogen atom (in fully atomic gas)
    muH = 1.4*mH.to(au.gram)

    def __init__(self, M=None, R=None, Sigma=None, alpha_vir=2.0):
        # BUG FIX: the docstring always promised "two of M, R, Sigma", but
        # the original only raised when all three were given, crashed with
        # AttributeError when only one was given, and never handled the
        # (R, Sigma) combination at all.
        if sum(q is not None for q in (M, R, Sigma)) != 2:
            raise ValueError('Exactly two of M, R, Sigma must be defined')
        # Attach default units to dimensionless input
        if M is not None and not isinstance(M, au.quantity.Quantity):
            M = (M*ac.M_sun).to(au.M_sun)
        if R is not None and not isinstance(R, au.quantity.Quantity):
            R = (R*ac.pc).to(au.pc)
        if Sigma is not None and not isinstance(Sigma, au.quantity.Quantity):
            Sigma = (Sigma*ac.M_sun/ac.pc**2).to(ac.M_sun/ac.pc**2)

        if M is not None and Sigma is not None:  # M and Sigma are given
            self.M = M
            self.Sigma = Sigma
            self.R = np.sqrt(M/(np.pi*Sigma))
        elif M is not None and R is not None:    # M and R are given
            self.M = M
            self.R = R
            self.Sigma = M/(np.pi*R**2)
        else:                                    # R and Sigma are given
            self.R = R
            self.Sigma = Sigma
            self.M = np.pi*R**2*Sigma

        self.M = self.M.to(au.Msun)
        self.R = self.R.to(au.pc)
        self.Sigma = self.Sigma.to(au.Msun/au.pc**2)

        # Derived quantities: mean density, H number density, free-fall
        # time, escape speed and the 1D velocity dispersion implied by the
        # requested virial parameter.
        self.rho = Cloud.calc_rho(self.M, self.R).to(au.g/au.cm**3)
        self.nH = (self.rho/Cloud.muH).to(au.cm**(-3))
        self.tff = Cloud.calc_tff(self.rho)
        self.alpha_vir = alpha_vir
        self.vesc = (np.sqrt((2.0*ac.G*self.M)/self.R)).to(au.km/au.s)
        self.sigma1d = np.sqrt(self.alpha_vir/10.0)*self.vesc

    def __str__(self):
        # A scalar Quantity has shape (), which is falsy.
        if not self.M.shape:
            return 'Cloud object: M={0:<5g}, R={1:<5g},'\
                'Sigma={2:<5g}, nH={3:<5g}, tff={4:<5g}'.format(
                    self.M, self.R, self.Sigma, self.nH, self.tff)
        else:
            # BUG FIX: '{0:s}' raises TypeError for a tuple; use the
            # default conversion instead.
            return 'Cloud objects with shape {0}'.format(self.M.shape)

    @staticmethod
    def calc_rho(M, R):
        """Mean density of a uniform sphere of mass M and radius R."""
        return M/(4.0*np.pi/3.0*R**3)

    @staticmethod
    def calc_tff(rho):
        """Free-fall time for density rho, in Myr."""
        return np.sqrt((3.0*np.pi)/(32.0*ac.G*rho)).to(au.Myr)

    @staticmethod
    def calc_Sigma_from_M_vesc(M, vesc):
        """Surface density of a cloud of mass M with escape speed vesc."""
        return ((vesc**2/(2.0*ac.G))**2/(np.pi*M)).to(au.M_sun/au.pc**2)

    @staticmethod
    def calc_Sigma_from_M_tff0(M, tff0):
        """Surface density of a cloud of mass M with free-fall time tff0."""
        return ((np.pi*M/64.0)**(1.0/3.0)/(tff0**2*ac.G)**(2.0/3.0)).to(au.M_sun/au.pc**2)
# class Cloud_HII(Cloud):
# def __init__(self, M=None, R=None, Sigma=None, alpha_vir=2.0,
# SFE=None, Tion=8.0e3, sigmad=1e-21):
# Cloud.__init__(self, M=M, R=R, Sigma=Sigma, alpha_vir=alpha_vir)
# if SFE is not None:
# self.SFE = self.set_SFE(SFE)
# self.Tion = Tion*au.K
# self.cion = (np.sqrt(2.1*ac.k_B*self.Tion/Cloud.muH)).cgs
# self.alphaB = hii.alphaB(self.Tion.value)*au.cm**3/au.s
# self.sigmad = sigmad*au.cm**2
# def set_SFE(self, SFE, iPsi=0):
# self.SFE = SFE
# self.Mstar = self.SFE*self.M
# self.set_L_Qi(SFE, iPsi=iPsi)
# self.calc_F_thm()
# self.calc_F_rad()
# def set_L_Qi(self, SFE, iPsi=0):
# self.L, self.Qi = _mass_to_lum(self.Mstar.value, iPsi=iPsi)
# self.L *= au.L_sun
# self.Qi /= au.s
# self.Psi = self.L/self.Mstar
# self.Xi = self.Qi/self.Mstar
# def get_nion_rms(self, Rst=None, fion=1.0):
# if Rst is None:
# Rst = self.R
# self.nion_rms = hii.nion_rms(self.Qi.cgs.value, self.alphaB.cgs.value,
# Rst.cgs.value, fion=fion)*au.cm**(-3)
# return self.nion_rms
# def get_Rst(self, nion_rms=None, fion=1.0):
# if nion_rms is None:
# self.get_nion_rms(self.R)
# else:
# self.nion_rms = nion_rms
# self.Rst = (hii.rst(self.Qi.cgs.value, self.alphaB.cgs.value,
# nion_rms.cgs.value, fion=fion)*au.cm).to(au.pc)
# # print '[get_Rst]:Qi,nion_rms,Rst ',self.Qi,self.nion_rms,self.Rst
# return self.Rst
# def Mdot_phot_blister(self):
# Area = 2.0*np.pi*self.R**2
# self.get_nion_rms()
# rho = self.nion_rms*Cloud.muH
# return rho*Area*self.cion
# def Mphot_blister(self, SFE=None, time=None):
# if SFE is not None:
# self.set_SFE(SFE)
# if time is None:
# time = self.tff
# return (self.Mdot_phot_blister()*time).to(au.Msun)
# def Mphot_rocket_blister(self, SFE=None, time=None):
# if SFE is not None:
# self.set_SFE(SFE)
# if time is None:
# time = self.tff
# Mphot = (self.Mdot_phot_blister()*time).to(au.Msun)
# Mdyn = Mphot*self.cion/self.vesc
# return Mphot+Mdyn
# def calc_F_thm(self, fion=0.5):
# T = (8.0*np.pi*ac.k_B*self.Tion) * \
# np.sqrt(3.0*fion*self.Xi/(4.0*np.pi*self.alphaB))
# self.Fthm = (T*self.M**0.75/(np.pi**0.25*self.Sigma**0.25)).cgs
# def calc_F_rad(self):
# self.Frad = (self.SFE*self.Psi*self.M/ac.c).cgs
if __name__ == '__main__':
    # Demo: sweep a grid of cloud masses and surface densities, then
    # contour-plot log10 of the free-fall time over that grid.
    Ms_ = np.logspace(3, 7, num=100)*ac.M_sun
    Ss_ = np.logspace(1, 3, num=100)*ac.M_sun/ac.pc**2
    Ms, Ss = np.meshgrid(Ms_, Ss_)
    ## Input is array
    clouds = Cloud(M=Ms, Sigma=Ss)
    # Array of objects
    #clouds=np.array([[Cloud(M=M,Sigma=S) for S in Ss] for M in Ms])
    # print clouds.M,clouds.Sigma,clouds.tff
    levels = np.arange(-2, 2, 0.5)
    CS = plt.contour(clouds.M, clouds.Sigma, np.log10(clouds.tff.value),
                     levels=levels, linestyles='dashed', colors='r', alpha=0.2)
    plt.xscale('log')
    plt.yscale('log')
    # NOTE(review): manual_locations looks intended for plt.clabel(CS, ...),
    # which is never called -- confirm whether contour labels were meant to
    # be drawn.
    manual_locations = [(-1, -1.4), (-0.62, -0.7), (-2, 0.5),
                        (1.7, 1.2), (2.0, 1.4), (2.4, 1.7)]
    plt.savefig('cloud.png')
| StarcoderdataPython |
class Cpf:
    """Lightweight wrapper for a CPF (Brazilian taxpayer id).

    Accepts an 11-digit string or integer; raises ValueError otherwise.
    """

    def __init__(self, documento):
        documento = str(documento)
        if self.cpf_eh_valido(documento):
            self.cpf = documento
        else:
            raise ValueError("CPF inválido!")

    def cpf_eh_valido(self, documento):
        """Return True for a string of exactly 11 digits.

        BUG FIX: previously only the length was checked, so any 11-char
        string (letters, punctuation) was accepted as a valid CPF.
        """
        return len(documento) == 11 and documento.isdigit()

    def cpf_formato(self):
        """Return the CPF formatted as XXX.XXX.XXX-XX."""
        return "{}.{}.{}-{}".format(
            self.cpf[:3],
            self.cpf[3:6],
            self.cpf[6:9],
            self.cpf[9:]
        )
| StarcoderdataPython |
311455 | <reponame>Balothar12/uefg
import pathlib as pl
class ProjectDirectoryDoesNotExist(Exception):
    """Raised when the given project directory is missing on disk."""

    def __init__(self, directory: pl.Path):
        self.message = f"Directory {directory} does not exist, please specify a valid project directory."
        # BUG FIX: forward to Exception so str(exc) and tracebacks carry
        # the message (it was previously empty).
        super().__init__(self.message)
class ProjectFileDoesNotExist(Exception):
    """Raised when <project>.<extension> cannot be found in the project."""

    def __init__(self, directory: pl.Path, project: str, extension: str):
        # NOTE: `directory` is accepted for context but not used in the
        # message; kept for interface compatibility.
        self.message = f"Project file {project}.{extension} could not be found in project {project}."
        # BUG FIX: forward to Exception so str(exc) carries the message.
        super().__init__(self.message)
class InvalidConfig(Exception):
    """Raised for a malformed or inconsistent configuration."""

    def __init__(self, msg: str):
        self.message = f"Invalid config: {msg}."
        # BUG FIX: forward to Exception so str(exc) carries the message.
        super().__init__(self.message)
class NoFilesCouldBeCreated(Exception):
    """Raised when none of the requested files could be created."""

    def __init__(self, files: [str]):
        self.message = f"No files could be created: {files}"
        # BUG FIX: forward to Exception so str(exc) carries the message.
        super().__init__(self.message)
class UnsupportedPlatform(Exception):
    """Raised on platforms other than Linux or Windows."""

    def __init__(self):
        self.message = "Only Linux or Windows are valid platforms."
        # BUG FIX: forward to Exception so str(exc) carries the message.
        super().__init__(self.message)
class InvalidFileType(Exception):
    """Raised when an unknown generation mode is requested."""

    def __init__(self, specified_type):
        self.message = f"Only \"ustruct\", \"uobject\", \"header\", \"source\" or \"build\" may be specified as generation modes (actual value: {specified_type})."
        # BUG FIX: forward to Exception so str(exc) carries the message.
        super().__init__(self.message)
4904431 | <filename>BigDataMicroMajor/Python/ComputerNetwork/VirtualStreetlight/Switch/test2.py
# -*- coding: utf-8 -*-
# @Time : 2021/1/1 15:57
# @Author : 咸鱼型233
# @File : test2.py
# @Software: PyCharm
# @Function:
import socket
def send_msg(udp_socket):
    """Prompt for a peer address, port and message text, then send the
    UTF-8 encoded message over *udp_socket*."""
    peer_ip = input("请输入对方的ip:")
    peer_port = int(input("请输入对方的端口号:"))
    text = input("请输入发送信息:")
    udp_socket.sendto(text.encode("utf-8"), (peer_ip, peer_port))
def recv_msg(udp_socket):
    """Block until a datagram arrives, then print the sender address and
    the UTF-8 decoded payload."""
    payload, sender = udp_socket.recvfrom(1024)
    print("从(ip, 端口号)为{0}的主机收到消息:{1} ".format(str(sender), payload.decode("utf-8")))
def main():
    """Create a UDP socket bound to local port 8081 and alternate between
    sending and receiving messages forever."""
    udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    udp_socket.bind(("", 8081))
    while True:
        send_msg(udp_socket)
        recv_msg(udp_socket)
# Start the send/receive loop only when executed as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1733255 | <filename>envs/env_utils.py
import numpy as np
def MinMaxScaler(data):
    """Min Max normalizer.

    Rescales *data* column-wise (axis 0) to the [0, 1] range.

    Args:
      - data: original data

    Returns:
      - norm_data: normalized data
    """
    lo = np.min(data, 0)
    span = np.max(data, 0) - lo
    # The 1e-7 term guards against division by zero for constant columns.
    return (data - lo) / (span + 1e-7)
| StarcoderdataPython |
3231872 | <reponame>codespider/greeter-grpc-service
import logging
import os
import requests
from greeter_grpc import greeter
# Service configuration pulled from the environment.
# NOTE(review): os.environ.get returns a *str* when the variable is set,
# while the PORT/CONSUL_AGENT_PORT defaults are ints -- downstream code
# sees a mixed type; confirm whether an int() conversion is needed.
PORT = os.environ.get('GREETING_SERVICE_PORT', 50051)
CONSUL_REGISTRATION_ENABLED = os.environ.get('CONSUL_REGISTRATION_ENABLED', 'FALSE')
CONSUL_SERVICE_NAME = os.environ.get('CONSUL_SERVICE_NAME', 'greeter')
CONSUL_AGENT_HOST = os.environ.get('CONSUL_AGENT_HOST', '127.0.0.1')
CONSUL_AGENT_PORT = os.environ.get('CONSUL_AGENT_PORT', 8500)
if __name__ == '__main__':
    logging.basicConfig()
    greeter.serve(PORT)
    # Best-effort self-registration with the local Consul agent; service
    # startup must not fail just because Consul is unreachable.
    # NOTE(review): the port 8500 is hard-coded here although
    # CONSUL_AGENT_PORT exists above -- confirm which is intended.
    try:
        requests.put(f"http://{CONSUL_AGENT_HOST}:8500/v1/agent/service/register", json={"Name": "greet-curl"})
    except requests.RequestException:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; only network/HTTP errors are
        # expected here.
        print("Error")
| StarcoderdataPython |
8120176 | <reponame>pnuz3n/respa
import logging
from parler.admin import TranslatableAdmin
from parler.forms import TranslatableModelForm
from django.core.exceptions import ValidationError
from django import forms
from django.contrib import admin
from django.contrib.admin import site as admin_site
from .models import NotificationTemplate, NotificationTemplateGroup
from resources.admin.base import PopulateCreatedAndModifiedMixin, CommonExcludeMixin
# Module-level logger named after this module's import path.
logger = logging.getLogger(__name__)
class NotificationTemplateForm(TranslatableModelForm):
    """Translatable form for notification templates.

    Currently a pass-through of TranslatableModelForm; kept as an
    extension point for template-form customisation.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
class NotificationGroupForm(forms.ModelForm):
    """Admin form for NotificationTemplateGroup.

    Validates that the selected templates contain at most one template of
    each notification type.
    """
    class Meta:
        model = NotificationTemplateGroup
        fields = ['identifier','name','templates']

    def clean(self):
        # BUG FIX: the original skipped super().clean() (losing the parent
        # validation), indexed cleaned_data['templates'] directly (KeyError
        # when that field failed its own validation) and returned nothing.
        cleaned_data = super().clean()
        templates = cleaned_data.get('templates')
        if templates is not None:
            # A template group cannot contain multiples of one type.
            all_new_templates = templates.values_list('type', flat=True)
            distinct_new_templates = all_new_templates.distinct()
            if all_new_templates.count() != distinct_new_templates.count():
                logger.info("Attempted to add a notification template to template group that already contains a template of that type.")
                raise ValidationError('Template group cannot contain multiple templates of the same type.')
        return cleaned_data
class NotificationGroupAdmin(PopulateCreatedAndModifiedMixin, CommonExcludeMixin,
                             admin.ModelAdmin):
    """Admin for template groups.

    Uses the validating NotificationGroupForm; the mixins populate audit
    fields and exclude common bookkeeping columns.
    """
    form = NotificationGroupForm
class NotificationTemplateAdmin(TranslatableAdmin):
    """Admin registration for translatable notification templates.

    TODO: when attempting to save, validate Jinja templates based on
    example data. Possible to get an exception if unknown context
    variables are accessed?
    """
    form = NotificationTemplateForm
# Hook both notification models into the default Django admin site.
admin_site.register(NotificationTemplateGroup, NotificationGroupAdmin)
admin_site.register(NotificationTemplate, NotificationTemplateAdmin)
| StarcoderdataPython |
11337150 | <gh_stars>0
# Read two numbers and report which one is larger (or that they are equal).
num1 = float(input('Digite um número: '))
num2 = float(input('Digite um número: '))
if num1 == num2:
    print('O número {:.2f} é igual ao número {:.2f}'.format(num1,num2))
else:
    # Pick the larger/smaller pair once, then print a single message.
    maior, menor = (num1, num2) if num1 > num2 else (num2, num1)
    print('O número {:.2f} é maior que o número {:.2f}'.format(maior,menor))
| StarcoderdataPython |
11375399 | <gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Problem list:
* next of the register should be driven from
* all assignments to a register has to be in stame process.
* The nop assignment should assign only a reange which corresponds to a current range assigned by statement.
"""
from hwt.code import If, Switch
from hwt.interfaces.std import VectSignal, Signal
from hwt.interfaces.utils import addClkRstn
from hwt.synthesizer.unit import Unit
from hwtLib.common_nonstd_interfaces.addr_data_hs import AddrDataHs, \
AddrDataBitMaskHs
from pyMathBitPrecise.bit_utils import mask
class AssignToASlice0(Unit):
    """
    Conversion between vector and bit
    """
    def _declr(self):
        # Declare ports: a single-bit input and a 1-bit vector output.
        addClkRstn(self)
        self.data_in = Signal()
        self.data_out = VectSignal(1)._m()
    def _impl(self):
        # Drive bit 0 of the output vector directly from the scalar input.
        self.data_out[0](self.data_in)
class AssignToASlice1(Unit):
    """
    Vector parts driven by expr
    """
    def _declr(self):
        # 3-bit input and output vectors.
        addClkRstn(self)
        self.data_in = VectSignal(3)
        self.data_out = VectSignal(3)._m()
    def _impl(self):
        # Each output bit is driven individually from the matching input bit.
        for i in range(3):
            self.data_out[i](self.data_in[i])
class AssignToASlice2(Unit):
    """
    Vector parts driven from multi branch statement
    """
    def _declr(self):
        # 'swap' selects which input bit drives which output bits.
        addClkRstn(self)
        self.swap = Signal()
        self.data_in = VectSignal(2)
        self.data_out = VectSignal(3)._m()
    def _impl(self):
        i, o = self.data_in, self.data_out
        # When swap is set the input bits are crossed; bit 2 mirrors bit 1's
        # source in both branches.
        If(self.swap,
           o[0](i[1]),
           o[1](i[0]),
           o[2](i[0]),
        ).Else(
           o[0](i[0]),
           o[1](i[1]),
           o[2](i[1]),
        )
class AssignToASliceOfReg0(Unit):
    """
    Register where slices of next signal are set conditionally
    """
    def _declr(self):
        # Addressed write channel: 1-bit address selects one of two 8-bit
        # slices of the 16-bit register exposed on data_out.
        addClkRstn(self)
        i = self.data_in = AddrDataHs()
        i.ADDR_WIDTH = 1
        i.DATA_WIDTH = 8
        self.data_out = VectSignal(2 * 8)._m()
    def _impl(self):
        i, o = self.data_in, self.data_out
        r = self._reg("r", self.data_out._dtype, def_val=0)
        # Always ready to accept a write.
        i.rd(1)
        for _i in range(2):
            start = 8 * _i
            # On a valid write to address _i, update only that 8-bit slice.
            If(i.vld & i.addr._eq(_i),
               r[start + 8:start](i.data)
            )
        o(r)
class AssignToASliceOfReg1a(Unit):
    """
    Register where slices of next signal are set conditionally in multiple branches
    """
    def _declr(self):
        # 16-bit write data; address selects byte order of the update.
        addClkRstn(self)
        i = self.data_in = AddrDataHs()
        i.ADDR_WIDTH = 1
        i.DATA_WIDTH = 16
        self.data_out = VectSignal(2 * 8)._m()
    def _impl(self):
        i, o = self.data_in, self.data_out
        r = self._reg("r", self.data_out._dtype, def_val=0)
        i.rd(1)
        # Address 0 writes the data bytes straight through; address 1 writes
        # them byte-swapped.
        If(i.vld & i.addr._eq(0),
           r[8:](i.data[8:]),
           r[:8](i.data[:8]),
        ).Elif(i.vld & i.addr._eq(1),
           r[8:](i.data[:8]),
           r[:8](i.data[8:]),
        )
        o(r)
class AssignToASliceOfReg1b(Unit):
    """
    Register where slices of next signal are set conditionally in multiple branches, nested
    """
    def _declr(self):
        # Same interface as AssignToASliceOfReg1a; only the statement
        # nesting in _impl differs.
        addClkRstn(self)
        i = self.data_in = AddrDataHs()
        i.ADDR_WIDTH = 1
        i.DATA_WIDTH = 16
        self.data_out = VectSignal(2 * 8)._m()
    def _impl(self):
        i, o = self.data_in, self.data_out
        r = self._reg("r", self.data_out._dtype, def_val=0)
        i.rd(1)
        # Outer condition on vld, inner branch on address: address 0 writes
        # straight through, address 1 writes byte-swapped.
        If(i.vld,
           If(i.addr._eq(0),
              r[8:](i.data[8:]),
              r[:8](i.data[:8]),
           ).Elif(i.addr._eq(1),
              r[8:](i.data[:8]),
              r[:8](i.data[8:]),
           )
        )
        o(r)
class AssignToASliceOfReg2a(Unit):
    """
    Register where an overlapping slices of next signal are set conditionally
    """
    def _declr(self):
        # Write channel with a byte-enable style mask.
        addClkRstn(self)
        i = self.data_in = AddrDataBitMaskHs()
        i.ADDR_WIDTH = 1
        i.DATA_WIDTH = 8
        self.data_out = VectSignal(2 * 8)._m()
    def _impl(self):
        i, o = self.data_in, self.data_out
        r = self._reg("r", self.data_out._dtype, def_val=0)
        i.rd(1)
        for _i in range(2):
            start = 8 * _i
            # Full 8-bit mask writes the whole byte; a low-nibble mask
            # writes only the lower 4 bits (slices overlap).
            If(i.vld & i.addr._eq(_i),
               If(i.mask._eq(mask(8)),
                  r[start + 8:start](i.data)
               ).Elif(i.mask._eq(mask(4)),
                  r[start + 4:start](i.data[4:])
               )
            )
        o(r)
class AssignToASliceOfReg2b(Unit):
    """
    Register where an overlapping slices of next signal are set conditionally
    """
    def _declr(self):
        # Same interface as AssignToASliceOfReg2a.
        addClkRstn(self)
        i = self.data_in = AddrDataBitMaskHs()
        i.ADDR_WIDTH = 1
        i.DATA_WIDTH = 8
        self.data_out = VectSignal(2 * 8)._m()
    def _impl(self):
        i, o = self.data_in, self.data_out
        r = self._reg("r", self.data_out._dtype, def_val=0)
        i.rd(1)
        for _i in range(2):
            start = 8 * _i
            # Unlike 2a, the 4-bit-mask branch writes the *upper* nibble
            # of the byte (start+4 .. start+8) from the data's low nibble.
            If(i.vld & i.addr._eq(_i),
               If(i.mask._eq(mask(8)),
                  r[start + 8:start](i.data)
               ).Elif(i.mask._eq(mask(4)),
                  r[start + 8:start + 4](i.data[4:])
               )
            )
        o(r)
class AssignToASliceOfReg3a(Unit):
    """
    Something not assigned by index at the end and then whole signal assigned.
    """
    def _declr(self):
        # 2-bit address over a 32-bit register; only byte lanes 0..2 have
        # addressed cases, the default writes the whole register.
        addClkRstn(self)
        i = self.data_in = AddrDataBitMaskHs()
        i.ADDR_WIDTH = 2
        i.DATA_WIDTH = 8
        self.data_out = VectSignal(4 * 8)._m()
    def _impl(self):
        din, o = self.data_in, self.data_out
        r = self._reg("r", self.data_out._dtype, def_val=0)
        # Cases for addresses 0..2 write one byte lane; any other address
        # falls into Default and assigns the whole register the constant 123.
        Switch(din.addr).add_cases(
            ((i, r[(i + 1) * 8:i * 8](din.data))
             for i in range(3))
        ).Default(
            r(123)
        )
        din.rd(1)
        o(r)
class AssignToASliceOfReg3b(AssignToASliceOfReg3a):
    """
    Something not assigned by index in the middle and then whole signal assigned.
    """
    def _impl(self):
        din, o = self.data_in, self.data_out
        r = self._reg("r", self.data_out._dtype, def_val=0)
        # Byte lane 1 (bits 16:8) intentionally has no addressed case; only
        # the Default branch (whole-register write of 123) can touch it.
        Switch(din.addr)\
            .Case(0,
                r[8:0](din.data)
            ).Case(2,
                r[24:16](din.data)
            ).Case(3,
                r[32:24](din.data)
            ).Default(
                r(123)
            )
        din.rd(1)
        o(r)
class AssignToASliceOfReg3c(AssignToASliceOfReg3a):
    """
    Something not assigned by index at the beggining and then whole signal assigned.
    """
    def _impl(self):
        din, o = self.data_in, self.data_out
        r = self._reg("r", self.data_out._dtype, def_val=0)
        # Addressed cases cover byte lanes 1..3 only; lane 0 is written
        # solely by the Default whole-register assignment.
        Switch(din.addr).add_cases(
            ((i, r[(i + 1) * 8:i * 8](din.data))
             for i in range(1, 4))
        ).Default(
            r(123)
        )
        din.rd(1)
        o(r)
class AssignToASliceOfReg3d(AssignToASliceOfReg3a):
    """
    Only a small fragment assigned and then whole signal assigned.
    """
    def _impl(self):
        din, o = self.data_in, self.data_out
        r = self._reg("r", self.data_out._dtype, def_val=0)
        # Single addressed case touching just byte lane 1; everything else
        # goes through the Default whole-register write.
        Switch(din.addr)\
            .Case(1,
                r[16:8](din.data)
            ).Default(
                r(123)
            )
        din.rd(1)
        o(r)
if __name__ == '__main__':
    # Standalone elaboration: print the generated RTL of one example unit.
    from hwt.synthesizer.utils import to_rtl_str
    print(to_rtl_str(AssignToASliceOfReg3d()))
| StarcoderdataPython |
270077 | from numba import jit
import numpy as np
import math
@jit(nopython=True, fastmath=True)
def init_w(w, n):
    """
    :purpose:
    Return *w* unchanged, or a length-*n* vector of ones when *w* is None.
    Called at the top of every weighted metric so an omitted weight vector
    behaves as uniform weights.
    :params:
    w : weight vector or None. NOTE: w must be an array of np.float64;
        convert boolean weights with w.astype(np.float64) before passing
    n : desired length of the ones vector (usually len(u))
    :returns:
    w : array of shape (n,) — all ones if w was None, otherwise w itself
    """
    return np.ones(n) if w is None else w
@jit(nopython=True, fastmath=True)
def braycurtis(u, v, w=None):
    """
    :purpose:
    Weighted Bray-Curtis distance between two 1D arrays
    :params:
    u, v : input arrays, both of shape (n,)
    w : optional weights of shape (n,); defaults to uniform weights
    :returns:
    braycurtis : float, sum(w * |u - v|) / sum(w * |u + v|)
    """
    size = len(u)
    weights = init_w(w, size)
    diff_total = 0.0
    abs_total = 0.0
    for k in range(size):
        diff_total += abs(u[k] - v[k]) * weights[k]
        abs_total += abs(u[k] + v[k]) * weights[k]
    return diff_total / abs_total
@jit(nopython=True, fastmath=True)
def canberra(u, v, w=None):
    """
    :purpose:
    Weighted Canberra distance between two 1D arrays
    :params:
    u, v : input arrays, both of shape (n,)
    w : optional weights of shape (n,); defaults to uniform weights
    :returns:
    canberra : float, sum over i of w[i] * |u[i]-v[i]| / (|u[i]|+|v[i]|)
    """
    size = len(u)
    weights = init_w(w, size)
    acc = 0.0
    for k in range(size):
        acc += abs(u[k] - v[k]) / (abs(u[k]) + abs(v[k])) * weights[k]
    return acc
@jit(nopython=True, fastmath=True)
def chebyshev(u, v, w=None):
    """
    :purpose:
    Chebyshev (max-norm) distance between two 1D arrays
    :params:
    u, v : input arrays, both of shape (n,)
    w : ignored; present only so the signature matches the other metrics
    :returns:
    chebyshev : float, max over i of |u[i] - v[i]|
    """
    abs_diff = np.abs(u - v)
    return max(abs_diff)
@jit(nopython=True, fastmath=True)
def cityblock(u, v, w=None):
    """
    :purpose:
    Weighted City Block (Manhattan) distance between two 1D arrays
    :params:
    u, v : input arrays, both of shape (n,)
    w : optional weights of shape (n,); defaults to uniform weights
    :returns:
    cityblock : float, sum over i of w[i] * |u[i] - v[i]|
    """
    size = len(u)
    weights = init_w(w, size)
    acc = 0.0
    for k in range(size):
        acc += abs(u[k] - v[k]) * weights[k]
    return acc
@jit(nopython=True, fastmath=True)
def correlation(u, v, w=None, centered=True):
    """
    :purpose:
    Weighted correlation distance between two 1D arrays
    :params:
    u, v : input arrays, both of shape (n,)
    w : optional weights of shape (n,); defaults to uniform weights
    centered : NOTE(review) this flag is currently ignored — the inputs
               are always mean-centered; confirm before relying on it
    :returns:
    correlation : float, 1 - weighted cosine of the centered vectors
    """
    size = len(u)
    weights = init_w(w, size)
    uc = u - np.mean(u)
    vc = v - np.mean(v)
    dot = 0.0
    nu = 0.0
    nv = 0.0
    for k in range(size):
        dot += uc[k] * vc[k] * weights[k]
        nu += abs(uc[k]) ** 2 * weights[k]
        nv += abs(vc[k]) ** 2 * weights[k]
    denom = (nu * nv) ** (1 / 2)
    return 1 - dot / denom
@jit(nopython=True, fastmath=True)
def cosine(u, v, w=None):
    """
    :purpose:
    Weighted cosine *similarity* between two 1D arrays.
    Unlike scipy's cosine distance this returns similarity (1 - distance).
    :params:
    u, v : input arrays, both of shape (n,)
    w : optional weights of shape (n,); defaults to uniform weights
    :returns:
    cosine : float, weighted dot(u, v) / (||u|| * ||v||)
    """
    size = len(u)
    weights = init_w(w, size)
    dot = 0.0
    nu = 0.0
    nv = 0.0
    for k in range(size):
        dot += u[k] * v[k] * weights[k]
        nu += abs(u[k]) ** 2 * weights[k]
        nv += abs(v[k]) ** 2 * weights[k]
    denom = (nu * nv) ** (1 / 2)
    return dot / denom
@jit(nopython=True, fastmath=True)
def cosine_vector_to_matrix(u, m):
    """
    :purpose:
    Computes the cosine similarity between a 1D array and rows of a matrix
    :params:
    u : input vector of shape (n,)
    m : input matrix of shape (m, n)
    :returns:
    cosine vector : np.array of shape (m,) with the cosine similarity
                    between u and each row of m
    :note:
    fixed an undocumented side effect: the original normalized both u and
    the rows of m *in place*, silently modifying the caller's arrays.
    This version leaves the inputs untouched.
    """
    # Normalize u into a fresh array (u / ... allocates a new array).
    u_norm = 0.
    for i in range(len(u)):
        u_norm += abs(u[i]) ** 2
    u_unit = u / u_norm ** (1 / 2)
    out = np.zeros(m.shape[0])
    for i in range(m.shape[0]):
        # Row norm computed on the fly; the row itself is never written.
        row_norm = 0.
        for j in range(m.shape[1]):
            row_norm += abs(m[i][j]) ** 2
        out[i] = np.dot(u_unit, m[i]) / row_norm ** (1 / 2)
    return out
@jit(nopython=True, fastmath=True)
def cosine_matrix_to_matrix(a, b):
    """
    :purpose:
    Computes the cosine similarity between the rows of two matrices
    :params:
    a, b : input matrices of shape (m, n) and (k, n)
           the matrices must share a common dimension at index 1
    :returns:
    cosine matrix : np.array, an (m, k) array of the cosine similarity
                    between the rows of a and b
    :note:
    fixed an undocumented side effect: the original normalized the rows of
    a and b *in place*, silently modifying the caller's arrays. This
    version normalizes into fresh arrays instead.
    """
    a_unit = np.zeros(a.shape)
    for i in range(a.shape[0]):
        norm = 0.
        for j in range(a.shape[1]):
            norm += abs(a[i][j]) ** 2
        a_unit[i] = a[i] / norm ** (1 / 2)
    b_unit = np.zeros(b.shape)
    for i in range(b.shape[0]):
        norm = 0.
        for j in range(b.shape[1]):
            norm += abs(b[i][j]) ** 2
        b_unit[i] = b[i] / norm ** (1 / 2)
    return np.dot(a_unit, b_unit.T)
@jit(nopython=True, fastmath=True)
def cosine_pairwise_distance(a, return_matrix=False):
    """
    :purpose:
    Computes the cosine similarity between the pairwise combinations of
    the rows of a matrix
    :params:
    a : input matrix of shape (n, k)
    return_matrix : bool. If True, return an (n, n) matrix whose (i, j)
                    element is the cosine similarity between rows i and j
                    (diagonal set to 1). If False, return an
                    (n choose 2, 1) vector of the similarities.
    :returns:
    cosine matrix : np.array, (n, n) if return_matrix else (n choose 2, 1)
    :note:
    fixed an undocumented side effect: the original normalized the rows of
    a *in place*, silently modifying the caller's array. The pair index
    list is now also built only on the branch that uses it.
    """
    n = a.shape[0]
    a_unit = np.zeros(a.shape)
    for i in range(n):
        norm = 0.
        for j in range(a.shape[1]):
            norm += abs(a[i][j]) ** 2
        a_unit[i] = a[i] / norm ** (1 / 2)
    if return_matrix:
        out_mat = np.zeros((n, n))
        for i in range(n):
            for j in range(i):
                out_mat[i][j] = np.dot(a_unit[i], a_unit[j])
        # Mirror the strict lower triangle and put 1s on the diagonal
        # (every row has similarity 1 with itself).
        out_mat = out_mat + out_mat.T
        np.fill_diagonal(out_mat, 1)
        return out_mat
    else:
        rows = np.arange(n)
        perm = [(rows[i], rows[j]) for i in range(n) for j in range(i + 1, n)]
        out = np.zeros((len(perm), 1))
        for i in range(len(perm)):
            out[i] = np.dot(a_unit[perm[i][0]], a_unit[perm[i][1]])
        return out
@jit(nopython=True, fastmath=True)
def euclidean(u, v, w=None):
    """
    :purpose:
    Weighted Euclidean distance between two 1D arrays
    :params:
    u, v : input arrays, both of shape (n,)
    w : optional weights of shape (n,); defaults to uniform weights
    :returns:
    euclidean : float, sqrt(sum over i of w[i] * |u[i] - v[i]|^2)
    """
    size = len(u)
    weights = init_w(w, size)
    acc = 0.0
    for k in range(size):
        acc += abs(u[k] - v[k]) ** 2 * weights[k]
    return acc ** (1 / 2)
@jit(nopython=True, fastmath=True)
def rel_entr(x, y):
    """
    :purpose:
    Relative entropy (KL contribution) between two 1D arrays; used
    primarily by jensenshannon.
    :params:
    x, y : input arrays, both of shape (n,). For a finite result both
           should be strictly non-negative; negative values yield +inf.
    :returns:
    rel_entr : float, sum over i of x[i] * log(x[i] / y[i]) with the
               convention 0 * log(0/y) == 0
    """
    acc = 0.0
    for k in range(len(x)):
        if x[k] > 0 and y[k] > 0:
            acc += x[k] * math.log(x[k] / y[k])
        elif x[k] == 0 and y[k] >= 0:
            acc += 0
        else:
            acc += np.inf
    return acc
@jit(nopython=True, fastmath=True)
def jensenshannon(p, q, base=None):
    """
    :purpose:
    Jensen-Shannon divergence between two 1D probability arrays
    :params:
    p, q : input probability arrays, both of shape (n,), non-negative
    base : optional logarithm base for the output
    :returns:
    jensenshannon : float, sqrt of half the summed relative entropies of
                    the normalized inputs against their midpoint
    """
    total_p = 0.0
    total_q = 0.0
    for k in range(len(p)):
        total_p += p[k]
        total_q += q[k]
    # Normalize both inputs so they sum to 1 before comparing.
    p, q = p / total_p, q / total_q
    midpoint = (p + q) / 2
    divergence = rel_entr(p, midpoint) + rel_entr(q, midpoint)
    if base is not None:
        divergence /= math.log(base)
    return (divergence / 2) ** (1 / 2)
@jit(nopython=True, fastmath=True)
def mahalanobis(u, v, VI):
    """
    :purpose:
    Mahalanobis distance between two 1D arrays
    :params:
    u, v : input arrays, both of shape (n,)
    VI : inverse of the covariance matrix of u and v. Note that some
         arrays produce a VI with very large values, costing precision.
    :returns:
    mahalanobis : float, sqrt((u - v) . VI . (u - v))
    """
    diff = u - v
    weighted = np.dot(diff, VI)
    return np.dot(weighted, diff) ** (1 / 2)
@jit(nopython=True, fastmath=True)
def minkowski(u, v, p, w=None):
    """
    :purpose:
    Weighted Minkowski distance between two 1D arrays
    :params:
    u, v : input arrays, both of shape (n,)
    p : order of the norm (p=2 is Euclidean)
    w : optional weights of shape (n,); defaults to uniform weights
    :returns:
    minkowski : float, (sum over i of w[i] * |u[i] - v[i]|^p) ** (1/p)
    """
    size = len(u)
    weights = init_w(w, size)
    acc = 0.0
    for k in range(size):
        acc += abs(u[k] - v[k]) ** p * weights[k]
    return acc ** (1 / p)
@jit(nopython=True, fastmath=True)
def seuclidean(u, v, V):
    """
    :purpose:
    Standardized Euclidean distance between two 1D arrays
    :params:
    u, v : input arrays, both of shape (n,)
    V : array of shape (n,) containing component variances
    :returns:
    seuclidean : float — simply the Euclidean distance weighted by 1/V
    """
    reciprocal_variances = 1 / V
    return euclidean(u, v, reciprocal_variances)
@jit(nopython=True, fastmath=True)
def sqeuclidean(u, v, w=None):
    """
    :purpose:
    Weighted squared Euclidean distance between two 1D arrays
    :params:
    u, v : input arrays, both of shape (n,)
    w : optional weights of shape (n,); defaults to uniform weights
    :returns:
    sqeuclidean : float, sum over i of w[i] * |u[i] - v[i]|^2
    """
    size = len(u)
    weights = init_w(w, size)
    acc = 0.0
    for k in range(size):
        acc += abs(u[k] - v[k]) ** 2 * weights[k]
    return acc
@jit(nopython=True, fastmath=True)
def dice(u, v, w=None):
    """
    :purpose:
    Weighted Dice dissimilarity between two boolean 1D arrays
    :params:
    u, v : boolean input arrays, both of shape (n,)
    w : optional weights of shape (n,); defaults to uniform weights
    :returns:
    dice : float, 1 - 2 * sum(w*u*v) / sum(w*(u + v))
    """
    size = len(u)
    weights = init_w(w, size)
    intersection = 0.0
    total = 0.0
    for k in range(size):
        intersection += u[k] * v[k] * weights[k]
        total += (u[k] + v[k]) * weights[k]
    return 1 - 2 * intersection / total
@jit(nopython=True, fastmath=True)
def hamming(u, v, w=None):
    """
    :purpose:
    Weighted Hamming distance between two boolean 1D arrays
    :params:
    u, v : boolean input arrays, both of shape (n,)
    w : optional weights of shape (n,); defaults to uniform weights
    :returns:
    hamming : float, weighted fraction of positions where u and v differ
    """
    size = len(u)
    weights = init_w(w, size)
    mismatch_weight = 0.0
    total_weight = 0.0
    for k in range(size):
        if u[k] != v[k]:
            mismatch_weight += weights[k]
        total_weight += weights[k]
    return mismatch_weight / total_weight
@jit(nopython=True, fastmath=True)
def jaccard(u, v, w=None):
    """
    :purpose:
    Weighted Jaccard-Needham dissimilarity between two boolean 1D arrays
    :params:
    u, v : boolean input arrays, both of shape (n,)
    w : optional weights of shape (n,); defaults to uniform weights
    :returns:
    jaccard : float, weighted mismatches / (weighted mismatches + weighted
              co-occurrences)
    """
    size = len(u)
    weights = init_w(w, size)
    mismatch_weight = 0.0
    denom = 0.0
    for k in range(size):
        if u[k] != v[k]:
            mismatch_weight += weights[k]
            denom += weights[k]
        denom += u[k] * v[k] * weights[k]
    return mismatch_weight / denom
@jit(nopython=True, fastmath=True)
def kulsinski(u, v, w=None):
    """
    :purpose:
    Weighted Kulsinski dissimilarity between two boolean 1D arrays
    :params:
    u, v : boolean input arrays, both of shape (n,)
    w : optional weights of shape (n,); defaults to uniform weights
    :returns:
    kulsinski : float, (sum(w*(1 - u*v)) + mismatch weight) /
                (total weight + mismatch weight)
    """
    size = len(u)
    weights = init_w(w, size)
    numerator = 0.0
    denominator = 0.0
    for k in range(size):
        numerator += (1 - u[k] * v[k]) * weights[k]
        if u[k] != v[k]:
            numerator += weights[k]
            denominator += weights[k]
        denominator += weights[k]
    return numerator / denominator
@jit(nopython=True, fastmath=True)
def rogerstanimoto(u, v, w=None):
    """
    :purpose:
    Weighted Rogers-Tanimoto dissimilarity between two boolean 1D arrays
    :params:
    u, v : boolean input arrays, both of shape (n,)
    w : optional weights of shape (n,); defaults to uniform weights
    :returns:
    rogerstanimoto : float, 2R / (S + 2R) where R is the mismatch weight
                     and S the agreement weight
    """
    size = len(u)
    weights = init_w(w, size)
    mismatch2 = 0.0
    agreement = 0.0
    for k in range(size):
        if u[k] != v[k]:
            mismatch2 += 2 * weights[k]
        else:
            agreement += weights[k]
    return mismatch2 / (agreement + mismatch2)
@jit(nopython=True, fastmath=True)
def russellrao(u, v, w=None):
    """
    :purpose:
    Computes the Russell-Rao dissimilarity between two boolean 1D arrays
    :params:
    u, v : boolean input arrays, both of shape (n,)
    w : weights at each index of u and v. array of shape (n,)
        if no w is set, it is initialized as an array of ones
        such that it will have no impact on the output
    :returns:
    russellrao : float, the Russell-Rao dissimilarity between u and v
    :note:
    fixes a bug in the original implementation: it re-bound the loop
    bound ``n`` to 0 ("num, n = 0, 0"), so the loop body never executed
    and the final division was always 0 / 0.
    """
    n = len(u)
    w = init_w(w, n)
    num, w_total = 0, 0
    for i in range(n):
        num += u[i] * v[i] * w[i]
        w_total += w[i]
    return (w_total - num) / w_total
@jit(nopython=True, fastmath=True)
def sokalmichener(u, v, w=None):
    """
    :purpose:
    Weighted Sokal-Michener dissimilarity between two boolean 1D arrays
    :params:
    u, v : boolean input arrays, both of shape (n,)
    w : optional weights of shape (n,); defaults to uniform weights
    :returns:
    sokalmichener : float, 2R / (S + 2R) with R the mismatch weight and
                    S the agreement weight
    :note:
    with weights given, scipy returns a different value than this
    implementation (0.8046210454292805 for the docs' example); without
    weights the two agree. scipy's result is reproduced by using
    r += 2 instead of r += 2 * w[i], but that ignores the weights, so
    this (we believe correct) weighted form is kept.
    """
    size = len(u)
    weights = init_w(w, size)
    mismatch2 = 0.0
    agreement = 0.0
    for k in range(size):
        if u[k] != v[k]:
            mismatch2 += 2 * weights[k]
        else:
            agreement += weights[k]
    return mismatch2 / (agreement + mismatch2)
@jit(nopython=True, fastmath=True)
def sokalsneath(u, v, w=None):
    """
    :purpose:
    Weighted Sokal-Sneath dissimilarity between two boolean 1D arrays
    :params:
    u, v : boolean input arrays, both of shape (n,)
    w : optional weights of shape (n,); defaults to uniform weights
    :returns:
    sokalsneath : float, 2R / (2R + sum(w*u*v)) with R the mismatch weight
    """
    size = len(u)
    weights = init_w(w, size)
    mismatch2 = 0.0
    both_true = 0.0
    for k in range(size):
        if u[k] != v[k]:
            mismatch2 += 2 * weights[k]
        both_true += u[k] * v[k] * weights[k]
    return mismatch2 / (mismatch2 + both_true)
@jit(nopython=True, fastmath=True)
def yule(u, v, w=None):
    """
    :purpose:
    Weighted Yule dissimilarity between two boolean 1D arrays
    :params:
    u, v : boolean input arrays, both of shape (n,)
    w : optional weights of shape (n,); defaults to uniform weights
    :returns:
    yule : float, 2*ctf*cft / (ctt*cff + ctf*cft) where ctf/cft/ctt/cff
           are the weighted counts of the four agreement patterns
    """
    size = len(u)
    weights = init_w(w, size)
    # Weighted contingency counts: (u=1,v=0), (u=0,v=1), (1,1), (0,0).
    ctf = 0.0
    cft = 0.0
    ctt = 0.0
    cff = 0.0
    for k in range(size):
        if u[k] != v[k] and u[k] == 1:
            ctf += weights[k]
        elif u[k] != v[k] and u[k] == 0:
            cft += weights[k]
        elif u[k] == v[k] == 1:
            ctt += weights[k]
        elif u[k] == v[k] == 0:
            cff += weights[k]
    return (2 * ctf * cft) / (ctt * cff + ctf * cft)
@jit(nopython=True, fastmath=True)
def vector_to_matrix_distance(u, m, metric, metric_name):
    """
    :purpose:
    Computes the distance between a vector and the rows of a matrix using
    any given metric
    :params:
    u : input vector of shape (n,)
    m : input matrix of shape (m, n)
    metric : the function used to calculate the distance
    metric_name : str name of the metric; only used to dispatch "cosine"
                  to its dedicated, dot-product-optimized implementation
    :returns:
    distance vector : np.array of shape (m,) with the distance between u
                      and each row of m
    """
    if metric_name == "cosine":
        # Cosine has a vectorized fast path; everything else is looped.
        return cosine_vector_to_matrix(u, m)
    row_count = m.shape[0]
    out = np.zeros((row_count))
    for k in range(row_count):
        out[k] = metric(u, m[k])
    return out
@jit(nopython=True, fastmath=True)
def matrix_to_matrix_distance(a, b, metric, metric_name):
    """
    :purpose:
    Computes the distance between the rows of two matrices using any
    given metric
    :params:
    a, b : input matrices of shape (m, n) and (k, n); they must share
           the dimension at index 1
    metric : the function used to calculate the distance
    metric_name : str name of the metric; only used to dispatch "cosine"
                  to its dedicated, dot-product-optimized implementation
    :returns:
    distance matrix : np.array, an (m, k) array of the distance between
                      the rows of a and b
    """
    if metric_name == "cosine":
        # Cosine has a vectorized fast path; everything else is looped.
        return cosine_matrix_to_matrix(a, b)
    rows_a, rows_b = a.shape[0], b.shape[0]
    out = np.zeros((rows_a, rows_b))
    for ia in range(rows_a):
        for ib in range(rows_b):
            out[ia][ib] = metric(a[ia], b[ib])
    return out
@jit(nopython=True, fastmath=True)
def matrix_pairwise_distance(a, metric, metric_name, return_matrix=False):
    """
    :purpose:
    Computes the distance between the pairwise combinations of the rows
    of a matrix
    :params:
    a : input matrix of shape (n, k)
    metric : the function used to calculate the distance
    metric_name : str name of the metric; only used to dispatch "cosine"
                  to its dedicated implementation
    return_matrix : bool. If True, return an (n, n) matrix whose (i, j)
                    element is the distance between rows i and j; if
                    False, return an (n choose 2, 1) vector of distances
    :returns:
    distance matrix : np.array, (n, n) if return_matrix else
                      (n choose 2, 1)
    """
    if metric_name == "cosine":
        return cosine_pairwise_distance(a, return_matrix)
    else:
        row_count = a.shape[0]
        indices = np.arange(row_count)
        pairs = [(indices[i], indices[j])
                 for i in range(row_count)
                 for j in range(i + 1, row_count)]
        if return_matrix:
            # Fill the strict lower triangle, then mirror it.
            mat = np.zeros((row_count, row_count))
            for i in range(row_count):
                for j in range(i):
                    mat[i][j] = metric(a[i], a[j])
            return mat + mat.T
        else:
            out = np.zeros((len(pairs), 1))
            for k in range(len(pairs)):
                out[k] = metric(a[pairs[k][0]], a[pairs[k][1]])
            return out
## START OF SKLEARN METRICS IMPLEMENTATION
@jit(nopython=True, fastmath=True)
def variance(u, w=None):
    """
    Weighted variance of a 1D array (helper for r2 and explained variance).

    u : input array of shape (n,)
    w : optional weights of shape (n,); defaults to all ones via init_w,
        in which case the result is the plain variance
    returns : float, the weighted variance of u
    """
    n = len(u)
    w = init_w(w, n)
    # first pass: weighted mean
    weighted_sum, weight_total = 0, 0
    for idx in range(n):
        weighted_sum += u[idx] * w[idx]
        weight_total += w[idx]
    center = weighted_sum / weight_total
    # second pass: weighted mean of squared deviations from the mean
    spread, weight_total = 0, 0
    for idx in range(n):
        spread += abs(u[idx] - center) ** 2 * w[idx]
        weight_total += w[idx]
    return spread / weight_total
@jit(nopython=True, fastmath=True)
def r2_score(true, pred, w=None):
    """
    Weighted r2 score (coefficient of determination) of predictions vs targets.

    true, pred : input arrays of shape (n,)
    w          : optional weights of shape (n,); defaults to all ones
    returns    : float, 1 - (weighted MSE / weighted variance of true)
    """
    n = len(true)
    w = init_w(w, n)
    target_var = variance(true, w)
    # weighted mean squared error of the predictions
    sq_err, weight_total = 0, 0
    for idx in range(n):
        sq_err += (pred[idx] - true[idx]) ** 2 * w[idx]
        weight_total += w[idx]
    return 1 - ((sq_err / weight_total) / target_var)
@jit(nopython=True, fastmath=True)
def explained_variance_score(true, pred, w=None):
    """
    Weighted explained-variance score: 1 - Var(pred - true) / Var(true).

    true, pred : input arrays of shape (n,)
    w          : optional weights of shape (n,); defaults to all ones
    returns    : float
    """
    residual_var = variance(pred - true, w=w)
    target_var = variance(true, w=w)
    return 1 - (residual_var / target_var)
@jit(nopython=True, fastmath=True)
def max_error(true, pred, w=None):
    """
    Largest absolute difference between targets and predictions.

    true, pred : input arrays of shape (n,)
    w          : unused; kept only for a uniform signature across metrics
    returns    : float, max |true - pred|
    """
    abs_diff = np.abs(true - pred)
    return max(abs_diff)
@jit(nopython=True, fastmath=True)
def mean_absolute_error(true, pred, w=None):
    """
    Weighted mean absolute error between targets and predictions.

    true, pred : input arrays of shape (n,)
    w          : optional weights of shape (n,); defaults to all ones
    returns    : float
    """
    n = len(true)
    w = init_w(w, n)
    err_total, weight_total = 0, 0
    for idx in range(n):
        err_total += abs(true[idx] - pred[idx]) * w[idx]
        weight_total += w[idx]
    return err_total / weight_total
@jit(nopython=True, fastmath=True)
def mean_squared_error(true, pred, w=None, squared=True):
    """
    Weighted mean squared error between targets and predictions.

    true, pred : input arrays of shape (n,)
    w          : optional weights of shape (n,); defaults to all ones
    squared    : True (default) returns MSE; False returns RMSE
    returns    : float
    """
    n = len(true)
    w = init_w(w, n)
    err_total, weight_total = 0, 0
    for idx in range(n):
        err_total += abs(true[idx] - pred[idx]) ** 2 * w[idx]
        weight_total += w[idx]
    mse = err_total / weight_total
    if squared:
        return mse
    return mse ** 0.5
@jit(nopython=True, fastmath=True)
def mean_squared_log_error(true, pred, w=None):
    """
    Weighted mean squared log error between targets and predictions.

    true, pred : input arrays of shape (n,); values must be > -1 since
                 log(x + 1) is taken of each element
    w          : optional weights of shape (n,); defaults to all ones
    returns    : float
    """
    n = len(true)
    w = init_w(w, n)
    err_total, weight_total = 0, 0
    for idx in range(n):
        log_diff = math.log(true[idx] + 1) - math.log(pred[idx] + 1)
        err_total += abs(log_diff) ** 2 * w[idx]
        weight_total += w[idx]
    return err_total / weight_total
@jit(nopython=True, fastmath=True)
def median_absolute_error(true, pred, w=None):
    """
    Median absolute error between targets and predictions.

    true, pred : input arrays of shape (n,)
    w          : unused; kept only for a uniform signature across metrics
    returns    : float, median of |true - pred|
    """
    abs_diff = np.abs(true - pred)
    return np.median(abs_diff)
@jit(nopython=True, fastmath=True)
def confusion_matrix(targets, preds, w=None, normalize=None):
    """
    :purpose:
    Creates a confusion matrix for an array of target and predicted classes
    (used by most of the other classification metrics, along with having its own use)
    :params:
    targets, preds : discrete input arrays, both of shape (n,).
                     labels are assumed to be the integers 0..n_classes-1;
                     non-integer or out-of-range values are not counted
    w : weights at each index of targets and preds. array of shape (n,);
        defaults to all ones so it has no impact on the output
    normalize : None (raw counts), "true" (rows sum to 1), "pred" (columns
                sum to 1), or "all" (whole matrix sums to 1). any other
                value falls through and returns None (original behavior kept)
    :returns:
    confusion_matrix : np.array of shape (n_classes, n_classes)
    """
    w = init_w(w, len(targets))
    n = max(len(np.unique(targets)), len(np.unique(preds)))
    cm = np.zeros((n, n))
    # fix: the original rescanned all samples for every (i, j) cell -- an
    # O(n_classes^2 * n_samples) triple loop. a single pass over the samples
    # accumulates the same per-cell weights in the same order.
    for idx in range(len(targets)):
        t = targets[idx]
        p = preds[idx]
        ti = int(t)
        pi = int(p)
        # only integer labels inside [0, n) are counted, matching the
        # original equality scan against i, j in range(n)
        if ti == t and pi == p and ti >= 0 and ti < n and pi >= 0 and pi < n:
            cm[ti][pi] += w[idx]
    if normalize is None:
        return cm
    elif normalize == 'true':
        # each row divided by its sum
        for i in range(n):
            row_sum = 0.
            for j in range(n):
                row_sum += cm[i][j]
            cm[i] /= row_sum
        return cm
    elif normalize == 'pred':
        # each column divided by its sum
        for i in range(n):
            col_sum = 0.
            for j in range(n):
                col_sum += cm[j][i]
            cm[:, i] /= col_sum
        return cm
    elif normalize == 'all':
        total_sum = 0.
        for i in range(n):
            for j in range(n):
                total_sum += cm[i][j]
        return cm / total_sum
@jit(nopython=True, fastmath=True)
def accuracy_score(targets, preds, cm=None, w=None, normalize=True):
    """
    Accuracy of a discrete prediction array against a target array.

    targets, preds : discrete input arrays of shape (n,)
    cm        : optional precomputed confusion matrix to skip recomputation
    w         : optional weights of shape (n,); defaults to all ones
    normalize : if True return correct / total, otherwise the raw correct count
    returns   : float
    """
    w = init_w(w, len(targets))
    if cm is None:
        cm = confusion_matrix(targets, preds, w=w)
    n = cm.shape[0]
    correct, total = 0, 0
    for row in range(n):
        # diagonal entries are the correctly classified samples
        correct += cm[row][row]
        for col in range(n):
            total += cm[row][col]
    if normalize:
        return correct / total
    return correct
@jit(nopython=True, fastmath=True)
def balanced_accuracy_score(targets, preds, cm=None, w=None, adjusted=False):
    """
    Balanced accuracy: the unweighted mean of per-class recall.

    targets, preds : discrete input arrays of shape (n,)
    cm       : optional precomputed confusion matrix to skip recomputation
    w        : optional weights of shape (n,); defaults to all ones
    adjusted : if True, rescale so chance performance scores 0 and perfect scores 1
    returns  : float
    """
    w = init_w(w, len(targets))
    if cm is None:
        cm = confusion_matrix(targets, preds, w=w)
    n = cm.shape[0]
    diag, row_totals = np.zeros(n), np.zeros(n)
    for row in range(n):
        diag[row] = cm[row][row]  # correctly recovered samples of this class
        for col in range(n):
            row_totals[row] += cm[row][col]  # ground-truth count of this class
    recall_per_class = diag / row_totals
    score = 0.
    for row in range(n):
        score += recall_per_class[row]
    score /= n  # mean per-class recall
    if adjusted:
        chance = 1 / n
        score = (score - chance) / (1 - chance)
    return score
@jit(nopython=True, fastmath=True)
def mean_predictive_value(targets, preds, cm=None, w=None, adjusted=False):
    """
    Mean predictive value: the unweighted mean of per-class precision.

    targets, preds : discrete input arrays of shape (n,)
    cm       : optional precomputed confusion matrix to skip recomputation
    w        : optional weights of shape (n,); defaults to all ones
    adjusted : if True, rescale so chance performance scores 0 and perfect scores 1
    returns  : float
    """
    w = init_w(w, len(targets))
    if cm is None:
        cm = confusion_matrix(targets, preds, w=w)
    n = cm.shape[0]
    diag, col_totals = np.zeros(n), np.zeros(n)
    for row in range(n):
        diag[row] = cm[row][row]  # true results for this class
        for col in range(n):
            col_totals[col] += cm[row][col]  # predictions made per class
    precision_per_class = diag / col_totals
    score = 0.
    for row in range(n):
        score += precision_per_class[row]
    score /= n  # mean fraction of true results among the predicted ones
    if adjusted:
        chance = 1 / n
        score = (score - chance) / (1 - chance)
    return score
@jit(nopython=True, fastmath=True)
def mean_iou(targets, preds, cm=None, w=None, adjusted=False):
    """
    Mean intersection over union (Jaccard) of targets and predictions per class.

    targets, preds : discrete input arrays of shape (n,)
    cm       : optional precomputed confusion matrix to skip recomputation
    w        : optional weights of shape (n,); defaults to all ones
    adjusted : if True, rescale so chance performance scores 0 and perfect scores 1
    returns  : float
    """
    w = init_w(w, len(targets))
    if cm is None:
        cm = confusion_matrix(targets, preds, w=w)
    n = cm.shape[0]
    diag, fn_totals, fp_totals = np.zeros(n), np.zeros(n), np.zeros(n)
    for row in range(n):
        for col in range(n):
            if row == col:
                diag[row] = cm[row][col]  # true results for this class
            else:
                fn_totals[row] += cm[row][col]  # rest of the row: false negatives
                fp_totals[col] += cm[row][col]  # rest of the column: false positives
    # per-class IoU: TP / (FP + FN + TP)
    iou_per_class = diag / (fp_totals + fn_totals + diag)
    score = 0.
    for row in range(n):
        score += iou_per_class[row]
    score /= n  # mean intersection over union
    if adjusted:
        chance = 1 / n
        score = (score - chance) / (1 - chance)
    return score
@jit(nopython=True, fastmath=True)
def brier_score_loss(targets, probs, w=None):
    """
    Weighted Brier score loss between discrete targets and predicted probabilities.

    targets : discrete input array of shape (n,)
    probs   : predicted probability per sample, array of shape (n,)
    w       : optional weights of shape (n,); defaults to all ones
    returns : float, weighted mean of (prob - target)^2
    """
    w = init_w(w, len(targets))
    sq_total, weight_total = 0, 0
    for idx in range(len(targets)):
        sq_total += (probs[idx] - targets[idx]) ** 2 * w[idx]
        weight_total += w[idx]
    return sq_total / weight_total
@jit(nopython=True, fastmath=True)
def precision_score(targets, preds, cm=None, w=None, average='binary'):
    """
    Precision of a discrete prediction array against a target array.

    targets, preds : discrete input arrays of shape (n,)
    cm      : optional precomputed confusion matrix. it must match the chosen
              average: for "macro" and "none" build it with normalize="pred"
    w       : optional weights of shape (n,); defaults to all ones
    average : "micro" (global), "macro" (unweighted per-class mean),
              "none" (per-class array), or "binary" (default). an unknown
              value falls through and returns None
    returns : np.array with the precision score(s)
    """
    w = init_w(w, len(targets))
    if average == 'micro':
        if cm is None:
            cm = confusion_matrix(targets, preds, w=w)
        n = cm.shape[0]
        grand_total = 0.
        for row in range(n):
            for col in range(n):
                grand_total += cm[row][col]
        # global precision: trace over the total weight
        score = 0.
        for row in range(n):
            score += cm[row][row] / grand_total
        return np.array([score])
    elif average == 'macro':
        if cm is None:
            cm = confusion_matrix(targets, preds, w=w, normalize='pred')
        n = cm.shape[0]
        grand_total = 0.
        for row in range(n):
            for col in range(n):
                grand_total += cm[row][col]
        # for a column-normalized cm the total is n, so each term reduces
        # to the diagonal entry (the per-class precision)
        mean_score = 0
        for row in range(n):
            mean_score += cm[row][row] / grand_total * n
        return np.array([mean_score / n])
    elif average == 'none':
        if cm is None:
            cm = confusion_matrix(targets, preds, w=w, normalize='pred')
        n = cm.shape[0]
        grand_total = 0.
        for row in range(n):
            for col in range(n):
                grand_total += cm[row][col]
        per_class = np.zeros(n)
        for row in range(n):
            per_class[row] = cm[row][row] / grand_total * n
        return per_class
    elif average == 'binary':
        if cm is None:
            cm = confusion_matrix(targets, preds, w=w)
        # TP / (TP + FP) for the positive class
        return np.array([cm[1][1] / (cm[1][1] + cm[0][1])])
@jit(nopython=True, fastmath=True)
def recall_score(targets, preds, cm=None, w=None, average='binary'):
    """
    Recall of a discrete prediction array against a target array.

    targets, preds : discrete input arrays of shape (n,)
    cm      : optional precomputed confusion matrix. it must match the chosen
              average: for "macro" and "none" build it with normalize="true"
    w       : optional weights of shape (n,); defaults to all ones
    average : "micro" (global), "macro" (unweighted per-class mean),
              "none" (per-class array), or "binary" (default). an unknown
              value falls through and returns None
    returns : np.array with the recall score(s)
    """
    w = init_w(w, len(targets))
    if average == 'micro':
        if cm is None:
            cm = confusion_matrix(targets, preds, w=w)
        n = cm.shape[0]
        grand_total = 0.
        for row in range(n):
            for col in range(n):
                grand_total += cm[row][col]
        # global recall: trace over the total weight
        score = 0.
        for row in range(n):
            score += cm[row][row] / grand_total
        return np.array([score])
    elif average == 'macro':
        if cm is None:
            cm = confusion_matrix(targets, preds, w=w, normalize='true')
        n = cm.shape[0]
        grand_total = 0.
        for row in range(n):
            for col in range(n):
                grand_total += cm[row][col]
        # for a row-normalized cm the total is n, so each term reduces
        # to the diagonal entry (the per-class recall)
        mean_score = 0
        for row in range(n):
            mean_score += cm[row][row] / grand_total * n
        return np.array([mean_score / n])
    elif average == 'none':
        if cm is None:
            cm = confusion_matrix(targets, preds, w=w, normalize='true')
        n = cm.shape[0]
        grand_total = 0.
        for row in range(n):
            for col in range(n):
                grand_total += cm[row][col]
        per_class = np.zeros(n)
        for row in range(n):
            per_class[row] = cm[row][row] / grand_total * n
        return per_class
    elif average == 'binary':
        if cm is None:
            cm = confusion_matrix(targets, preds, w=w)
        # TP / (TP + FN) for the positive class
        return np.array([cm[1][1] / (cm[1][1] + cm[1][0])])
@jit(nopython=True, fastmath=True)
def f1_score(targets, preds, cm=None, w=None, average='binary'):
    """
    F1 score: the harmonic mean of precision and recall.

    targets, preds : discrete input arrays of shape (n,)
    cm      : optional precomputed confusion matrix, forwarded to
              precision_score and recall_score
    w       : optional weights of shape (n,); defaults to all ones
    average : "micro", "macro", "none", or "binary" (default)
    returns : np.array with the F1 score(s)
    """
    w = init_w(w, len(targets))
    precision = precision_score(targets, preds, cm=cm, w=w, average=average)
    recall = recall_score(targets, preds, cm=cm, w=w, average=average)
    # 2 * P * R / (P + R), elementwise so "none" returns one value per class
    product = precision * recall
    return np.array([2]) * product / (precision + recall)
@jit(nopython=True, fastmath=True)
def log_loss(targets, probs, w=None):
    """
    Weighted log loss (cross-entropy) between discrete targets and probabilities.

    targets : discrete input array of shape (n,); 1 marks the positive class
    probs   : predicted probability per sample, array of shape (n,);
              values must lie strictly inside (0, 1) since log is taken
    w       : optional weights of shape (n,); defaults to all ones
    returns : float
    """
    w = init_w(w, len(targets))
    loss_total, weight_total = 0, 0
    for idx in range(len(targets)):
        if targets[idx] == 1:
            loss_total -= math.log(probs[idx]) * w[idx]
        else:
            loss_total -= math.log(1 - probs[idx]) * w[idx]
        weight_total += w[idx]
    return loss_total / weight_total
| StarcoderdataPython |
1969896 | <filename>BasicConcepts/Functions/FunctionWithLiteralReturnValue.py
def GetNumber():
    """Return the literal value 1, demonstrating a function return value."""
    return 1


# A function's return value can be captured in a variable and reused later.
number = GetNumber()
print(number)
| StarcoderdataPython |
3235992 | # -*- coding: utf-8 -*-
# @Author: 1uci3n
# @Date: 2021-02-17 00:04:36
# @Last Modified by: 1uci3n
# @Last Modified time: 2021-02-17 00:54:18
class Solution(object):
    def arrayPairSum(self, nums):
        """
        Pair up the 2n integers so that the sum of min(a, b) over all pairs
        is maximized: sort and take every other element starting at index 0.

        :type nums: List[int]
        :rtype: int
        """
        # bug fix: len(nums) / 2 is a float in Python 3, which makes
        # range(n) raise TypeError; use integer division instead
        n = len(nums) // 2
        ordered = sorted(nums)
        total = 0
        for i in range(n):
            # each pair contributes its smaller (even-indexed) element
            total += ordered[i * 2]
        return total
3505081 | # Try somethings out saw basically this for someonelse but then tried 4sum rather than 2sum
# Was really not good enough the other was better in terms of speed but same memory
class Solution:
    def divide(self, dividend: int, divisor: int) -> int:
        """
        Integer division truncated toward zero, without using * or /.
        Repeatedly subtracts the largest power-of-four multiple of the
        divisor; the result is clamped to the signed 32-bit range.
        """
        # special cases handled up front (overflow and trivial divisors)
        if (dividend, divisor) == (-2**31, -1):
            return 2**31 - 1
        if divisor == 1:
            return dividend
        if divisor == -1:
            return -dividend
        if divisor == dividend:
            return 1
        if divisor == -dividend:
            return -1
        negative = (dividend < 0) != (divisor < 0)
        remainder = abs(dividend)
        base = abs(divisor)
        quotient = 0
        while remainder >= base:
            # grow the chunk by factors of 4 while it still fits
            chunk, count = base, 1
            while (chunk << 2) <= remainder:
                chunk <<= 2
                count <<= 2
            remainder -= chunk
            quotient += count
        result = -quotient if negative else quotient
        return min(2**31 - 1, max(result, -2**31))
# Read the board dimensions n x m and the flagstone side a from one line.
values = [int(tok) for tok in input().split()]
n, m, a = values[0], values[1], values[2]
# Flagstones that fit entirely inside the board
# (int(n / a) kept exactly as in the original, including float division).
total = int((n / a)) * int((m / a))
if n % a != 0:
    # partial strip along one edge
    total += int((m / a))
if m % a != 0:
    # partial strip along the other edge
    total += int((n / a))
if m % a != 0 and n % a != 0:
    # corner stone needed when both strips exist
    total += 1
print(total)
| StarcoderdataPython |
8055815 | import os
import sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import pglet
from pglet import Text, Textbox, Button, Checkbox
def main(page):
    """Build the sign-in demo page: show the current user and a signout button."""
    # label displaying who is currently signed in
    user_label = Text(page.user_name)

    def signout_clicked(e):
        # button handler: end the current session
        page.signout()

    def page_signin(e):
        # refresh the label with the newly signed-in user
        user_label.value = page.user_name
        page.update()

    def page_signout(e):
        user_label.value = "Not logged in"
        page.update()

    page.on_signin = page_signin
    page.on_signout = page_signout
    page.add(
        user_label,
        Button('Signout', on_click=signout_clicked)
    )
# Start the app named "pglet-signin-test" with main() as the page builder.
# NOTE(review): local=True presumably runs against a local pglet server and
# permissions="*" presumably opens it to any signed-in user -- confirm in pglet docs.
pglet.app("pglet-signin-test", target=main, local=True, permissions="*")
# Configuration values for a counter display/logging panel ("I0_PIN").
# NOTE(review): the reader of these settings is not visible here; the notes
# below are inferences from the names and should be confirmed against it.
counter_name = 'I0_PIN'
Size = wx.Size(1007, 726)  # window size (wx.Size, pixels)
logfile = '/net/helix/data/anfinrud_1502/Logfiles/I0_PIN-2.log'
average_count = 1  # presumably samples averaged per displayed point -- confirm
max_value = 11  # presumably plot y-axis upper bound -- confirm
min_value = 0  # presumably plot y-axis lower bound -- confirm
start_fraction = 0.401
reject_outliers = False  # outlier filtering disabled
outlier_cutoff = 2.5  # used only when reject_outliers is True; presumably in std devs
show_statistics = True
time_window = 172800  # 172800 == 48 * 3600; presumably a 48 h window in seconds
| StarcoderdataPython |
1912081 | <gh_stars>0
import os, sys
from shutil import copyfile
class GetAlleles:
    # Copies the compiled GetMlst/PsqlWriter Java classes from the galaxy tool
    # directory into the current working directory, then invokes GetMlst on the
    # Java CLI with the given arguments.
    # NOTE(review): Python 2 syntax (print statements) -- runs under Python 2 only.
    def __init__(self, option, stFile, alleles):
        workingdir = os.getcwd() + '/'
        # everything before the first '/galaxy' is treated as the galaxy root
        galaxydir = workingdir.split('/galaxy')[0]
        print workingdir
        print galaxydir
        copyfile(galaxydir + '/galaxy/tools/straintracer/GetMlst.class', workingdir + "/GetMlst.class")
        copyfile(galaxydir + '/galaxy/tools/straintracer/PsqlWriter.class', workingdir + "/PsqlWriter.class")
        #copyfile(galaxydir + '/galaxy/tools/straintracer/postgresql-9.4.1208.jre6.jar', workingdir + "/postgresql-9.4.1208.jre6.jar") #Funker ikke
        #postgresql-9.4.1208.jre6.jar
        #os.system('cp -r %s/tools/straintracer/*.class %s/' % (galaxydir, workingdir))
        # NOTE(review): the classpath below is hard-coded to /home/jonas rather
        # than derived from galaxydir -- presumably a leftover; confirm.
        # SECURITY(review): option/stFile/alleles come from sys.argv and are
        # interpolated into a shell command string (os.system) -- command
        # injection risk; confirm inputs are trusted.
        os.system('java -cp /home/jonas/galaxy/tools/straintracer/postgresql-9.4.1208.jre6.jar:. GetMlst %s %s %s' % (option, stFile, alleles))

# Script entry point: forwards the three command-line arguments.
GetAlleles(sys.argv[1], sys.argv[2], sys.argv[3])
1989775 | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (c) 2019 HERE Europe B.V.
#
# SPDX-License-Identifier: MIT
# License-Filename: LICENSE
#
###############################################################################
from qgis.PyQt.QtCore import QSortFilterProxyModel, pyqtSignal
from ...models import SpaceConnectionInfo, XYZSpaceModel
from ...modules.controller import make_qt_args
from .token_ux import TokenUX
class SpaceUX(TokenUX):
    """ Base dialog that contains table view of spaces + Token UX.

    Subclasses must provide the widgets referenced here (`tableView_space`,
    `btn_use`, and the TokenUX widgets) before calling :meth:`config`, and
    must implement :meth:`ui_enable_ok_button`.
    """
    # emitted once per listed space (with make_qt_args(conn_info)) so the
    # owner can fetch a feature count for that space asynchronously
    signal_space_count = pyqtSignal(object)
    def __init__(self):
        # these are like abstract variables
        self.tableView_space = None
    def config(self, token_model):
        """Install the space model (behind a sort proxy) on the table and wire signals."""
        space_model = XYZSpaceModel(self)
        proxy_model = QSortFilterProxyModel()
        proxy_model.setSourceModel(space_model)
        self.tableView_space.setModel( proxy_model)
        self.tableView_space.setSelectionMode(self.tableView_space.SingleSelection)
        self.tableView_space.setSelectionBehavior(self.tableView_space.SelectRows)
        self.tableView_space.setSortingEnabled(True)
        ############# connect gui
        self.tableView_space.pressed.connect(self.cb_table_row_selected)
        self.btn_use.clicked.connect(self._get_space_model().reset)
        TokenUX.config(self,token_model)
    def _get_proxy_model(self):
        # the sort proxy installed on the view in config()
        return self.tableView_space.model()
    def _get_space_model(self):
        # the underlying XYZSpaceModel behind the sort proxy
        return self.tableView_space.model().sourceModel()
    def _get_current_index(self):
        # current selection, mapped from proxy (view) coordinates to source coordinates
        index = self.tableView_space.currentIndex()
        return self._get_proxy_model().mapToSource(index)
    def _after_clear_token(self):
        # clearing the token also invalidates the listed spaces
        TokenUX._after_clear_token(self)
        self._get_space_model().reset()
    ##### CALLBACK
    def cb_table_row_selected(self, index):
        """A row was clicked: sync the token combo to the token in use and revalidate."""
        # pending token -> gui
        self.comboBox_token.setCurrentIndex(self.used_token_idx)
        self.ui_valid_input()
    def cb_display_spaces(self, obj, *a, **kw):
        """Show the spaces from response `obj`, then request a feature count per space."""
        # this function can be put into dialog
        # self.ui_valid_token()
        self.insert_new_valid_token()
        conn_info = SpaceConnectionInfo(self.conn_info)
        lst_id = self.ui_display_spaces(obj)
        if lst_id is not None:
            for space_id in lst_id:
                # copy per space so each emitted signal carries its own space_id
                conn_info = SpaceConnectionInfo(conn_info)
                conn_info.set_(space_id=space_id)
                self.signal_space_count.emit( make_qt_args(conn_info))
    def cb_display_space_count(self, conn_info, obj):
        """Write the feature count from response `obj` into the matching space row."""
        token, space_id = conn_info.get_xyz_space()
        # drop stale responses for a token that is no longer in the input box
        if token != self.get_input_token(): return
        if obj["type"] == "StatisticsResponse":
            cnt = str(obj["count"]["value"])
        else:
            cnt = str(obj["count"])
        # remember the selection; updating the model may reset it
        # NOTE(review): index is in source-model coordinates, while
        # setCurrentIndex on the proxied view presumably expects a proxy
        # index -- confirm the selection is actually restored correctly.
        index = self._get_current_index()
        self._get_space_model().set_feat_count(space_id, cnt)
        self.tableView_space.setCurrentIndex(index)
    ###### UI function
    def ui_display_spaces(self, obj):
        # returns the list of space ids now displayed (or None)
        return self._get_space_model().set_obj(obj)
    def ui_valid_input(self, *a):
        """ Returns true when token is successfully connected and a space is selected;
        also enables the OK button if the condition above is met.
        """
        ok = self.ui_valid_token() and self._get_current_index().isValid()
        self.ui_enable_ok_button(ok)
        return ok
    def ui_enable_ok_button(self, flag):
        # abstract: subclasses enable/disable their OK/Connect button here
        raise NotImplementedError()
| StarcoderdataPython |
4925902 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from abc import abstractmethod
import copy
from typing import Dict, List
from botbuilder.core.turn_context import TurnContext
from botbuilder.schema import InputHints, ActivityTypes
from botbuilder.dialogs.choices import (
Choice,
ChoiceFactory,
ChoiceFactoryOptions,
ListStyle,
)
from botbuilder.schema import Activity
from .prompt_options import PromptOptions
from .prompt_validator_context import PromptValidatorContext
from ..dialog_reason import DialogReason
from ..dialog import Dialog
from ..dialog_instance import DialogInstance
from ..dialog_turn_result import DialogTurnResult
from ..dialog_context import DialogContext
class Prompt(Dialog):
    """
    Defines the core behavior of prompt dialogs. Extends the :class:`Dialog` base class.
    .. remarks::
        When the prompt ends, it returns an object that represents the value it was prompted for.
        Use :meth:`DialogSet.add()` or :meth:`ComponentDialog.add_dialog()` to add a prompt to
        a dialog set or component dialog, respectively.
        Use :meth:`DialogContext.prompt()` or :meth:`DialogContext.begin_dialog()` to start the prompt.
        If you start a prompt from a :class:`WaterfallStep` in a :class:`WaterfallDialog`, then the
        prompt result will be available in the next step of the waterfall.
    """
    ATTEMPT_COUNT_KEY = "AttemptCount"
    # Keys under which this prompt persists its data in the active dialog's state.
    persisted_options = "options"
    persisted_state = "state"
    def __init__(self, dialog_id: str, validator: object = None):
        """
        Creates a new :class:`Prompt` instance.
        :param dialog_id: Unique Id of the prompt within its parent :class:`DialogSet` or
            :class:`ComponentDialog`
        :type dialog_id: str
        :param validator: Optionally provide additional validation and re-prompting logic
        :type validator: Object
        """
        super(Prompt, self).__init__(dialog_id)
        self._validator = validator
    async def begin_dialog(
        self, dialog_context: DialogContext, options: object = None
    ) -> DialogTurnResult:
        """
        Starts a prompt dialog. Called when a prompt dialog is pushed onto the dialog stack and is being activated.
        :param dialog_context: The dialog context for the current turn of the conversation
        :type dialog_context: :class:`DialogContext`
        :param options: Optional, additional information to pass to the prompt being started
        :type options: Object
        :return: The dialog turn result
        :rtype: :class:`DialogTurnResult`
        .. note::
            The result indicates whether the prompt is still active after the turn has been processed.
        """
        if not dialog_context:
            raise TypeError("Prompt(): dc cannot be None.")
        if not isinstance(options, PromptOptions):
            raise TypeError("Prompt(): Prompt options are required for Prompt dialogs.")
        # Ensure prompts have input hint set
        if options.prompt is not None and not options.prompt.input_hint:
            options.prompt.input_hint = InputHints.expecting_input
        if options.retry_prompt is not None and not options.retry_prompt.input_hint:
            options.retry_prompt.input_hint = InputHints.expecting_input
        # Initialize prompt state: options and an empty per-prompt state dict
        # are stored on the active dialog instance so continue/resume can see them.
        state = dialog_context.active_dialog.state
        state[self.persisted_options] = options
        state[self.persisted_state] = {}
        # Send initial prompt (is_retry=False on the first attempt)
        await self.on_prompt(
            dialog_context.context,
            state[self.persisted_state],
            state[self.persisted_options],
            False,
        )
        return Dialog.end_of_turn
    async def continue_dialog(self, dialog_context: DialogContext):
        """
        Continues a dialog.
        :param dialog_context: The dialog context for the current turn of the conversation
        :type dialog_context: :class:`DialogContext`
        :return: The dialog turn result
        :rtype: :class:`DialogTurnResult`
        .. remarks::
            Called when a prompt dialog is the active dialog and the user replied with a new activity.
            If the task is successful, the result indicates whether the dialog is still active after
            the turn has been processed by the dialog.
            The prompt generally continues to receive the user's replies until it accepts the
            user's reply as valid input for the prompt.
        """
        if not dialog_context:
            raise TypeError("Prompt(): dc cannot be None.")
        # Don't do anything for non-message activities
        if dialog_context.context.activity.type != ActivityTypes.message:
            return Dialog.end_of_turn
        # Perform base recognition
        instance = dialog_context.active_dialog
        state = instance.state[self.persisted_state]
        options = instance.state[self.persisted_options]
        recognized = await self.on_recognize(dialog_context.context, state, options)
        # Validate the return value
        is_valid = False
        if self._validator is not None:
            prompt_context = PromptValidatorContext(
                dialog_context.context, recognized, state, options
            )
            is_valid = await self._validator(prompt_context)
            # options comes from persisted state; guard against a missing one
            # before counting this attempt.
            if options is None:
                options = PromptOptions()
            options.number_of_attempts += 1
        else:
            # No custom validator: accept whatever the recognizer accepted.
            if recognized.succeeded:
                is_valid = True
        # Return recognized value or re-prompt
        if is_valid:
            return await dialog_context.end_dialog(recognized.value)
        # Re-prompt only if nothing (e.g. the validator) has already replied.
        if not dialog_context.context.responded:
            await self.on_prompt(dialog_context.context, state, options, True)
        return Dialog.end_of_turn
    async def resume_dialog(
        self, dialog_context: DialogContext, reason: DialogReason, result: object
    ) -> DialogTurnResult:
        """
        Resumes a dialog.
        :param dialog_context: The dialog context for the current turn of the conversation.
        :type dialog_context: :class:`DialogContext`
        :param reason: An enum indicating why the dialog resumed.
        :type reason: :class:`DialogReason`
        :param result: Optional, value returned from the previous dialog on the stack.
        :type result: object
        :return: The dialog turn result
        :rtype: :class:`DialogTurnResult`
        .. remarks::
            Called when a prompt dialog resumes being the active dialog on the dialog stack,
            such as when the previous active dialog on the stack completes.
            If the task is successful, the result indicates whether the dialog is still
            active after the turn has been processed by the dialog.
            Prompts are typically leaf nodes on the stack but the dev is free to push other dialogs
            on top of the stack which will result in the prompt receiving an unexpected call to
            :meth:`resume_dialog()` when the pushed on dialog ends.
            Simply re-prompt the user to avoid that the prompt ends prematurely.
        """
        await self.reprompt_dialog(dialog_context.context, dialog_context.active_dialog)
        return Dialog.end_of_turn
    async def reprompt_dialog(self, context: TurnContext, instance: DialogInstance):
        """
        Reprompts user for input.
        :param context: Context for the current turn of conversation with the user
        :type context: :class:`botbuilder.core.TurnContext`
        :param instance: The instance of the dialog on the stack
        :type instance: :class:`DialogInstance`
        :return: A task representing the asynchronous operation
        """
        state = instance.state[self.persisted_state]
        options = instance.state[self.persisted_options]
        await self.on_prompt(context, state, options, False)
    @abstractmethod
    async def on_prompt(
        self,
        turn_context: TurnContext,
        state: Dict[str, object],
        options: PromptOptions,
        is_retry: bool,
    ):
        """
        Prompts user for input. When overridden in a derived class, prompts the user for input.
        :param turn_context: Context for the current turn of conversation with the user
        :type turn_context: :class:`botbuilder.core.TurnContext`
        :param state: Contains state for the current instance of the prompt on the dialog stack
        :type state: :class:`Dict`
        :param options: A prompt options object constructed from :meth:`DialogContext.prompt()`
        :type options: :class:`PromptOptions`
        :param is_retry: True if this is a retry after invalid input; False on the first attempt
        :type is_retry: bool
        :return: A task representing the asynchronous operation.
        """
    @abstractmethod
    async def on_recognize(
        self,
        turn_context: TurnContext,
        state: Dict[str, object],
        options: PromptOptions,
    ):
        """
        Recognizes the user's input.
        :param turn_context: Context for the current turn of conversation with the user
        :type turn_context: :class:`botbuilder.core.TurnContext`
        :param state: Contains state for the current instance of the prompt on the dialog stack
        :type state: :class:`Dict`
        :param options: A prompt options object constructed from :meth:`DialogContext.prompt()`
        :type options: :class:`PromptOptions`
        :return: A task representing the asynchronous operation.
        .. note::
            When overridden in a derived class, attempts to recognize the user's input.
        """
    def append_choices(
        self,
        prompt: Activity,
        channel_id: str,
        choices: List[Choice],
        style: ListStyle,
        options: ChoiceFactoryOptions = None,
    ) -> Activity:
        """
        Composes an output activity containing a set of choices.
        :param prompt: The prompt to append the user's choice to
        :type prompt: :class:`Activity`
        :param channel_id: Id of the channel the prompt is being sent to
        :type channel_id: str
        :param choices: List of choices to append
        :type choices: :class:`List`
        :param style: Configured style for the list of choices
        :type style: :class:`ListStyle`
        :param options: Optional formatting options to use when presenting the choices
        :type options: :class:`ChoiceFactoryOptions`
        :return: A task representing the asynchronous operation
        .. remarks::
            If the task is successful, the result contains the updated activity.
            When overridden in a derived class, appends choices to the activity when the user
            is prompted for input. This is an helper function to compose an output activity
            containing a set of choices.
        """
        # Get base prompt text (if any)
        text = prompt.text if prompt is not None and prompt.text else ""
        # Create temporary msg
        # TODO: fix once ChoiceFactory complete
        def inline() -> Activity:
            return ChoiceFactory.inline(choices, text, None, options)
        def list_style() -> Activity:
            return ChoiceFactory.list_style(choices, text, None, options)
        def suggested_action() -> Activity:
            return ChoiceFactory.suggested_action(choices, text)
        def hero_card() -> Activity:
            return ChoiceFactory.hero_card(choices, text)
        def list_style_none() -> Activity:
            activity = Activity(type=ActivityTypes.message)
            activity.text = text
            return activity
        def default() -> Activity:
            return ChoiceFactory.for_channel(channel_id, choices, text, None, options)
        # Maps to values in ListStyle Enum; unknown values fall back to
        # for_channel() via switcher.get's default.
        switcher = {
            0: list_style_none,
            1: default,
            2: inline,
            3: list_style,
            4: suggested_action,
            5: hero_card,
        }
        msg = switcher.get(int(style.value), default)()
        # Update prompt with text, actions and attachments
        if prompt:
            # clone the prompt the set in the options (note ActivityEx has Properties so this is the safest mechanism)
            prompt = copy.copy(prompt)
            prompt.text = msg.text
            if (
                msg.suggested_actions is not None
                and msg.suggested_actions.actions is not None
                and msg.suggested_actions.actions
            ):
                prompt.suggested_actions = msg.suggested_actions
            if msg.attachments:
                # Merge generated attachments into the caller's prompt.
                if prompt.attachments:
                    prompt.attachments.extend(msg.attachments)
                else:
                    prompt.attachments = msg.attachments
            return prompt
        # TODO: Update to InputHints.ExpectingInput;
        msg.input_hint = None
        return msg
| StarcoderdataPython |
11304637 | <filename>src/scraper/validahyphe/__init__.py
from twisted.internet import reactor
from txjsonrpc.web.jsonrpc import Proxy
# Package metadata for the validahyphe scraper module.
version = '0.3'
# NOTE(review): looks like a distribution label rather than a version tuple -- confirm.
version_info = 'validalab-scraping'
def print_value(value):
    """Success callback: pretty-print a JSON-RPC result to stdout."""
    from pprint import pprint
    pprint(value)
def print_error(error):
    """Errback: report a failed JSON-RPC call on stdout."""
    print(' !! ERROR: ', error)
def shutdown(result=None):
    """Stop the Twisted reactor; usable directly as a Deferred callback.

    BUG FIX: the original took no parameters, but
    ``HypheConnection.run_command`` registers it via ``d.addCallback(shutdown)``,
    and Twisted invokes callbacks with the previous result as first argument --
    so it raised TypeError and the reactor was never stopped.  The optional
    *result* is accepted and passed through so both ``shutdown()`` and
    callback use keep working.
    """
    reactor.stop()
    return result
class HypheConnection:
    """Blocking, one-shot JSON-RPC client for a Hyphe endpoint."""
    def __init__(self, address):
        """Bind a txjsonrpc proxy to *address* (the JSON-RPC endpoint URL)."""
        self.proxy = Proxy(address)
    def run_command(self, command, args):
        """Invoke *command* with *args* remotely, print the outcome, and run
        the reactor until ``shutdown`` stops it.

        NOTE(review): addCallback passes the call result to ``shutdown``;
        confirm ``shutdown`` accepts that argument.
        """
        deferred = self.proxy.callRemote(command, *args)
        deferred.addCallback(print_value).addErrback(print_error)
        deferred.addCallback(shutdown)
        reactor.run()
| StarcoderdataPython |
11311629 | from __future__ import print_function
import math
# Shared obfuscation key; must be identical for encryption and decryption.
key = int(math.pi * 1e14)
text = input("Enter text : ")
# BUG FIX: `values = reverse = []` bound both names to the SAME list object,
# so the decryption loop below appended to the very list it was iterating
# over -- an infinite loop. Give each name its own list.
values = []
reverse = []
def encryptChar(target):
    """Obfuscate an ordinal: return ((target + 449) / key) - 449."""
    return ((target + 449) / key) - 449
def decryptChar(target):
    """Invert encryptChar: recover the ordinal from an obfuscated value.

    BUG FIX: the original computed ((target + 449) / key) - 42, which is not
    the inverse of encryptChar (wrong constant, and a second division where
    the inverse needs a multiplication), so decryption always produced
    garbage (negative values that made chr() fail downstream).
    NOTE(review): with key ~ 3.1e14 the float rounding inside encryptChar can
    still perturb the recovered ordinal by a few units -- confirm round-trip
    accuracy is acceptable for the intended key magnitude.
    """
    return ((target + 449) * key) - 449
def encrypt(inputText):
    """Return the list of per-character encrypted values for *inputText*."""
    return [encryptChar(ord(character)) for character in inputText]
def readAndDecrypt(filename):
    """Read space-separated encrypted values from *filename* and return the
    decrypted characters as a list.

    BUG FIXES vs. the original:
    * the floats parsed into ``dataListInt`` were discarded and the raw
      *strings* were passed to decryptChar, which fails with TypeError on
      ``target + 449``;
    * ``dataList.remove(' ')`` raised ValueError: splitting on " " leaves an
      empty string ('') for the trailing separator, never ' ';
    * the file handle leaked if decoding raised (now closed via ``with``).
    """
    with open(filename, "r") as infile:
        raw = infile.read()
    decrypted_chars = []
    for token in raw.split(" "):
        if not token:
            # skip the empty token produced by the trailing separator
            continue
        decrypted_chars.append(chr(int(decryptChar(float(token)))))
    return decrypted_chars
def readAndEncrypt(filename):
    """Read *filename* and return the encrypted value for every character.

    BUG FIX: the original called ``encryptedList.append()`` with no argument
    (a TypeError) and never stored the computed value; the unused
    ``encryptedListStr`` local is gone, and the file handle is now closed
    even on error via ``with``.
    """
    with open(filename, "r") as infile:
        data = infile.read()
    return [encryptChar(ord(character)) for character in data]
def readAndEncryptSave(inpFile, outFile):
    """Encrypt the contents of *inpFile* and write the values, space
    separated, to *outFile*.

    Uses ``with`` so the output file is closed even if a write fails.
    """
    encrypted_values = readAndEncrypt(inpFile)
    with open(outFile, "w") as output:
        for value in encrypted_values:
            output.write(str(value) + " ")
def readAndDecryptSave(inpFile, outFile):
    """Decrypt *inpFile* and write the recovered characters to *outFile*.

    Uses ``with`` so the output file is closed even if a write fails.
    """
    decrypted_chars = readAndDecrypt(inpFile)
    with open(outFile, "w") as output:
        for character in decrypted_chars:
            output.write(str(character))
# encryption: obfuscate each character of the interactive input
for character in text:
    values.append(encryptChar(ord(character)))
# decryption: rebinding `reverse` to a fresh list also guards against the
# original `values = reverse = []` aliasing, which made this loop append to
# the list it was iterating over (an infinite loop).
reverse = [chr(int(decryptChar(v))) for v in values]
print(reverse)
# save encrypted data in a file (with-statement closes it on error too)
with open("encrypted.txt", "w") as output:
    for v in values:
        output.write(str(v) + " ")
# read back and decrypt
# BUG FIX: the original read "encrtpyed.txt" (typo); the file written above
# is "encrypted.txt", so the old code always raised FileNotFoundError.
print(readAndDecrypt("encrypted.txt"))
5108020 | # Copyright 2018 eBay Inc.
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from netforce.api.v2 import attributes as netforce_attr
from netforce.common import netforce_exceptions as netforce_exc
from netforce.plugins.common import netforce_constants
from neutron.api import api_common
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import base
from neutron.api.v2 import resource as resource_creator
from neutron import manager
from neutron import wsgi
import urlparse
# Resource names and their collections
# Plural name = collection (used as the URL segment / attr-map key),
# singular = the per-item resource key handed to NetForceController.
SUBNETS = 'subnets'
SUBNET = 'subnet'
PORTS = 'ports'
PORT = 'port'
DEVICES = 'devices'
DEVICE = 'device'
DEVICE_TYPES = 'devicetypes'
DEVICE_TYPE = 'devicetype'
VLANS = 'vlans'
VLAN = 'vlan'
BRIDGEGROUPS = 'bridgegroups'
BRIDGEGROUP = 'bridgegroup'
VPCS = 'vpcs'
VPC = 'vpc'
# NOTE(review): VLANPORTASSOCIATIONS has no entry in RESOURCE_ATTRIBUTE_MAP
# below, so create_vlanportassociation_resource() would raise KeyError.
VLANPORTASSOCIATIONS = 'vlanportassociations'
VLANPORTASSOCIATION = 'vlanportassociation'
BUBBLES = 'bubbles'
BUBBLE = 'bubble'
VRFS = 'vrfs'
VRF = 'vrf'
# Defining resource payloads
RESOURCE_ATTRIBUTE_MAP = {
    # Attribute descriptors consumed by neutron's base.Controller (passed in
    # as res_attr_info by NetForceController below): allow_post/allow_put
    # gate create/update, 'validate' names the neutron validator,
    # 'is_visible' controls whether the field appears in API responses.
    # NOTE(review): there is no VLANPORTASSOCIATIONS entry although that
    # constant and its factory function exist -- see
    # create_vlanportassociation_resource().
    # --- /subnets ---
    SUBNETS: {
        'name': {
            'allow_post': True,
            'default': None,
            'validate': {'type:string': None},
            'is_visible': True
        },
        'id': {
            'allow_post': False,
            'allow_put': True,
            'validate': {'type:uuid': None},
            'is_visible': True,
            'primary_key': True
        },
        'cidr': {
            'allow_post': True,
            'default': None,
            'validate': {'type:subnet': None},
            'is_visible': True
        },
        'gateway_ip': {
            'allow_post': True,
            'allow_put': True,
            'validate': {'type:ip_address_or_none': None},
            'is_visible': True
        },
        'broadcast_ip': {
            'allow_post': True,
            'allow_put': True,
            'validate': {'type:ip_address_or_none': None},
            'is_visible': True
        },
        'netmask': {
            'allow_post': True,
            'allow_put': True,
            'validate': {'type:ip_address_or_none': None},
            'is_visible': True
        },
        'vlan_id': {
            'allow_put': False,
            'allow_post': True,
            'validate': {'type:uuid': None},
            'is_visible': True
        },
        'tenant_id': {
            'allow_post': True,
            'allow_put': False,
            'validate': {'type:string': None},
            'required_by_policy': True,
            'is_visible': True
        },
        'start_ip': {
            'allow_post': False,
            'allow_put': False,
            'default': {},
            'validate': {'type:string': None},
            'is_visible': True
        },
        'end_ip': {
            'allow_post': False,
            'allow_put': False,
            'default': {},
            'validate': {'type:string': None},
            'is_visible': True
        },
        'reserve_ip_count': {
            'allow_post': True,
            'allow_put': False,
            'default': 0,
            'is_visible': True,
            'convert_to': attr.convert_to_int,
        }
    },
    # --- /ports ---
    PORTS: {
        'name': {
            'allow_post': True,
            'allow_put': False,
            'default': None,
            'validate': {'type:string': None},
            'is_visible': True
        },
        'id': {
            'allow_post': False,
            'allow_put': True,
            'validate': {'type:uuid': None},
            'is_visible': True,
            'primary_key': True
        },
        'admin_status': {
            'allow_post': True,
            'allow_put': True,
            'validate': {'type:values': ['ACTIVE', 'SUSPENDED']},
            'is_visible': True
        },
        'label': {
            'allow_post': False,
            'allow_put': True,
            'validate': {'type:string': None},
            'is_visible': True
        },
        'asset_id': {
            'allow_post': True,
            'allow_put': True,
            'default': None,
            'is_visible': True
        },
        'switch_port_mode': {
            'allow_post': True,
            'allow_put': True,
            'validate': {'type:values': ['trunk', 'native', 'access', 'none']},
            'is_visible': True
        },
        'description': {
            'allow_post': True,
            'allow_put': False,
            'validate': {'type:string': None},
            'is_visible': True
        },
        'device_id': {
            'allow_put': False,
            'allow_post': True,
            'validate': {'type:uuid': None},
            'is_visible': True
        },
        'vlans': {
            'allow_post': False,
            'allow_put': True,
            'convert_to': attr.convert_to_list,
            'is_visible': True
        },
        'tenant_id': {
            'allow_post': True,
            'allow_put': False,
            'validate': {'type:string': None},
            'required_by_policy': True,
            'is_visible': True
        },
        'mac_address': {
            'allow_post': False,
            'allow_put': True,
            'validate': {'type:mac_address_or_none': None},
            'is_visible': True
        },
        'ticket': {
            'allow_post': False,
            'allow_put': False,
            'default': None,
            'validate': {'type:string': None},
            'is_visible': True
        },
        'device_data': {
            'allow_post': False,
            'allow_put': False,
            'default': None,
            'validate': {'type:dict': None},
            'is_visible': True
        }
    },
    # --- /devices ---
    DEVICES: {
        'name': {
            'allow_post': True,
            'default': None,
            'validate': {'type:string': None},
            'is_visible': True
        },
        'id': {
            'allow_post': False,
            'allow_put': True,
            'validate': {'type:uuid': None},
            'is_visible': True,
            'primary_key': True
        },
        'type': {
            'allow_put': True,
            'allow_post': True,
            'validate': {'type:string': None},
            'is_visible': True
        },
        'management_ip': {
            'allow_put': True,
            'allow_post': True,
            'validate': {'type:ip_address_or_none': None},
            'is_visible': True
        },
        'username': {
            'allow_put': True,
            'allow_post': True,
            'validate': {'type:string': None},
            'is_visible': True
        },
        # Credential field: deliberately hidden from API responses.
        'password': {
            'allow_put': True,
            'allow_post': True,
            'validate': {'type:string': None},
            'is_visible': False
        },
        'description': {
            'allow_post': True,
            'allow_put': False,
            'validate': {'type:string': None},
            'is_visible': True
        },
        'bridge_group_id': {
            'allow_post': True,
            'allow_put': False,
            'validate': {'type:uuid_or_none': None},
            'is_visible': True
        },
        'ports': {
            'allow_put': False,
            'allow_post': False,
            'convert_to': attr.convert_to_list,
            'is_visible': True
        },
        'os_type': {
            'allow_put': False,
            'allow_post': True,
            'validate': {'type:values': ['eos', 'junos', 'nxos', 'ios']},
            'is_visible': True
        },
        'tenant_id': {
            'allow_post': True,
            'allow_put': False,
            'validate': {'type:string': None},
            'required_by_policy': True,
            'is_visible': True
        },
        'bubble_id': {
            'allow_post': True,
            'allow_put': False,
            'validate': {'type:string': None},
            'required_by_policy': True,
            'is_visible': True
        }
    },
    # --- /devicetypes ---
    DEVICE_TYPES: {
        'name': {
            'allow_post': True,
            'allow_put': False,
            'default': None,
            'validate': {'type:string': None},
            'is_visible': True
        },
        'id': {
            'allow_post': False,
            'allow_put': True,
            'validate': {'type:uuid': None},
            'is_visible': True,
            'primary_key': True
        },
        'type': {
            'allow_put': True,
            'allow_post': True,
            'validate': {'type:string': None},
            'is_visible': True
        },
        'tenant_id': {
            'allow_post': True,
            'allow_put': False,
            'validate': {'type:string': None},
            'required_by_policy': True,
            'is_visible': True
        },
        'description': {
            'allow_post': False,
            'allow_put': True,
            'validate': {'type:string': None},
            'is_visible': True
        }
    },
    # --- /vlans ---
    VLANS: {
        'name': {
            'allow_post': True,
            'allow_put': False,
            'default': None,
            'validate': {'type:string': None},
            'is_visible': True
        },
        'id': {
            'allow_post': False,
            'allow_put': True,
            'validate': {'type:uuid': None},
            'is_visible': True,
            'primary_key': True
        },
        'tag': {
            'allow_post': True,
            'allow_put': True,
            'convert_to': netforce_attr.convert_to_int_if_not_none,
            'is_visible': True,
            'default': None,
        },
        'bridge_group_name': {
            'allow_post': True,
            'allow_put': False,
            'validate': {'type:string': None},
            'is_visible': True
        },
        'bridge_group_id': {
            'allow_post': False,
            'allow_put': False,
            'validate': {'type:uuid': None},
            'is_visible': True
        },
        'vpc_id': {
            'allow_post': False,
            'allow_put': False,
            'validate': {'type:uuid': None},
            'is_visible': True
        },
        'admin_status': {
            'allow_post': True,
            'allow_put': True,
            'validate': {'type:values': ['ACTIVE', 'SUSPENDED']},
            'is_visible': True
        },
        'vpc_name': {
            'allow_post': True,
            'allow_put': False,
            'is_visible': True
        },
        'tenant_id': {
            'allow_post': True,
            'allow_put': False,
            'validate': {'type:string': None},
            'required_by_policy': True,
            'is_visible': True
        }
    },
    # --- /bridgegroups ---
    BRIDGEGROUPS: {
        'name': {
            'allow_post': True,
            'allow_put': False,
            'default': None,
            'validate': {'type:string': None},
            'is_visible': True
        },
        'id': {
            'allow_post': False,
            'allow_put': True,
            'validate': {'type:uuid': None},
            'is_visible': True,
            'primary_key': True
        },
        'description': {
            'allow_post': True,
            'allow_put': False,
            'validate': {'type:string': None},
            'is_visible': True
        },
        'tenant_id': {
            'allow_post': True,
            'allow_put': False,
            'validate': {'type:string': None},
            'required_by_policy': True,
            'is_visible': True
        }
    },
    # --- /vpcs ---
    VPCS: {
        'name': {
            'allow_post': True,
            'allow_put': False,
            'default': None,
            'validate': {'type:string': None},
            'is_visible': True
        },
        'id': {
            'allow_post': False,
            'allow_put': True,
            'validate': {'type:uuid': None},
            'is_visible': True,
            'primary_key': True
        },
        'description': {
            'allow_post': True,
            'allow_put': False,
            'validate': {'type:string': None},
            'is_visible': True
        },
        'label': {
            'allow_post': True,
            'allow_put': False,
            'validate': {'type:string': None},
            'is_visible': True
        },
        'tenant_id': {
            'allow_post': True,
            'allow_put': False,
            'validate': {'type:string': None},
            'required_by_policy': True,
            'is_visible': True
        }
    },
    # --- /bubbles ---
    BUBBLES: {
        'name': {
            'allow_post': True,
            'allow_put': True,
            'default': None,
            'validate': {'type:string': None},
            'is_visible': True
        },
        'id': {
            'allow_post': False,
            'allow_put': False,
            'validate': {'type:uuid': None},
            'is_visible': True,
            'primary_key': True
        },
        'tenant_id': {
            'allow_post': True,
            'allow_put': False,
            'validate': {'type:string': None},
            'required_by_policy': True,
            'is_visible': True
        }
    },
    # --- /vrfs ---
    VRFS: {
        'name': {
            'allow_post': True,
            'allow_put': True,
            'default': None,
            'validate': {'type:string': None},
            'is_visible': True
        },
        'id': {
            'allow_post': False,
            'allow_put': False,
            'validate': {'type:uuid': None},
            'is_visible': True,
            'primary_key': True
        },
        'tenant_id': {
            'allow_post': True,
            'allow_put': False,
            'validate': {'type:string': None},
            'required_by_policy': True,
            'is_visible': True
        },
        'bubble_id': {
            'allow_post': True,
            'allow_put': False,
            'validate': {'type:string': None},
            'required_by_policy': True,
            'is_visible': True
        },
        'vpc_id': {
            'allow_post': True,
            'allow_put': False,
            'validate': {'type:string': None},
            'required_by_policy': True,
            'is_visible': True
        },
        'description': {
            'allow_post': True,
            'allow_put': False,
            'validate': {'type:string': None},
            'required_by_policy': True,
            'is_visible': True
        }
    }
}
class NetForceController(base.Controller, wsgi.Controller):
    """WSGI controller bridging netforce resources to the neutron plugin.

    Wraps neutron's base.Controller so each resource (port, device, vlan...)
    is served by the plugin's create/update/delete/get handlers.
    """
    def __init__(self, resource, collection, res_attr_map, plugin=None):
        """Build a controller for *resource*/*collection* using *res_attr_map*
        as the attribute schema; fall back to the loaded neutron plugin when
        *plugin* is not supplied.
        """
        if not plugin:
            self._plugin = manager.NeutronManager.get_plugin()
        else:
            self._plugin = plugin
        super(NetForceController, self).__init__(
            self._plugin, collection, resource, res_attr_map)
    def create(self, request, **kwargs):
        """Create a new instance of the requested entity.

        Recognized query-string flags are forwarded to the plugin handler as
        keyword arguments (skip_device, patch_primary_junos_subnets,
        one_subnet_only).
        """
        # TODO(aginwala): Make sure to enforce policy enforcement in future.
        body = kwargs.get('body')
        kwargs.pop('body', None)
        url = request.url
        params = urlparse.parse_qs(urlparse.urlparse(url).query,
                                   keep_blank_values=True)
        if params:
            if 'skip_device' in params:
                kwargs.update({"skip_device": True})
            # Note: patch_primary_junos_subnets is for temporary to patch
            # all junos TORs:
            if 'patch_primary_junos_subnets' in params:
                kwargs.update({"patch_primary_junos_subnets": True})
            if 'one_subnet_only' in params:
                kwargs.update(
                    {"one_subnet_only": params['one_subnet_only'][0]})
        # Creates a new instance of the requested entity.
        # Over-riding upstream neutron stable/juno base.py
        body = base.Controller.prepare_request_body(
            request.context, body, True, self._resource, self._attr_info,
            allow_bulk=self._allow_bulk)
        action = self._plugin_handlers[self.CREATE]
        obj_creator = getattr(self._plugin, action)
        kwargs.update({self._resource: body})
        obj = obj_creator(request.context, **kwargs)
        return {self._resource: self._view(request.context,
                                           obj)}
    def show(self, request, **kwargs):
        """Returns detailed information about the requested entity."""
        dsid = kwargs.pop('id', None)
        field_list, added_fields = self._do_field_list(
            api_common.list_args(request, "fields"))
        return {self._resource: self._view(request.context,
                                           self._item(request, dsid,
                                                      do_authz=False,
                                                      field_list=field_list,
                                                      parent_id=None),
                                           fields_to_strip=added_fields)}
    def update(self, request, **kwargs):
        """Update the entity identified by the 'id' kwarg.

        Query-string flags skip_mac_check / skip_cms_check disable the
        corresponding plugin-side checks.
        """
        # TODO(aginwala): Make sure to enforce policy enforcement in future.
        dsid = kwargs.pop('id', None)
        body = kwargs.pop('body', None)
        url = request.url
        params = urlparse.parse_qs(urlparse.urlparse(url).query,
                                   keep_blank_values=True)
        if params:
            if 'skip_mac_check' in params:
                kwargs.update({"check_mac": False})
            if 'skip_cms_check' in params:
                kwargs.update({"check_cms": False})
        # The copy doubles as a dict-shape check: a non-dict body has no
        # .copy() and is rejected as a bad request.
        try:
            payload = body.copy()
        except AttributeError:
            msg = "Invalid format: %s" % request.body
            raise netforce_exc.BadRequest(resource='body', msg=msg)
        # NOTE(review): `payload` is never used after this assignment --
        # looks like dead code kept only for the validation above.
        payload['id'] = dsid
        body = base.Controller.prepare_request_body(
            request.context, body, False, self._resource, self._attr_info,
            allow_bulk=self._allow_bulk)
        action = self._plugin_handlers[self.UPDATE]
        obj_updater = getattr(self._plugin, action)
        kwargs.update({self._resource: body})
        obj = obj_updater(request.context, dsid, **kwargs)
        result = {self._resource: self._view(request.context, obj)}
        return result
    def delete(self, request, **kwargs):
        """Deletes the specified entity."""
        # TODO(aginwala): Make sure to enforce policy enforcement in future.
        dsid = kwargs.pop('id', None)
        action = self._plugin_handlers[self.DELETE]
        obj_deleter = getattr(self._plugin, action)
        obj_deleter(request.context, dsid, **kwargs)
    def index(self, request, **kwargs):
        """Returns a list of the requested entity."""
        return self._items(request, True, None)
def create_port_resource():
    """Build the ResourceExtension exposing the /ports collection."""
    controller = resource_creator.Resource(
        NetForceController(PORT, PORTS, RESOURCE_ATTRIBUTE_MAP[PORTS]),
        faults=base.FAULT_MAP)
    prefix = netforce_constants.COMMON_PREFIXES[netforce_constants.NETFORCE]
    return extensions.ResourceExtension(
        PORTS, controller,
        path_prefix=prefix,
        attr_map=RESOURCE_ATTRIBUTE_MAP.get(PORTS))
def create_device_resource():
    """Build the ResourceExtension exposing the /devices collection."""
    controller = resource_creator.Resource(
        NetForceController(DEVICE, DEVICES, RESOURCE_ATTRIBUTE_MAP[DEVICES]),
        faults=base.FAULT_MAP)
    prefix = netforce_constants.COMMON_PREFIXES[netforce_constants.NETFORCE]
    return extensions.ResourceExtension(
        DEVICES, controller,
        path_prefix=prefix,
        attr_map=RESOURCE_ATTRIBUTE_MAP.get(DEVICES))
def create_device_type_resource():
    """Build the ResourceExtension exposing the /devicetypes collection."""
    controller = resource_creator.Resource(
        NetForceController(DEVICE_TYPE, DEVICE_TYPES,
                           RESOURCE_ATTRIBUTE_MAP[DEVICE_TYPES]),
        faults=base.FAULT_MAP)
    prefix = netforce_constants.COMMON_PREFIXES[netforce_constants.NETFORCE]
    return extensions.ResourceExtension(
        DEVICE_TYPES, controller,
        path_prefix=prefix,
        attr_map=RESOURCE_ATTRIBUTE_MAP.get(DEVICE_TYPES))
def create_vlan_resource():
    """Build the ResourceExtension exposing the /vlans collection."""
    controller = resource_creator.Resource(
        NetForceController(VLAN, VLANS, RESOURCE_ATTRIBUTE_MAP[VLANS]),
        faults=base.FAULT_MAP)
    prefix = netforce_constants.COMMON_PREFIXES[netforce_constants.NETFORCE]
    return extensions.ResourceExtension(
        VLANS, controller,
        path_prefix=prefix,
        attr_map=RESOURCE_ATTRIBUTE_MAP.get(VLANS))
def create_vpc_resource():
    """Build the ResourceExtension exposing the /vpcs collection."""
    controller = resource_creator.Resource(
        NetForceController(VPC, VPCS, RESOURCE_ATTRIBUTE_MAP[VPCS]),
        faults=base.FAULT_MAP)
    prefix = netforce_constants.COMMON_PREFIXES[netforce_constants.NETFORCE]
    return extensions.ResourceExtension(
        VPCS, controller,
        path_prefix=prefix,
        attr_map=RESOURCE_ATTRIBUTE_MAP.get(VPCS))
def create_bg_resource():
    """Build the ResourceExtension exposing the /bridgegroups collection."""
    controller = resource_creator.Resource(
        NetForceController(BRIDGEGROUP, BRIDGEGROUPS,
                           RESOURCE_ATTRIBUTE_MAP[BRIDGEGROUPS]),
        faults=base.FAULT_MAP)
    prefix = netforce_constants.COMMON_PREFIXES[netforce_constants.NETFORCE]
    return extensions.ResourceExtension(
        BRIDGEGROUPS, controller,
        path_prefix=prefix,
        attr_map=RESOURCE_ATTRIBUTE_MAP.get(BRIDGEGROUPS))
def create_vlanportassociation_resource():
    """Build the ResourceExtension for /vlanportassociations.

    NOTE(review): RESOURCE_ATTRIBUTE_MAP has no VLANPORTASSOCIATIONS entry,
    so the subscript below raises KeyError if this factory is ever called.
    It is currently unreachable: Netforceext.get_resources() never invokes
    it. Either add the attribute-map entry or delete this dead factory.
    """
    controller = resource_creator.\
        Resource(NetForceController(VLANPORTASSOCIATION,
                                    VLANPORTASSOCIATIONS,
                                    RESOURCE_ATTRIBUTE_MAP[
                                        VLANPORTASSOCIATIONS]
                                    ),
                 faults=base.FAULT_MAP)
    resource = extensions.\
        ResourceExtension(VLANPORTASSOCIATIONS,
                          controller,
                          path_prefix=netforce_constants.
                          COMMON_PREFIXES[netforce_constants.NETFORCE],
                          attr_map=RESOURCE_ATTRIBUTE_MAP
                          .get(VLANPORTASSOCIATIONS))
    return resource
def create_subnet_resource():
    """Build the ResourceExtension exposing the /subnets collection."""
    controller = resource_creator.Resource(
        NetForceController(SUBNET, SUBNETS, RESOURCE_ATTRIBUTE_MAP[SUBNETS]),
        faults=base.FAULT_MAP)
    prefix = netforce_constants.COMMON_PREFIXES[netforce_constants.NETFORCE]
    return extensions.ResourceExtension(
        SUBNETS, controller,
        path_prefix=prefix,
        attr_map=RESOURCE_ATTRIBUTE_MAP.get(SUBNETS))
def create_bubble_resource():
    """Build the ResourceExtension exposing the /bubbles collection."""
    controller = resource_creator.Resource(
        NetForceController(BUBBLE, BUBBLES, RESOURCE_ATTRIBUTE_MAP[BUBBLES]),
        faults=base.FAULT_MAP)
    prefix = netforce_constants.COMMON_PREFIXES[netforce_constants.NETFORCE]
    return extensions.ResourceExtension(
        BUBBLES, controller,
        path_prefix=prefix,
        attr_map=RESOURCE_ATTRIBUTE_MAP.get(BUBBLES))
def create_vrf_resource():
    """Build the ResourceExtension exposing the /vrfs collection."""
    controller = resource_creator.Resource(
        NetForceController(VRF, VRFS, RESOURCE_ATTRIBUTE_MAP[VRFS]),
        faults=base.FAULT_MAP)
    prefix = netforce_constants.COMMON_PREFIXES[netforce_constants.NETFORCE]
    return extensions.ResourceExtension(
        VRFS, controller,
        path_prefix=prefix,
        attr_map=RESOURCE_ATTRIBUTE_MAP.get(VRFS))
class Netforceext(extensions.ExtensionDescriptor):
    """Extension descriptor registering all netforce REST resources."""
    @classmethod
    def get_name(cls):
        """Return the human-readable extension name."""
        return "netforce"
    @classmethod
    def get_alias(cls):
        """Return the alias used to reference this extension."""
        return "netforce"
    @classmethod
    def get_description(cls):
        """Return a short description of the extension."""
        return "An extension for netforce"
    @classmethod
    def get_namespace(cls):
        """Return the XML namespace for this extension."""
        # FIXME(lhuang8): netforce namespace?
        return "http://docs.openstack.org/netforce/v2.0"
    @classmethod
    def get_updated(cls):
        """Return the timestamp of the last extension update."""
        return "2016-08-17T10:00:00-00:00"
    @classmethod
    def get_resources(cls):
        """Instantiate and return every ResourceExtension this API exposes."""
        factories = (
            create_port_resource,
            create_device_resource,
            create_device_type_resource,
            create_vlan_resource,
            create_bg_resource,
            create_vpc_resource,
            create_subnet_resource,
            create_bubble_resource,
            create_vrf_resource,
        )
        return [factory() for factory in factories]
| StarcoderdataPython |
5074627 | <filename>project_version/services.py
"""
Provide services for command line interface.
"""
from github import Github
from project_version.abstarct import AbstractCheckProjectVersion
from project_version.utils import (
get_non_capitalized_pull_request_title,
parse_project_version,
)
class GitHubCheckProjectVersion(AbstractCheckProjectVersion):
    """
    GitHub check a project version service.
    """
    def get_project_versions(self):
        """
        Read `.project-version` from the base and head branches.

        Returns:
            Base and head branches project versions as a tuple of strings.
        """
        client = Github(login_or_token=self.access_token)
        repo = client.get_repo(full_name_or_id=f'{self.organization}/{self.repository}')
        def read_version(branch):
            # The stored file ends with a newline; strip it out.
            blob = repo.get_contents('.project-version', ref=branch)
            return blob.decoded_content.decode().replace('\n', '')
        return read_version(self.base_branch), read_version(self.head_branch)
class GitHubBumpProjectVersion:
    """
    GitHub bump a project version service.
    """
    def __init__(self, organization, repository, base_branch, head_branch, access_token):
        """
        Construct the object.
        Arguments:
            organization (str): the provider's organization name.
            repository (str): the provider's repository name.
            base_branch (str): a branch to get a project version from. Usually, a default branch.
            head_branch (str): a branch to push bumped project version to. Usually, a feature branch.
            access_token (str): the provider's API access token.
        """
        self.organization = organization
        self.repository = repository
        self.base_branch = base_branch
        self.head_branch = head_branch
        self.access_token = access_token
    def call(self):
        """
        Increment the base branch's patch version and push the result to
        `.project-version` on the head branch.
        Returns:
            True and None, if project version's bumping succeed.
            Otherwise, False and reason as a string.
        """
        client = Github(login_or_token=self.access_token)
        repo = client.get_repo(full_name_or_id=f'{self.organization}/{self.repository}')
        version_blob = repo.get_contents('.project-version', ref=self.base_branch)
        base_version = version_blob.decoded_content.decode().replace('\n', '')
        major, minor, patch = parse_project_version(base_version)
        bumped_version = f'{major}.{minor}.{int(patch) + 1}'
        repo.update_file(
            path='.project-version',
            message=f'Bump project version to {bumped_version}',
            content=f'{bumped_version}\n',
            # the blob sha of the head branch's copy is required for the update
            sha=repo.get_contents('.project-version', ref=self.head_branch).sha,
            branch=self.head_branch,
        )
        return True, None
class GitHubRelease:
    """
    GitHub release service.
    """

    def __init__(self, organization, repository, branch, project_version, access_token):
        """
        Construct the object.

        Arguments:
            organization (str): the provider's organization name.
            repository (str): the provider's repository name.
            branch (str): branch to make a release for.
            project_version (str): a project version to make a release with.
            access_token (str): the provider's API access token.
        """
        self.organization = organization
        self.repository = repository
        self.branch = branch
        self.project_version = project_version
        self.access_token = access_token

    def call(self):
        """
        Make a release based on a project version.

        Returns:
            True and None, if release succeed.
            Otherwise, False and reason as a string.
        """
        client = Github(login_or_token=self.access_token)
        repo = client.get_repo(full_name_or_id=f'{self.organization}/{self.repository}')

        # The release title is built from the branch tip's commit message.
        head_commit = repo.get_commit(sha=self.branch)
        title = get_non_capitalized_pull_request_title(head_commit.commit.message)

        repo.create_git_release(
            tag=f'v{self.project_version}',
            name=f'v{self.project_version}: {title}',
            message='\u2800',  # https://www.compart.com/en/unicode/U+2800
            target_commitish=self.branch,
        )

        return True, None
| StarcoderdataPython |
6415126 |
class ShipRocketException(Exception):
    """Raised when a ShipRocket API operation fails.

    Attributes:
        message: human-readable error description.
        code: optional numeric status/error code.
    """

    def __init__(self, message, code: int = None):
        super().__init__(message, code)
        self.message = message
        self.code = code
| StarcoderdataPython |
4932109 | from three_state_totalistic_ca import TotalisticCell1D
class TestThreeStateTotalisticCA:
    """Regression test for the three-state totalistic 1D cellular automaton."""

    def test_step(self):
        """Rule 777 from a single seed must reproduce the known 4 generations."""
        ca = TotalisticCell1D(777, 4)
        ca.start_single()
        for _ in range(3):
            ca.step()

        expected = [
            [0, 0, 0, 0, 1, 0, 0, 0, 0],
            [0, 0, 0, 1, 1, 1, 0, 0, 0],
            [0, 0, 1, 2, 1, 2, 1, 0, 0],
            [0, 1, 1, 0, 0, 0, 1, 1, 0],
        ]
        assert len(ca.array) == len(expected)
        for expected_row, actual_row in zip(expected, ca.array):
            assert len(expected_row) == len(actual_row)
            for expected_cell, actual_cell in zip(expected_row, actual_row):
                assert expected_cell == actual_cell
| StarcoderdataPython |
9739551 | import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import random, math, os, time
from torch.optim.lr_scheduler import StepLR, ExponentialLR
import numpy as np
np.set_printoptions(threshold=np.inf)
import pandas as pd
import matplotlib.pyplot as plt
from models.DualHead_NoShare import Shared_Encoder, Cross_Attention, Decoder, DualSSIM
from utils.early_stopping import EarlyStopping
from utils.prepare_USA_EMS import test_usa_single_station
# from utils.prepare_QLD import test_qld_single_station
from utils.support import *
from utils.metrics import RMSLE
from utils.adamw import AdamW
from utils.dilate_loss import dilate_loss
from utils.cyclic_scheduler import CyclicLRWithRestarts
from warmup_scheduler import GradualWarmupScheduler
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from tslearn.metrics import dtw, dtw_path
# set the random seeds for reproducability
SEED = 1234
random.seed(SEED)
torch.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def train(model, optimizer, criterion, X_train_left, X_train_right, y_train):
    """Run one epoch of mini-batch training and return the mean batch loss.

    Args:
        model: the DualSSIM network being trained.
        optimizer: optimizer stepped once per batch (inside train_iteration).
        criterion: loss module forwarded to train_iteration.
        X_train_left, X_train_right, y_train: batch-major numpy arrays
            (assumed (samples, time, features) -- they are transposed to
            time-major downstream; confirm against the data loader).

    Returns:
        Mean of the per-batch training losses for this epoch (float).
    """
    iter_per_epoch = int(np.ceil(X_train_left.shape[0] * 1. / BATCH_SIZE))
    # NOTE(review): sized for all EPOCHS, but only this epoch's first
    # iter_per_epoch slots are ever written and averaged below.
    iter_losses = np.zeros(EPOCHS * iter_per_epoch)
    n_iter = 0
    # Shuffle the sample order for this epoch.
    perm_idx = np.random.permutation(X_train_left.shape[0])
    # train for each batch
    for t_i in range(0, X_train_left.shape[0], BATCH_SIZE):
        batch_idx = perm_idx[t_i:(t_i + BATCH_SIZE)]
        x_train_left_batch = np.take(X_train_left, batch_idx, axis=0)
        x_train_right_batch = np.take(X_train_right, batch_idx, axis=0)
        y_train_batch = np.take(y_train, batch_idx, axis=0)
        loss = train_iteration(model, optimizer, criterion, CLIP,
                               x_train_left_batch, x_train_right_batch,
                               y_train_batch)
        iter_losses[t_i // BATCH_SIZE] = loss
        n_iter += 1
    return np.mean(iter_losses[range(0, iter_per_epoch)])
def train_iteration(model, optimizer, criterion, clip, X_train_left,
                    X_train_right, y_train):
    """Run a single optimization step on one mini-batch and return its loss.

    The DILATE loss (shape + temporal terms) is used for backprop; the
    `criterion` argument is currently unused here (see commented line below).
    """
    model.train()
    optimizer.zero_grad()
    # Batch-major -> time-major (seq, batch, features) before feeding the model.
    X_train_left = np.transpose(X_train_left, [1, 0, 2])
    X_train_right = np.transpose(X_train_right, [1, 0, 2])
    y_train = np.transpose(y_train, [1, 0, 2])
    X_train_left_tensor = numpy_to_tvar(X_train_left)
    X_train_right_tensor = numpy_to_tvar(X_train_right)
    y_train_tensor = numpy_to_tvar(y_train)
    output,atten = model(X_train_left_tensor, X_train_right_tensor, y_train_tensor)
    # output = output.view(-1)
    # y_train_tensor = y_train_tensor.view(-1)
    # Permute back to batch-first before computing the DILATE loss.
    output = output.permute(1,0,2)
    y_train_tensor = y_train_tensor.permute(1,0,2)
    # Placeholders; immediately overwritten by dilate_loss below.
    loss_mse,loss_shape,loss_temporal = torch.tensor(0),torch.tensor(0),torch.tensor(0)
    # alpha=0.85 weights shape vs temporal terms; gamma=0.01 is the smoothing.
    loss, loss_shape, loss_temporal = dilate_loss(y_train_tensor,output,0.85, 0.01, device)
    # loss = criterion(output, y_train_tensor)
    # print(loss.size())
    loss.backward()
    # Clip the gradient norm to stabilize training.
    torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
    optimizer.step()
    # # for AdamW+Cyclical Learning Rate
    # scheduler.batch_step()
    # loss_meter.add(loss.item())
    return loss.item()
### evaluate
### evaluate
def evaluate(model, criterion, X_test_left, X_test_right, y_test):
    """Evaluate the model over the test set in mini-batches, without gradients.

    Returns:
        Tuple of per-epoch means: (criterion loss, MAE, "RMSLE" (note: computed
        with the plain RMSE formula in evaluate_iteration), RMSE, DTW).
    """
    epoch_loss = 0  # NOTE(review): never updated or returned.
    iter_per_epoch = int(np.ceil(X_test_left.shape[0] * 1. / BATCH_SIZE))
    iter_losses = np.zeros(EPOCHS * iter_per_epoch)
    # One accumulator per extra metric: [MAE, RMSLE, RMSE, DTW/TDI].
    iter_multiloss = [np.zeros(EPOCHS * iter_per_epoch), np.zeros(EPOCHS * iter_per_epoch),np.zeros(EPOCHS * iter_per_epoch),np.zeros(EPOCHS * iter_per_epoch)]
    perm_idx = np.random.permutation(X_test_left.shape[0])
    n_iter = 0
    with torch.no_grad():
        for t_i in range(0, X_test_left.shape[0], BATCH_SIZE):
            batch_idx = perm_idx[t_i:(t_i + BATCH_SIZE)]
            x_test_left_batch = np.take(X_test_left, batch_idx, axis=0)
            x_test_right_batch = np.take(X_test_right, batch_idx, axis=0)
            y_test_batch = np.take(y_test, batch_idx, axis=0)
            loss, mae, rmsle, rmse, loss_tdi = evaluate_iteration(model, criterion, x_test_left_batch,
                                                                  x_test_right_batch, y_test_batch)
            iter_losses[t_i // BATCH_SIZE] = loss
            iter_multiloss[0][t_i // BATCH_SIZE] = mae
            iter_multiloss[1][t_i // BATCH_SIZE] = rmsle
            iter_multiloss[2][t_i // BATCH_SIZE] = rmse
            iter_multiloss[3][t_i // BATCH_SIZE] = loss_tdi
            n_iter += 1
    return np.mean(iter_losses[range(0, iter_per_epoch)]), np.mean(iter_multiloss[0][range(0, iter_per_epoch)]), np.mean(
        iter_multiloss[1][range(0, iter_per_epoch)]), np.mean(iter_multiloss[2][range(0, iter_per_epoch)]),np.mean(iter_multiloss[3][range(0, iter_per_epoch)])
def evaluate_iteration(model, criterion, X_test_left, X_test_right, y_test):
    """Compute the loss and per-sequence metrics for one evaluation batch.

    Returns:
        (criterion loss, mean MAE, mean "RMSLE", mean RMSE, mean DTW), each
        averaged over the BATCH_SIZE sequences of this batch.
    """
    model.eval()
    # Batch-major -> time-major, matching the layout used during training.
    x_test_left = np.transpose(X_test_left, [1, 0, 2])
    x_test_right = np.transpose(X_test_right, [1, 0, 2])
    y_test = np.transpose(y_test, [1, 0, 2])
    x_test_left_tensor = numpy_to_tvar(x_test_left)
    x_test_right_tensor = numpy_to_tvar(x_test_right)
    y_test_tensor = numpy_to_tvar(y_test)
    # Final positional argument 0 presumably disables teacher forcing for
    # evaluation -- confirm against DualSSIM.forward.
    output,atten = model(x_test_left_tensor, x_test_right_tensor, y_test_tensor, 0)
    # output = output.view(-1)
    # y_test_tensor = y_test_tensor.view(-1)
    loss = criterion(output, y_test_tensor)
    loss_mse, loss_dtw, loss_tdi = 0,0,0
    loss_mae, loss_RMSLE, loss_RMSE = 0,0,0
    for k in range(BATCH_SIZE):
        # First channel of sequence k, flattened to a 1-D numpy array.
        target_k_cpu = y_test_tensor[:,k,0:1].view(-1).detach().cpu().numpy()
        output_k_cpu = output[:,k,0:1].view(-1).detach().cpu().numpy()
        loss_dtw += dtw(target_k_cpu,output_k_cpu)
        path, sim = dtw_path(target_k_cpu, output_k_cpu)
        Dist = 0
        # Temporal Distortion Index: squared index displacement along the DTW path.
        for i,j in path:
            Dist += (i-j)*(i-j)
        loss_tdi += Dist / (N_output*N_output)
        loss_mae += mean_absolute_error(target_k_cpu,output_k_cpu)
        # NOTE(review): despite the name, this is the plain RMSE formula, not
        # RMSLE; the imported RMSLE helper from utils.metrics is unused here.
        loss_RMSLE += np.sqrt(mean_squared_error(target_k_cpu,output_k_cpu))
        loss_RMSE += np.sqrt(mean_squared_error(target_k_cpu,output_k_cpu))
    loss_dtw = loss_dtw / BATCH_SIZE
    loss_tdi = loss_tdi / BATCH_SIZE
    loss_mae = loss_mae / BATCH_SIZE
    loss_RMSLE = loss_RMSLE / BATCH_SIZE
    loss_RMSE = loss_RMSE / BATCH_SIZE
    # # metric
    # output_numpy = output.cpu().data.numpy()
    # y_test_numpy = y_test_tensor.cpu().data.numpy()
    # loss_mae = mean_absolute_error(y_test_numpy,output_numpy)
    # loss_RMSLE = np.sqrt(mean_squared_error(y_test_numpy,output_numpy))
    # loss_RMSE = np.sqrt(mean_squared_error(y_test_numpy,output_numpy))
    # test_loss_meter.add(loss.item())
    # plot_result(output, y_test_tensor)
    # show_attention(x_test_left_tensor, x_test_right_tensor,output,atten)
    # plt.show()
    # NOTE(review): the last returned value is the DTW distance, though the
    # caller (evaluate) binds it to a variable named loss_tdi.
    return loss.item(), loss_mae, loss_RMSLE, loss_RMSE, loss_dtw
def predict_ts(model, X_test_left, X_test_right, scaler_y, max_gap_size=6, BATCH_SIZE=1,device=device):
    """Predict the gap values for one (batched) test sample.

    A zero tensor of length max_gap_size is passed as the target, so only its
    length drives decoding (the 0 ratio presumably disables teacher forcing --
    confirm against DualSSIM.forward). Predictions are inverse-transformed back
    to the original scale.

    Returns:
        (predictions in original units via scaler_y, predictions in normalized units).
    """
    model.eval()
    with torch.no_grad():
        x_test_left = np.transpose(X_test_left, [1, 0, 2])
        x_test_right = np.transpose(X_test_right, [1, 0, 2])
        # Dummy targets of shape (max_gap_size, BATCH_SIZE, 1).
        empty_y_tensor = torch.zeros(max_gap_size, BATCH_SIZE,
                                     1).to(device)
        x_test_left_tensor = numpy_to_tvar(x_test_left)
        x_test_right_tensor = numpy_to_tvar(x_test_right)
        output,atten = model(x_test_left_tensor, x_test_right_tensor, empty_y_tensor, 0)
        output = output.view(-1)
        # scalar
        output_numpy = output.cpu().data.numpy()
        output_numpy_origin = scaler_y.inverse_transform(
            output_numpy.reshape(-1, 1))
    return output_numpy_origin, output_numpy
if __name__ == "__main__":
    # ---------------- model hyperparameters ----------------
    INPUT_DIM = 3
    OUTPUT_DIM = 1
    ENC_HID_DIM = 50
    DEC_HID_DIM = 50
    ENC_DROPOUT = 0.1
    DEC_DROPOUT = 0.1
    ECN_Layers = 1
    DEC_Layers = 1
    LR = 0.001  # learning rate
    CLIP = 1
    EPOCHS = 500
    BATCH_SIZE = 10
    N_output=2

    ## Different test data
    (x_train, y_train), (x_test, y_test), (scaler_x, scaler_y) = test_usa_single_station()

    print('split train/test array')
    # Split at time columns 10 and 16: segment [0] feeds the left encoder,
    # segment [2] the right encoder; segment [1] (the gap) is unused here.
    x_test_list = np.split(x_test, [10, 16], axis=1)
    x_train_list = np.split(x_train, [10, 16], axis=1)

    # Split input into two
    X_train_left = x_train_list[0]
    X_train_right = x_train_list[2]
    X_test_left = x_test_list[0]
    X_test_right = x_test_list[2]

    print('X_train_left:{}'.format(X_train_left.shape))
    print('X_train_right:{}'.format(X_train_right.shape))
    print('X_test_left:{}'.format(X_test_left.shape))
    print('X_test_right:{}'.format(X_test_right.shape))

    # fit for batchsize check dataloader droplast
    # Truncate so the sample counts are multiples of BATCH_SIZE (drop-last).
    X_train_left = X_train_left[:3820]
    X_train_right = X_train_right[:3820]
    X_test_left = X_test_left[:940]
    X_test_right = X_test_right[:940]

    # # ##0103
    # X_train_left = X_train_left[:4930]
    # X_train_right = X_train_right[:4930]
    # X_test_left = X_test_left[:1600]
    # X_test_right = X_test_right[:1600]

    # ---------------- model ----------------
    cross_attn = Cross_Attention(ENC_HID_DIM, DEC_HID_DIM)
    enc = Shared_Encoder(INPUT_DIM, ENC_HID_DIM, DEC_HID_DIM, ECN_Layers,
                         DEC_Layers, ENC_DROPOUT)
    dec = Decoder(OUTPUT_DIM, ENC_HID_DIM, DEC_HID_DIM, DEC_Layers,
                  DEC_DROPOUT, cross_attn)
    model = DualSSIM(enc, dec, device).to(device)
    model.apply(init_weights)
    print(model)
    print(f'The model has {count_parameters(model):,} trainable parameters')

    # Adam
    optimizer = torch.optim.Adam(model.parameters(), lr=LR, betas=(0.9, 0.999))
    # scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
    #                                             step_size=10,
    #                                             gamma=0.1)
    # optimizer = AdamW(model.parameters(), lr=1e-3, weight_decay=1e-5)
    # scheduler = CyclicLRWithRestarts(optimizer, BATCH_SIZE, 3202, restart_period=5, t_mult=1.2, policy="cosine")

    # warmup
    # scheduler_cosine = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, EPOCHS)
    scheduler_steplr = StepLR(optimizer, step_size=10, gamma=0.1)
    scheduler_warmup = GradualWarmupScheduler(optimizer, multiplier=1, total_epoch=5)

    criterion = nn.MSELoss()

    # # visulization visdom
    # vis = Visualizer(env='attention')
    # loss_meter = meter.AverageValueMeter()
    # test_loss_meter = meter.AverageValueMeter()

    # Early Stopping
    # initialize the early_stopping object
    # early stopping patience; how long to wait after last time validation loss improved.
    patience = 10
    early_stopping = EarlyStopping(output_path='checkpoints/EMS_Temp6_1012.pt',
                                   patience=patience,
                                   verbose=True)

    optimizer.zero_grad()
    optimizer.step()

    # ---------------- (disabled) training loop ----------------
    # best_valid_loss = float('inf')
    # for epoch in range(EPOCHS):
    #     scheduler_warmup.step(epoch)
    #     train_epoch_losses = np.zeros(EPOCHS)
    #     evaluate_epoch_losses = np.zeros(EPOCHS)
    #     # loss_meter.reset()
    #     # print('Epoch:', epoch, 'LR:', scheduler.get_lr())
    #     start_time = time.time()
    #     train_loss = train(model, optimizer, criterion, X_train_left,
    #                        X_train_right, y_train)
    #     valid_loss, test_mae, test_rmsle, test_rmse, test_tdi = evaluate(model, criterion, X_test_left, X_test_right,
    #                                                                      y_test)
    #     end_time = time.time()
    #     # # visulization
    #     # vis.plot_many_stack({'train_loss': loss_meter.value()[0], 'test_loss': test_loss_meter.value()[0]})
    #     train_epoch_losses[epoch] = train_loss
    #     evaluate_epoch_losses[epoch] = valid_loss
    #     epoch_mins, epoch_secs = epoch_time(start_time, end_time)
    #     # early_stopping needs the validation loss to check if it has decresed,
    #     # and if it has, it will make a checkpoint of the current model
    #     early_stopping(valid_loss, model)
    #     if early_stopping.early_stop:
    #         print("Early stopping")
    #         break
    #     print(f'Epoch: {epoch + 1:02} | Time: {epoch_mins}m {epoch_secs}s')
    #     print(
    #         f'\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}'
    #     )
    #     print(
    #         f'\t Val. Loss: {valid_loss:.3f} | Val. PPL: {math.exp(valid_loss):7.3f}'
    #     )
    #     print(f'| MAE: {test_mae:.4f} | Test PPL: {math.exp(test_mae):7.4f} |')
    #     print(f'| RMSLE: {test_rmsle:.4f} | Test PPL: {math.exp(test_rmsle):7.4f} |')
    #     print(f'| RMSE: {test_rmse:.4f} | Test PPL: {math.exp(test_rmse):7.4f} |')
    #     print(f'| TDI: {test_tdi:.4f} | Test PPL: {math.exp(test_tdi):7.4f} |')

    # # # prediction
    # # get one sample for attention visulization
    # X_test_left = X_test_left[5:6,:,:]
    # X_test_right = X_test_right[5:6,:,:]
    # y_test = y_test[5:6,:,:]

    #######
    # Load the best checkpoint and report test-set metrics.
    model.load_state_dict(torch.load('checkpoints/EMS_Temp6_1012.pt'))
    test_loss, test_mae, test_rmsle, test_rmse, test_tdi = evaluate(model, criterion, X_test_left,X_test_right, y_test)
    print(f'| Test Loss: {test_loss:.4f} | Test PPL: {math.exp(test_loss):7.4f} |')
    print(f'| MAE: {test_mae:.4f} | Test PPL: {math.exp(test_mae):7.4f} |')
    print(f'| RMSLE: {test_rmsle:.4f} | Test PPL: {math.exp(test_rmsle):7.4f} |')
    print(f'| RMSE: {test_rmse:.4f} | Test PPL: {math.exp(test_rmse):7.4f} |')
    print(f'| DTW: {test_tdi:.4f} | Test PPL: {math.exp(test_tdi):7.4f} |')

    ##########
    ### plot results
    # total = X_test_left.shape[0]
    # for i in range(0,total):
    #     print(i)
    #     X_test_left_one = X_test_left[i,:,:]
    #     X_test_right_one = X_test_right[i,:,:]
    #     y_test_one = y_test[i,:,:]
    #     X_test_left_one = np.expand_dims(X_test_left_one, axis=0)
    #     X_test_right_one = np.expand_dims(X_test_right_one, axis=0)
    #     y_test_one = np.expand_dims(y_test_one, axis=0)
    #     outputs_ori, outputs_scal = predict_ts(model, X_test_left_one, X_test_right_one, scaler_y, max_gap_size=6, BATCH_SIZE=1,device=device)
    #     X_test_left_one = scaler_x.inverse_transform(X_test_left[i,:,:])
    #     X_test_right_one = scaler_x.inverse_transform(X_test_right[i,:,:])
    #     y_test_one = scaler_y.inverse_transform(y_test_one.reshape(1,-1))
    #     X_test_left_list = X_test_left_one[:,2].tolist()
    #     X_test_right_list = X_test_right_one[:,2].tolist()
    #     y_test_list = y_test_one[0].tolist()
    #     outputs_ori_list = outputs_ori[:,0].tolist()
    #     orginal = X_test_left_list + y_test_list + X_test_right_list
    #     prediction = X_test_left_list + outputs_ori_list + X_test_right_list
    #     plt.plot(orginal, '-')
    #     plt.plot(prediction, '*')
    #     plt.show()
    ##########

    # # get one sample for test
    # Take sample 59 (keeping the batch dimension) and visualize its imputation.
    X_test_left = X_test_left[59:60,:,:]
    X_test_right = X_test_right[59:60,:,:]
    y_test = y_test[59:60,:,:]
    print(X_test_left.shape)
    print(X_test_right.shape)
    print(y_test.shape)
    outputs_ori, outputs_scal = predict_ts(model, X_test_left, X_test_right, scaler_y, max_gap_size=6, BATCH_SIZE=1,device=device)
    print('*************')
    # Undo the normalization so prints/plots are in the original units.
    X_test_left = scaler_x.inverse_transform(X_test_left[0])
    X_test_right = scaler_x.inverse_transform(X_test_right[0])
    y_test = scaler_y.inverse_transform(y_test.reshape(1,-1))
    # print(X_test_left[:,3])
    # print(X_test_right[:,3])
    # print(y_test[0])
    print(X_test_left[:,2])
    print(X_test_right[:,2])
    print(y_test[0])
    X_test_left_list = X_test_left[:,0].tolist()
    X_test_right_list = X_test_right[:,0].tolist()
    y_test_list = y_test[0].tolist()
    outputs_ori_list = outputs_ori[:,0].tolist()
    # print(X_test_right[:,2].shape)
    # print(y_test[0].shape)
    print(X_test_left_list)
    print(X_test_right_list)
    print(y_test_list)
    print(outputs_ori_list)
    print('*************')
    print('outputs_ori:{}'.format(outputs_ori_list))
    print('*************')
    print('outputs_scal:{}'.format(outputs_scal))
    # Concatenate left context + gap + right context for plotting.
    orginal = X_test_left_list + y_test_list + X_test_right_list
    prediction = X_test_left_list + outputs_ori_list + X_test_right_list
    print(orginal)
    print(prediction)
    plt.plot(orginal, '-')
    plt.plot(prediction, '*')
    plt.show()
| StarcoderdataPython |
class Resource:
    """A worker that processes at most one task (bid) at a time.

    Class-level defaults below act as initial values; each instance shadows
    them on first write.
    """
    _standing_time = 0  # ticks spent idle
    _task = None        # task currently being processed, or None when free

    def is_free(self):
        """Return True when no task is currently assigned."""
        return self._task is None

    def assign(self, bid, time):
        """Start processing *bid* at tick *time*; silently ignored when busy."""
        if not self.is_free():
            return
        self._task = bid
        self._task.start(time)

    def process(self, time):
        """Advance one tick: count idle time, or run the current task one step.

        Returns the finished task (finalized at time + 1) when it completes on
        this tick, otherwise None.
        """
        if self.is_free():
            self._standing_time += 1
            return None
        if not self._task.process():
            return None
        finished, self._task = self._task, None
        finished.finalize(time + 1)
        return finished

    def monitoring(self):
        """Return the total idle time accumulated so far."""
        return self._standing_time
| StarcoderdataPython |
1937188 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
import spacy
import csv
nlp = spacy.load("fr_core_news_sm")
"""
use Spacy to extract keywords' POS tagging
input : csv file that contains document numbers and keywords assigned to each document
output exemple in ../data_additional/spacy_pos
"""
def extract_keyword_pos(revue):
    """Print spaCy POS tags for every reference keyword of journal *revue*.

    Multi-token keywords (containing a space, '/' or '-') are printed on one
    line followed by all their tags; single-token keywords get one line per
    token with its tag.
    """
    keywords_csv = f'../data_pke/reference_keywords/{revue}.csv'
    with open(keywords_csv, encoding="utf-8") as infile:
        reader = csv.reader(infile, delimiter='\t')
        next(reader)  # skip the header row
        for row in reader:
            for keyword in row[1].split(','):
                doc = nlp(keyword)
                if ' ' in keyword or '/' in keyword or '-' in keyword:
                    print('\t', keyword, end='\t')
                    for token in doc:
                        print(token.pos_, end=' ')
                    print('\n')
                else:
                    for token in doc:
                        print('\t', token, '\t', token.pos_)
def pos_count(pos_filein, pos_fileout):
    """Count occurrences of each POS-tag line and write tab-separated counts.

    Args:
        pos_filein: input file produced by extract_keyword_pos (one tag per line).
        pos_fileout: output file; each line is "<tag>\\t<count>".

    Note:
        The original body referenced undefined names (pos_only_filein /
        pos_only_fileout) instead of its parameters, raising NameError at
        runtime; it now uses the parameters it is given.
    """
    counts = {}
    with open(pos_filein, encoding='utf-8') as f:
        for line in f:
            tag = line.strip()
            counts[tag] = counts.get(tag, 0) + 1
    with open(pos_fileout, 'w', encoding='utf-8') as out:
        for tag, count in counts.items():
            out.write(f'{tag}\t{count}\n')
pos_count('histoiremesure_spacy_pos.txt', 'histoiremesure_spacy_pos_out.txt') | StarcoderdataPython |
39062 | """Main module."""
from functools import reduce
class Calc:
    """A minimal arithmetic calculator."""

    def add(self, *args):
        """Return the sum of all arguments (0 when called with none)."""
        return sum(args)

    def subtract(self, a, b):
        """Return ``a - b``."""
        return a - b

    def multiply(self, *args):
        """Return the product of all arguments.

        Raises:
            ValueError: if any argument is falsy (e.g. 0) -- preserved from the
                original contract, which treats a zero factor as invalid input.
        """
        if not all(args):
            raise ValueError
        return reduce(lambda x, y: x * y, args)

    def divide(self, a, b):
        """Return ``a / b``, or the string ``"inf"`` on division by zero."""
        try:
            return a / b
        except ZeroDivisionError:
            # Was a bare `except` that also swallowed TypeError and friends;
            # only division by zero is an expected failure here.
            return "inf"

    def avg(self, args, ut=None, lt=None):
        """Return the mean of ``args``, optionally restricted to a range.

        Args:
            args: sequence of numbers.
            ut: upper threshold -- values above it are excluded.
            lt: lower threshold -- values below it are excluded.

        Returns:
            The mean of the remaining values, or 0 when none remain.
        """
        values = list(args)
        # Compare against None (not truthiness) so a threshold of 0 is honored.
        if ut is not None:
            values = [x for x in values if x <= ut]
        if lt is not None:
            values = [x for x in values if x >= lt]
        if not values:
            return 0
        return sum(values) / len(values)
| StarcoderdataPython |
5191195 | <gh_stars>1-10
import time
import requests
from features.src.support import helpers
import os
start_time = time.time()
class Space:
    """Thin client for creating an OSIO collaboration space via the REST API."""

    def createSpace(self, spaceName):
        """
        Create a space named *spaceName* on the server from $SERVER_ADDRESS.

        Returns:
            The new space id (str) on success; None when the response body is
            not valid JSON. Any other failure is printed and None is returned
            implicitly.
        """
        # Tokens are stored in a form of "<access_token>;<refresh_token>(;<username>)"
        theToken = helpers.get_user_tokens().split(";")[0]
        print('Starting test.....')
        serverAddress = os.getenv("SERVER_ADDRESS")
        authHeader = 'Bearer {}'.format(theToken)
        headers = {'Accept': 'application/json',
                   'Authorization': authHeader,
                   'X-App': 'OSIO',
                   'X-Git-Provider': 'GitHub',
                   'Content-Type': 'application/json'}
        # Request payload: doubled braces are literal JSON braces in str.format;
        # the trailing backslashes join the lines into one string.
        data = '{{\
            "data": {{\
                "attributes": {{\
                    "description": "This is the osioperf collaboration space",\
                    "name": "{}"\
                }},\
                "type": "spaces"\
            }}\
        }}'.format(spaceName)
        print('Making request to create a new space "{}"...'.format(spaceName))
        try:
            r = requests.post(
                '{}/api/spaces'.format(serverAddress),
                headers=headers,
                data=data
            )
            # print 'request results = {}'.format(r.content)
            try:
                respJson = r.json()
                spaceID = respJson["data"]["id"]
                print('The spaceID is: {}'.format(spaceID))
                return spaceID
            except ValueError:
                # r.json() raises ValueError when the body is not valid JSON.
                return None
        except Exception as e:
            # NOTE(review): if requests.post itself raised, `r` is unbound and
            # the second print below raises NameError -- verify intent.
            print('Unexpected space creation exception found: {}'.format(e))
            print('Raw text of request/response: [{}]'.format(r.text))
| StarcoderdataPython |
196485 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
    'name': 'Indian - Purchase Report(GST)',
    'version': '1.0',
    'description': """GST Purchase Report""",
    'category': 'Accounting',
    # Needs both the Indian localization and the Purchase app.
    'depends': [
        'l10n_in',
        'purchase',
    ],
    # View/report definitions loaded on install.
    'data': [
        'views/report_purchase_order.xml',
    ],
    'installable': True,
    'application': False,
    # Installed automatically once all modules in `depends` are installed.
    'auto_install': True,
}
| StarcoderdataPython |
11263648 | <reponame>simonchristensen1/Zeeguu-Core<filename>zeeguu_core/model/starred_article.py
from datetime import datetime, time
from sqlalchemy.orm.exc import NoResultFound
import zeeguu_core
from sqlalchemy import Column, UniqueConstraint, Integer, ForeignKey, String, DateTime, Boolean
from sqlalchemy.orm import relationship
from zeeguu_core.constants import SIMPLE_TIME_FORMAT
from zeeguu_core.model import Url, User
from zeeguu_core.model.language import Language
class StarredArticle(zeeguu_core.db.Model):
    """
    This keeps track of information regarding a user's starred articles.

    An article is identified by the (url_id, user_id) pair.
    """
    __table_args__ = {'mysql_collate': 'utf8_bin'}

    id = Column(Integer, primary_key=True)

    url_id = Column(Integer, ForeignKey(Url.id))
    url = relationship(Url)

    user_id = Column(Integer, ForeignKey(User.id))
    user = relationship(User)

    title = Column(String(255))

    language_id = Column(Integer, ForeignKey(Language.id))
    language = relationship(Language)

    # Useful for ordering past read articles
    starred_date = Column(DateTime)

    # Together an url_id and user_id identify an article
    # NOTE(review): a UniqueConstraint created as a bare class-level expression
    # is normally ignored by SQLAlchemy unless it is listed in __table_args__ --
    # confirm the constraint actually exists in the generated schema.
    UniqueConstraint(url_id, user_id)

    def __init__(self, user, url, _title: str, language):
        """Create a starred-article row; starred_date defaults to now()."""
        self.user = user
        self.url = url
        self.title = _title
        self.language = language
        self.starred_date = datetime.now()

    def __repr__(self):
        return f'{self.user} has starred: {self.title}'

    def as_dict(self):
        """Serialize to a plain dict (dates formatted with SIMPLE_TIME_FORMAT)."""
        return dict(
            user_id=self.user_id,
            url=self.url.as_string(),
            title=self.title,
            language=self.language.code,
            starred_date=self.starred_date.strftime(SIMPLE_TIME_FORMAT)
        )

    @classmethod
    def find_or_create(cls, session, user: User, _url, _title: str, _language):
        """
        Create a new object and add it to the db if it's not already there;
        otherwise retrieve the existing object.
        """
        language = Language.find(_language)
        url = Url.find_or_create(session, _url, _title)
        try:
            return cls.query.filter_by(
                user=user,
                url=url
            ).one()
        except NoResultFound:
            try:
                new = cls(user, url, _title, language)
                session.add(new)
                session.commit()
                return new
            except Exception as e:
                # Another transaction may have inserted the same row between
                # our lookup and commit; roll back and return the winner's row.
                print ("seems we avoided a race condition")
                session.rollback()
                return cls.query.filter_by(
                    user=user,
                    url=url
                ).one()

    @classmethod
    def delete(cls, session, user, _url):
        """Delete the starred entry for (user, _url); failures are printed, not raised."""
        try:
            url = Url.find(_url)
            item = cls.query.filter_by(
                user=user,
                url=url
            ).one()
            session.delete(item)
            session.commit()
        except Exception as e:
            print(e)

    @classmethod
    def all_for_user(cls, user):
        """Return every StarredArticle row belonging to *user*."""
        return cls.query.filter_by(user=user).all()
6590361 | #Different types of data in python
#1. Strings
#2. Numeric
# a. integer (int)
# b. real (float)
# c. complex (complex)
#3. Sequences
# a. lists
# b. tuples
# c. range*
#4. Boolean - True or False
#5. Many many more types
#Strings
# A string is a series of characters. Anything in quotation marks:
"This is a string"
'This is also a string'
'I told my friend, "Python is the best language."'
| StarcoderdataPython |
3286990 | import numpy as np
from sklego.dummy import RandomRegressor
import pytest
def test_values_uniform(random_xy_dataset_regr):
    """Uniform strategy: predictions stay within the observed target range,
    and the fitted min_/max_ match the target's extremes."""
    X, y = random_xy_dataset_regr
    regressor = RandomRegressor(strategy="uniform")
    predictions = regressor.fit(X, y).predict(X)
    assert ((predictions >= y.min()) & (predictions <= y.max())).all()
    assert regressor.min_ == pytest.approx(y.min(), abs=0.0001)
    assert regressor.max_ == pytest.approx(y.max(), abs=0.0001)
def test_values_normal(random_xy_dataset_regr):
    """Normal strategy: fitted mu_/sigma_ equal the target mean and std."""
    X, y = random_xy_dataset_regr
    fitted = RandomRegressor(strategy="normal").fit(X, y)
    assert fitted.mu_ == pytest.approx(np.mean(y), abs=0.001)
    assert fitted.sigma_ == pytest.approx(np.std(y), abs=0.001)
def test_bad_values():
    """Constructing a RandomRegressor with an unknown strategy raises ValueError."""
    with pytest.raises(ValueError):
        RandomRegressor(strategy="foobar")
| StarcoderdataPython |
3220847 | from typing import Any, List
from adapters.base_adapter import BaseProblemAdapter
from models.problem import Problem, Move
class CRGProblemAdapter(BaseProblemAdapter):
    """
    Map problem data to a Python object that the renderer can use.
    """

    def map_problem(self, problem_data: List[Any]) -> Problem:
        """
        Given a problem data list, return a Problem object.

        :param problem_data: Source from which to map the problem; the first
            item is a list of raw moves like ``[col, row, marker]`` where the
            marker is ``"s"`` (start), ``"f"`` (finish) or ``"m"`` (middle),
            e.g. ``[[5, 5, "s"], [4, 5, "s"], ..., [5, 17, "f"]]``.
        :type problem_data: List[Any]
        :return: Problem object with the parsed problem data as attributes
        :rtype: Problem
        """
        # Copy so the caller's data is not modified.
        raw_moves = problem_data[0].copy()

        moves = []
        for index, raw_move in enumerate(raw_moves):
            col = raw_move[0]
            # Source rows are zero-based; the rendered rows are one-based.
            row = raw_move[1] + 1
            marker = raw_move[2]
            moves.append(
                Move(
                    index + 1,
                    row,
                    col,
                    f'{row} {col}',
                    marker == 's',
                    marker == 'f',
                )
            )

        return Problem(
            '',
            '',
            moves,
            False)
| StarcoderdataPython |
# Read the array size and the N array values from stdin.
N = int(input())
A = [int(x) for x in input().split()]

# left[i] = index of the nearest element to the left of i that is strictly
# greater than A[i], or -1 if there is none. Instead of stepping back one
# position at a time, we jump via previously computed answers, keeping the
# total work near-linear.
left = [-1] * N
for i in range(1, N):
    now = i-1
    while now != -1 and A[now] <= A[i]:
        now = left[now]
    left[i] = now

# right[i] = index of the nearest strictly greater element to the right of i,
# or -1 if there is none (mirror of the loop above).
right = [-1] * N
for i in reversed(range(N-1)):
    now = i+1
    while now != -1 and A[now] <= A[i]:
        now = right[now]
    right[i] = now

# Score each element by (left_index + 1) * (right_index + 1) and print the
# maximum; an index of -1 (no greater neighbour) contributes a factor of 0.
print(max((l+1) * (r+1) for l, r in zip(left, right)))
| StarcoderdataPython |
1774996 | <filename>Leetcode/300. Longest Increasing Subsequence/solution3.py<gh_stars>10-100
class Solution:
    def lengthOfLIS(self, nums: List[int]) -> int:
        """Return the length of the longest strictly increasing subsequence.

        Patience-sorting approach: ``tails[i]`` holds the smallest possible
        tail of an increasing subsequence of length ``i + 1``; the answer is
        the final length of ``tails``. Runs in O(n log n).
        """
        tails = []
        for value in nums:
            insertion_point = bisect.bisect_left(tails, value)
            if insertion_point == len(tails):
                # value extends the longest subsequence found so far.
                tails.append(value)
            else:
                # value lowers the tail of an existing subsequence length.
                tails[insertion_point] = value
        return len(tails)
| StarcoderdataPython |
6610042 | <gh_stars>1-10
#
# Copyright 2002.2.rc1710017 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
PyCOMPSs Utils - Location
===================
This file contains the methods to detect the origin of the call stack.
Useful to detect if we are in the master or in the worker.
"""
import inspect
def i_am_at_master():
    """
    Determine if the execution is being performed in the master node or
    in a worker node.

    The check inspects the second-outermost stack frame: COMPSs worker
    processes enter user code through ``compss_worker`` or
    ``compss_persistent_worker``.

    :return: <Boolean> - True if we are in the master node. False if we are in a worker node.
    """
    # `x not in y` is the idiomatic form of the original `not x in y`.
    return inspect.stack()[-2][3] not in ('compss_worker', 'compss_persistent_worker')
| StarcoderdataPython |
9716236 | """
MIT License
Copyright (c) 2020 MyerFire
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import time
import ratelimit
import core.caches.players
import core.minecraft.hypixel.friends
import core.minecraft.hypixel.guild
import core.minecraft.hypixel.request
import core.minecraft.hypixel.static.static
import core.minecraft.hypixel.status
async def get_player_data(uuid, *, use_cache: bool = True, get_guild: bool = False, get_friends: bool = False,
get_status: bool = False):
if not use_cache:
valid = False
else:
player_cache = await core.caches.players.find_player_data(uuid)
if player_cache: # returns cached data only if it contains all the requested information
valid = True if ((not get_guild) and (not get_friends) or (
get_friends and player_cache["data"]["friends"]) or (
get_guild and player_cache["data"]["guild_data"])) and (time.time()) - \
player_cache["time"] < 14400 else False # cached for 5 minutes
else:
valid = False
if valid:
return player_cache["data"]
else:
try:
player_json = await core.minecraft.hypixel.request.get_player_uuid(uuid)
except NameError:
raise NameError("No Hypixel stats")
except ratelimit.RateLimitException:
raise OverflowError # idk how to make custom exceptions so this is close enough
if get_guild: # only get guild if necessary, because it's another request
try:
player_guild_json = await core.minecraft.hypixel.guild.get_guild_data(uuid)
except NameError:
player_guild_json = None
except ratelimit.RateLimitException:
raise ratelimit.RateLimitException
else:
player_guild_json = None
if get_friends: # only get friends if necessary, because it's another request
try:
player_friends_json = await core.minecraft.hypixel.friends.get_friends(uuid)
except NameError:
player_friends_json = None
except ratelimit.RateLimitException:
raise ratelimit.RateLimitException
else:
player_friends_json = None
if get_status: # only get status if necessary, because it's another request
try:
player_status_json = await core.minecraft.hypixel.status.get_status(uuid)
except NameError:
player_status_json = None
except ratelimit.RateLimitException:
raise ratelimit.RateLimitException
else:
player_status_json = None
player = { # This thing is pretty torture
"name": player_json.get("player", {}).get("displayname", ""),
"level_data": (await core.minecraft.hypixel.static.static.get_network_level_data(
player_json.get("player", {}).get("networkExp", 0))),
"karma": player_json.get("player", {}).get("karma", 0),
"achievement_points": player_json.get("player", {}).get("achievementPoints", 0),
"rank_data": (
await core.minecraft.hypixel.static.static.get_rank_data((player_json.get("player", {}).get("rank", None)),
(player_json.get("player", {}).get("prefix",
None)), (
player_json.get("player", {}).get(
"monthlyPackageRank", None)), (
player_json.get("player", {}).get(
"newPackageRank", None)),
(player_json.get("packageRank", None)))),
"guild_data": player_guild_json,
"friends": player_friends_json,
"status": player_status_json,
"login_times": {
"first": player_json.get("player", {}).get("firstLogin", 0),
"last": player_json.get("player", {}).get("lastLogin", 0)
},
"social_media": {
"twitter": player_json.get("player", {}).get("socialMedia", {}).get("links", {}).get("TWITTER", None),
"youtube": player_json.get("player", {}).get("socialMedia", {}).get("links", {}).get("YOUTUBE", None),
"instagram": player_json.get("player", {}).get("socialMedia", {}).get("links", {}).get("INSTAGRAM", None),
"twitch": player_json.get("player", {}).get("socialMedia", {}).get("links", {}).get("TWITCH", None),
"discord": player_json.get("player", {}).get("socialMedia", {}).get("links", {}).get("DISCORD", None),
"hypixel_forums": player_json.get("player", {}).get("socialMedia", {}).get("links", {}).get("HYPIXEL",
None),
},
"bedwars": {
"star": player_json.get("player", {}).get("achievements", {}).get("bedwars_level", 0),
"coins": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get("coins_bedwars", 0),
"games_played": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"games_played_bedwars", 0),
"beds_broken": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get("beds_broken_bedwars",
0),
"beds_lost": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get("beds_lost_bedwars", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get("kills_bedwars", 0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get("deaths_bedwars", 0),
"final_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get("final_kills_bedwars",
0),
"final_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"final_deaths_bedwars", 0),
"wins": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get("wins_bedwars", 0),
"losses": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get("losses_bedwars", 0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get("winstreak", 0),
"solo": {
"games_played": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_games_played_bedwars", 0),
"beds_broken": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_beds_broken_bedwars", 0),
"beds_lost": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_beds_lost_bedwars", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_kills_bedwars", 0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_deaths_bedwars", 0),
"final_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_final_kills_bedwars", 0),
"final_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_final_deaths_bedwars", 0),
"wins": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get("eight_one_wins_bedwars",
0),
"losses": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_losses_bedwars", 0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_winstreak", 0),
},
"doubles": {
"games_played": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_games_played_bedwars", 0),
"beds_broken": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_beds_broken_bedwars", 0),
"beds_lost": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_beds_lost_bedwars", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_kills_bedwars", 0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_deaths_bedwars", 0),
"final_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_final_kills_bedwars", 0),
"final_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_final_deaths_bedwars", 0),
"wins": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get("eight_two_wins_bedwars",
0),
"losses": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_losses_bedwars", 0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_winstreak", 0),
},
"threes": {
"games_played": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_three_games_played_bedwars", 0),
"beds_broken": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_three_beds_broken_bedwars", 0),
"beds_lost": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_three_beds_lost_bedwars", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_three_kills_bedwars", 0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_three_deaths_bedwars", 0),
"final_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_three_final_kills_bedwars", 0),
"final_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_three_final_deaths_bedwars", 0),
"wins": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get("four_three_wins_bedwars",
0),
"losses": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_three_losses_bedwars", 0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_three_winstreak", 0),
},
"fours": {
"games_played": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_games_played_bedwars", 0),
"beds_broken": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_beds_broken_bedwars", 0),
"beds_lost": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_beds_lost_bedwars", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_kills_bedwars", 0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_deaths_bedwars", 0),
"final_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_final_kills_bedwars", 0),
"final_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_final_deaths_bedwars", 0),
"wins": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get("four_four_wins_bedwars",
0),
"losses": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_losses_bedwars", 0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_winstreak", 0),
},
"four_v_four": {
"games_played": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"two_four_games_played_bedwars", 0),
"beds_broken": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"two_four_beds_broken_bedwars", 0),
"beds_lost": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"two_four_beds_lost_bedwars", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get("two_four_kills_bedwars",
0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"two_four_deaths_bedwars", 0),
"final_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"two_four_final_kills_bedwars", 0),
"final_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"two_four_final_deaths_bedwars", 0),
"wins": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get("two_four_wins_bedwars",
0),
"losses": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"two_four_losses_bedwars", 0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get("two_four_winstreak",
0),
},
"dreams": {
"armed": {
"doubles": {
"games_played": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_armed_games_played_bedwars", 0),
"beds_broken": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_armed_beds_broken_bedwars", 0),
"beds_lost": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_armed_beds_lost_bedwars", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_armed_kills_bedwars", 0),
"void_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_armed_void_kills_bedwars", 0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_armed_deaths_bedwars", 0),
"void_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_armed_void_deaths", 0),
"final_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_armed_final_kills_bedwars", 0),
"final_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_armed_final_deaths_bedwars", 0),
"wins": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_armed_wins_bedwars", 0),
"losses": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_armed_losses_bedwars", 0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_armed_winstreak", 0),
"items_purchased": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_armed__items_purchased_bedwars", 0),
"resources_collected": {
"all": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_armed_resources_collected_bedwars", 0),
"iron": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_armed_iron_resources_collected_bedwars", 0),
"gold": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_armed_gold_resources_collected_bedwars", 0),
"emeralds": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_armed_emerald_resources_collected_bedwars", 0),
"diamonds": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_armed_diamond_resources_collected_bedwars", 0),
}
},
"fours": {
"games_played": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_armed_games_played_bedwars", 0),
"beds_broken": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_armed_beds_broken_bedwars", 0),
"beds_lost": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_armed_beds_lost_bedwars", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_armed_kills_bedwars", 0),
"void_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_armed_void_kills_bedwars", 0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_armed_deaths_bedwars", 0),
"void_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_armed_void_deaths", 0),
"final_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_armed_final_kills_bedwars", 0),
"final_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_armed_final_deaths_bedwars", 0),
"wins": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_armed_wins_bedwars", 0),
"losses": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_armed_losses_bedwars", 0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_armed_winstreak", 0),
"items_purchased": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_armed__items_purchased_bedwars", 0),
"resources_collected": {
"all": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_armed_resources_collected_bedwars", 0),
"iron": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_armed_iron_resources_collected_bedwars", 0),
"gold": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_armed_gold_resources_collected_bedwars", 0),
"emeralds": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_armed_emerald_resources_collected_bedwars", 0),
"diamonds": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_armed_diamond_resources_collected_bedwars", 0),
}
}
},
"castle": {
"games_played": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"castle_games_played_bedwars", 0),
"beds_broken": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"castle_beds_broken_bedwars", 0),
"beds_lost": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"castle_beds_lost_bedwars", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"castle_kills_bedwars", 0),
"void_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"castle_void_kills_bedwars", 0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"castle_deaths_bedwars", 0),
"void_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"castle_void_deaths", 0),
"final_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"castle_final_kills_bedwars", 0),
"final_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"castle_final_deaths_bedwars", 0),
"wins": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get("castle_wins_bedwars",
0),
"losses": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"castle_losses_bedwars", 0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"castle_winstreak", 0),
"items_purchased": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"castle__items_purchased_bedwars", 0),
"resources_collected": {
"all": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"castle_resources_collected_bedwars", 0),
"iron": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"castle_iron_resources_collected_bedwars", 0),
"gold": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"castle_gold_resources_collected_bedwars", 0),
"emeralds": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"castle_emerald_resources_collected_bedwars", 0),
"diamonds": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"castle_diamond_resources_collected_bedwars", 0),
}
},
"lucky_blocks": {
"doubles": {
"games_played": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_lucky_games_played_bedwars", 0),
"beds_broken": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_lucky_beds_broken_bedwars", 0),
"beds_lost": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_lucky_beds_lost_bedwars", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_lucky_kills_bedwars", 0),
"void_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_lucky_void_kills_bedwars", 0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_lucky_deaths_bedwars", 0),
"void_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_lucky_void_deaths", 0),
"final_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_lucky_final_kills_bedwars", 0),
"final_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_lucky_final_deaths_bedwars", 0),
"wins": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_lucky_wins_bedwars", 0),
"losses": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_lucky_losses_bedwars", 0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_lucky_winstreak", 0),
"items_purchased": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_lucky__items_purchased_bedwars", 0),
"resources_collected": {
"all": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_lucky_resources_collected_bedwars", 0),
"iron": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_lucky_iron_resources_collected_bedwars", 0),
"gold": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_lucky_gold_resources_collected_bedwars", 0),
"emeralds": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_lucky_emerald_resources_collected_bedwars", 0),
"diamonds": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_lucky_diamond_resources_collected_bedwars", 0),
}
},
"fours": {
"games_played": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_lucky_games_played_bedwars", 0),
"beds_broken": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_lucky_beds_broken_bedwars", 0),
"beds_lost": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_lucky_beds_lost_bedwars", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_lucky_kills_bedwars", 0),
"void_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_lucky_void_kills_bedwars", 0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_lucky_deaths_bedwars", 0),
"void_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_lucky_void_deaths", 0),
"final_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_lucky_final_kills_bedwars", 0),
"final_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_lucky_final_deaths_bedwars", 0),
"wins": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_lucky_wins_bedwars", 0),
"losses": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_lucky_losses_bedwars", 0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_lucky_winstreak", 0),
"items_purchased": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_lucky__items_purchased_bedwars", 0),
"resources_collected": {
"all": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_lucky_resources_collected_bedwars", 0),
"iron": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_lucky_iron_resources_collected_bedwars", 0),
"gold": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_lucky_gold_resources_collected_bedwars", 0),
"emeralds": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_lucky_emerald_resources_collected_bedwars", 0),
"diamonds": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_lucky_diamond_resources_collected_bedwars", 0),
}
}
},
"rush": {
"solo": {
"games_played": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_rush_games_played_bedwars", 0),
"beds_broken": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_rush_beds_broken_bedwars", 0),
"beds_lost": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_rush_beds_lost_bedwars", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_rush_kills_bedwars", 0),
"void_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_rush_void_kills_bedwars", 0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_rush_deaths_bedwars", 0),
"final_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_rush_final_kills_bedwars", 0),
"final_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_rush_final_deaths_bedwars", 0),
"void_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_rush_void_deaths", 0),
"wins": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_rush_wins_bedwars", 0),
"losses": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_rush_losses_bedwars", 0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_rush_winstreak", 0),
"items_purchased": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_rush__items_purchased_bedwars", 0),
"resources_collected": {
"all": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_rush_resources_collected_bedwars", 0),
"iron": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_rush_iron_resources_collected_bedwars", 0),
"gold": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_rush_gold_resources_collected_bedwars", 0),
"emeralds": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_rush_emerald_resources_collected_bedwars", 0),
"diamonds": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_rush_diamond_resources_collected_bedwars", 0),
}
},
"doubles": {
"games_played": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_rush_games_played_bedwars", 0),
"beds_broken": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_rush_beds_broken_bedwars", 0),
"beds_lost": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_rush_beds_lost_bedwars", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_rush_kills_bedwars", 0),
"void_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_rush_void_kills_bedwars", 0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_rush_deaths_bedwars", 0),
"void_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_rush_void_deaths", 0),
"final_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_rush_final_kills_bedwars", 0),
"final_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_rush_final_deaths_bedwars", 0),
"wins": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_rush_wins_bedwars", 0),
"losses": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_rush_losses_bedwars", 0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_rush_winstreak", 0),
"items_purchased": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_rush__items_purchased_bedwars", 0),
"resources_collected": {
"all": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_rush_resources_collected_bedwars", 0),
"iron": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_rush_iron_resources_collected_bedwars", 0),
"gold": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_rush_gold_resources_collected_bedwars", 0),
"emeralds": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_rush_emerald_resources_collected_bedwars", 0),
"diamonds": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_rush_diamond_resources_collected_bedwars", 0),
}
},
"fours": {
"games_played": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_rush_games_played_bedwars", 0),
"beds_broken": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_rush_beds_broken_bedwars", 0),
"beds_lost": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_rush_beds_lost_bedwars", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_rush_kills_bedwars", 0),
"void_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_rush_void_kills_bedwars", 0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_rush_deaths_bedwars", 0),
"void_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_rush_void_deaths", 0),
"final_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_rush_final_kills_bedwars", 0),
"final_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_rush_final_deaths_bedwars", 0),
"wins": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_rush_wins_bedwars", 0),
"losses": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_rush_losses_bedwars", 0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_rush_winstreak", 0),
"items_purchased": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_rush__items_purchased_bedwars", 0),
"resources_collected": {
"all": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_rush_resources_collected_bedwars", 0),
"iron": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_rush_iron_resources_collected_bedwars", 0),
"gold": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_rush_gold_resources_collected_bedwars", 0),
"emeralds": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_rush_emerald_resources_collected_bedwars", 0),
"diamonds": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_rush_diamond_resources_collected_bedwars", 0),
}
}
},
"ultimate": {
"solo": {
"games_played": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_ultimate_games_played_bedwars", 0),
"beds_broken": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_ultimate_beds_broken_bedwars", 0),
"beds_lost": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_ultimate_beds_lost_bedwars", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_ultimate_kills_bedwars", 0),
"void_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_ultimate_void_kills_bedwars", 0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_ultimate_deaths_bedwars", 0),
"void_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_ultimate_void_deaths", 0),
"final_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_ultimate_final_kills_bedwars", 0),
"final_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_ultimate_final_deaths_bedwars", 0),
"wins": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_ultimate_wins_bedwars", 0),
"losses": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_ultimate_losses_bedwars", 0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_ultimate_winstreak", 0),
"items_purchased": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_ultimate__items_purchased_bedwars", 0),
"resources_collected": {
"all": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_ultimate_resources_collected_bedwars", 0),
"iron": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_ultimate_iron_resources_collected_bedwars", 0),
"gold": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_ultimate_gold_resources_collected_bedwars", 0),
"emeralds": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_ultimate_emerald_resources_collected_bedwars", 0),
"diamonds": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_one_ultimate_diamond_resources_collected_bedwars", 0),
}
},
"doubles": {
"games_played": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_ultimate_games_played_bedwars", 0),
"beds_broken": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_ultimate_beds_broken_bedwars", 0),
"beds_lost": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_ultimate_beds_lost_bedwars", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_ultimate_kills_bedwars", 0),
"void_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_ultimate_void_kills_bedwars", 0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_ultimate_deaths_bedwars", 0),
"void_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_ultimate_void_deaths", 0),
"final_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_ultimate_final_kills_bedwars", 0),
"final_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_ultimate_final_deaths_bedwars", 0),
"wins": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_ultimate_wins_bedwars", 0),
"losses": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_ultimate_losses_bedwars", 0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_ultimate_winstreak", 0),
"items_purchased": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_ultimate__items_purchased_bedwars", 0),
"resources_collected": {
"all": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_ultimate_resources_collected_bedwars", 0),
"iron": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_ultimate_iron_resources_collected_bedwars", 0),
"gold": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_ultimate_gold_resources_collected_bedwars", 0),
"emeralds": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_ultimate_emerald_resources_collected_bedwars", 0),
"diamonds": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_ultimate_diamond_resources_collected_bedwars", 0),
}
},
"fours": {
"games_played": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_ultimate_games_played_bedwars", 0),
"beds_broken": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_ultimate_beds_broken_bedwars", 0),
"beds_lost": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_ultimate_beds_lost_bedwars", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_ultimate_kills_bedwars", 0),
"void_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_ultimate_void_kills_bedwars", 0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_ultimate_deaths_bedwars", 0),
"void_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_ultimate_void_deaths", 0),
"final_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_ultimate_final_kills_bedwars", 0),
"final_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_ultimate_final_deaths_bedwars", 0),
"wins": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_ultimate_wins_bedwars", 0),
"losses": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_ultimate_losses_bedwars", 0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_ultimate_winstreak", 0),
"items_purchased": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_ultimate__items_purchased_bedwars", 0),
"resources_collected": {
"all": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_ultimate_resources_collected_bedwars", 0),
"iron": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_ultimate_iron_resources_collected_bedwars", 0),
"gold": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_ultimate_gold_resources_collected_bedwars", 0),
"emeralds": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_ultimate_emerald_resources_collected_bedwars", 0),
"diamonds": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_ultimate_diamond_resources_collected_bedwars", 0),
}
}
},
"voidless": {
"doubles": {
"games_played": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_voidless_games_played_bedwars", 0),
"beds_broken": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_voidless_beds_broken_bedwars", 0),
"beds_lost": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_voidless_beds_lost_bedwars", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_voidless_kills_bedwars", 0),
"void_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_voidless_void_kills_bedwars", 0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_voidless_deaths_bedwars", 0),
"void_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_voidless_void_deaths", 0),
"final_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_voidless_final_kills_bedwars", 0),
"final_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_voidless_final_deaths_bedwars", 0),
"wins": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_voidless_wins_bedwars", 0),
"losses": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_voidless_losses_bedwars", 0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_voidless_winstreak", 0),
"items_purchased": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_voidless__items_purchased_bedwars", 0),
"resources_collected": {
"all": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_voidless_resources_collected_bedwars", 0),
"iron": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_voidless_iron_resources_collected_bedwars", 0),
"gold": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_voidless_gold_resources_collected_bedwars", 0),
"emeralds": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_voidless_emerald_resources_collected_bedwars", 0),
"diamonds": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"eight_two_voidless_diamond_resources_collected_bedwars", 0),
}
},
"fours": {
"games_played": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_voidless_games_played_bedwars", 0),
"beds_broken": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_voidless_beds_broken_bedwars", 0),
"beds_lost": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_voidless_beds_lost_bedwars", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_voidless_kills_bedwars", 0),
"void_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_voidless_void_kills_bedwars", 0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_voidless_deaths_bedwars", 0),
"void_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_voidless_void_deaths", 0),
"final_kills": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_voidless_final_kills_bedwars", 0),
"final_deaths": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_voidless_final_deaths_bedwars", 0),
"wins": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_voidless_wins_bedwars", 0),
"losses": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_voidless_losses_bedwars", 0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_voidless_winstreak", 0),
"items_purchased": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_voidless__items_purchased_bedwars", 0),
"resources_collected": {
"all": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_voidless_resources_collected_bedwars", 0),
"iron": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_voidless_iron_resources_collected_bedwars", 0),
"gold": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_voidless_gold_resources_collected_bedwars", 0),
"emeralds": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_voidless_emerald_resources_collected_bedwars", 0),
"diamonds": player_json.get("player", {}).get("stats", {}).get("Bedwars", {}).get(
"four_four_voidless_diamond_resources_collected_bedwars", 0),
}
}
}
}
},
"duels": {
"coins": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("coins", 0),
"games_played": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("games_played_duels",
0),
"wins": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("wins", 0),
"losses": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("losses", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("kills", 0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("deaths", 0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("current_winstreak", 0),
"bow": {
"games_played": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"bow_duel_rounds_played", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("bow_duel_kills", 0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("bow_duel_deaths", 0),
"wins": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("bow_duel_wins", 0),
"losses": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("bow_duel_losses", 0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"current_bow_winstreak", 0),
"bow_shots": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("bow_duel_bow_shots",
0),
"bow_hits": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("bow_duel_bow_hits", 0),
"damage_dealt": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"bow_duel_damage_dealt", 0),
},
"bridge": {
"solo": {
"games_played": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"bridge_duel_rounds_played", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("bridge_duel_kills",
0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("bridge_duel_deaths",
0),
"wins": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("bridge_duel_wins", 0),
"losses": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("bridge_duel_losses",
0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"current_bridge_winstreak", 0),
"melee_swings": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"bridge_duel_melee_swings", 0),
"melee_hits": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"bridge_duel_melee_hits", 0),
"bow_shots": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"bridge_duel_bow_shots", 0),
"bow_hits": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"bridge_duel_bow_hits", 0),
"damage_dealt": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"bridge_duel_damage_dealt", 0),
},
"doubles": {
"games_played": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"bridge_doubles_rounds_played", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("bridge_doubles_kills",
0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"bridge_doubles_deaths", 0),
"wins": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("bridge_doubles_wins",
0),
"losses": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"bridge_doubles_losses", 0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"current_bridge_winstreak", 0),
"melee_swings": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"bridge_doubles_melee_swings", 0),
"melee_hits": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"bridge_doubles_melee_hits", 0),
"bow_shots": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"bridge_doubles_bow_shots", 0),
"bow_hits": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"bridge_doubles_bow_hits", 0),
"damage_dealt": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"bridge_doubles_damage_dealt", 0),
},
"fours": {
"games_played": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"bridge_four_rounds_played", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("bridge_four_kills",
0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("bridge_four_deaths",
0),
"wins": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("bridge_four_wins", 0),
"losses": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("bridge_four_losses",
0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"current_bridge_winstreak", 0),
"melee_swings": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"bridge_four_melee_swings", 0),
"melee_hits": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"bridge_four_melee_hits", 0),
"bow_shots": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"bridge_four_bow_shots", 0),
"bow_hits": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"bridge_four_bow_hits", 0),
"damage_dealt": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"bridge_four_damage_dealt", 0),
},
},
"classic": {
"games_played": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"classic_duel_rounds_played", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("classic_duel_kills", 0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("classic_duel_deaths", 0),
"wins": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("classic_duel_wins", 0),
"losses": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("classic_duel_losses", 0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"current_classic_winstreak", 0),
"melee_swings": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"classic_duel_melee_swings", 0),
"melee_hits": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"classic_duel_melee_hits", 0),
"bow_shots": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"classic_duel_bow_shots", 0),
"bow_hits": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("classic_duel_bow_hits",
0),
"damage_dealt": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"classic_duel_damage_dealt", 0),
},
"skywars": {
"solo": {
"games_played": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"sw_duel_rounds_played", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("sw_duel_kills", 0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("sw_duel_deaths", 0),
"wins": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("sw_duel_wins", 0),
"losses": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("sw_duel_losses", 0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"current_sw_winstreak", 0),
"melee_swings": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"sw_duel_melee_swings", 0),
"melee_hits": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"sw_duel_melee_hits", 0),
"bow_shots": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"sw_duel_bow_shots", 0),
"bow_hits": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("sw_duel_bow_hits",
0),
"damage_dealt": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"sw_duel_damage_dealt", 0),
},
"doubles": {
"games_played": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"sw_doubles_rounds_played", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("sw_doubles_kills", 0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("sw_doubles_deaths",
0),
"wins": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("sw_doubles_wins", 0),
"losses": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("sw_doubles_losses",
0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"current_sw_winstreak", 0),
"melee_swings": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"sw_doubles_melee_swings", 0),
"melee_hits": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"sw_doubles_melee_hits", 0),
"bow_shots": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"sw_doubles_bow_shots", 0),
"bow_hits": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"sw_doubles_bow_hits", 0),
"damage_dealt": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"sw_doubles_damage_dealt", 0),
}
},
"uhc": {
"solo": {
"games_played": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"uhc_duel_rounds_played", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("uhc_duel_kills", 0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("uhc_duel_deaths", 0),
"wins": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("uhc_duel_wins", 0),
"losses": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("uhc_duel_losses", 0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"current_uhc_winstreak", 0),
"melee_swings": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"uhc_duel_melee_swings", 0),
"melee_hits": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"uhc_duel_melee_hits", 0),
"bow_shots": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"uhc_duel_bow_shots", 0),
"bow_hits": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("uhc_duel_bow_hits",
0),
"damage_dealt": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"uhc_duel_damage_dealt", 0),
},
"doubles": {
"games_played": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"uhc_doubles_rounds_played", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("uhc_doubles_kills",
0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("uhc_doubles_deaths",
0),
"wins": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("uhc_doubles_wins", 0),
"losses": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("uhc_doubles_losses",
0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"current_uhc_winstreak", 0),
"melee_swings": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"uhc_doubles_melee_swings", 0),
"melee_hits": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"uhc_doubles_melee_hits", 0),
"bow_shots": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"uhc_doubles_bow_shots", 0),
"bow_hits": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"uhc_doubles_bow_hits", 0),
"damage_dealt": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"uhc_doubles_damage_dealt", 0),
},
"fours": {
"games_played": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"uhc_four_rounds_played", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("uhc_four_kills", 0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("uhc_four_deaths", 0),
"wins": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("uhc_four_wins", 0),
"losses": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("uhc_four_losses", 0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"current_uhc_winstreak", 0),
"melee_swings": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"uhc_four_melee_swings", 0),
"melee_hits": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"uhc_four_melee_hits", 0),
"bow_shots": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"uhc_four_bow_shots", 0),
"bow_hits": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("uhc_four_bow_hits",
0),
"damage_dealt": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"uhc_four_damage_dealt", 0),
},
"deathmatch": {
"games_played": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"uhc_meetup_rounds_played", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("uhc_meetup_kills", 0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("uhc_meetup_deaths",
0),
"wins": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("uhc_meetup_wins", 0),
"losses": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get("uhc_meetup_losses",
0),
"winstreak": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"current_uhc_winstreak", 0),
"melee_swings": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"uhc_meetup_melee_swings", 0),
"melee_hits": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"uhc_meetup_melee_hits", 0),
"bow_shots": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"uhc_meetup_bow_shots", 0),
"bow_hits": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"uhc_meetup_bow_hits", 0),
"damage_dealt": player_json.get("player", {}).get("stats", {}).get("Duels", {}).get(
"uhc_meetup_damage_dealt", 0),
}
}
},
"paintball": {
"coins": player_json.get("player", {}).get("stats", {}).get("Paintball", {}).get("coins", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("Paintball", {}).get("kills", 0),
"deaths": player_json.get("player", {}).get("stats", {}).get("Paintball", {}).get("deaths", 0),
"wins": player_json.get("player", {}).get("stats", {}).get("Paintball", {}).get("wins", 0),
"killstreaks": player_json.get("player", {}).get("stats", {}).get("Paintball", {}).get("killstreaks", 0),
"shots_fired": player_json.get("player", {}).get("stats", {}).get("Paintball", {}).get("shots_fired", 0)
},
"skywars": {
"level_data": (await core.minecraft.hypixel.static.static.get_skywars_level_data_from_experience(
(player_json.get("player", {}).get("stats", {}).get("SkyWars", {}).get("skywars_experience", 0)))),
"coins": player_json.get("player", {}).get("stats", {}).get("SkyWars", {}).get("coins", 0),
"tokens": player_json.get("player", {}).get("stats", {}).get("SkyWars", {}).get("cosmetic_tokens", 0),
"souls": player_json.get("player", {}).get("stats", {}).get("SkyWars", {}).get("souls", 0),
"kills": player_json.get("player", {}).get("stats", {}).get("SkyWars", {}).get("kills", 0),
"deaths": player_json.get("player", {}).get("stats", {}).get("SkyWars", {}).get("deaths", 0),
"wins": player_json.get("player", {}).get("stats", {}).get("SkyWars", {}).get("wins", 0),
"losses": player_json.get("player", {}).get("stats", {}).get("SkyWars", {}).get("losses", 0),
"games_played": player_json.get("player", {}).get("stats", {}).get("SkyWars", {}).get(
"games_played_skywars", 0)
}
}
await core.caches.players.save_player_data(uuid, player)
return player
| StarcoderdataPython |
9783935 | <reponame>bcgov-c/ligo-lib
import filecmp
import os
import pytest
import shutil
import linker.core.link_json as lj
from test.linker.utils import Utils
@pytest.fixture(params=[
    'levenshtein',
    pytest.param('combination', marks=pytest.mark.slow)
])
def context(request):
    """Provide (work_path, task_uuid) for one linking algorithm variant.

    Loads ``test1.json`` from the parametrized algorithm directory and
    registers a finalizer that removes the task output directory and the
    project's temp directory after the test.
    """
    base_dir = os.path.join(os.path.dirname(__file__), request.param)
    project_cfg = Utils.load_json(os.path.join(base_dir, 'test1.json'))
    uuid = project_cfg['task_uuid']

    def _cleanup():
        # Remove per-task results first, then any leftover temp workspace.
        task_dir = os.path.join(base_dir, uuid)
        if os.path.isdir(task_dir):
            shutil.rmtree(task_dir)
        temp_dir = project_cfg['temp_path']
        if os.path.exists(temp_dir):
            shutil.rmtree(temp_dir)

    request.addfinalizer(_cleanup)
    return base_dir, uuid
def test_functional_linking(context):
    """Functional test for de-duplication: run the linking project and
    compare its output CSVs byte-for-byte against the expected results."""
    work_path, task_uuid = context
    lj.main(['-p', os.path.join(work_path, 'test1.json')])

    # (expected file in work_path, produced file in the task directory)
    comparisons = [
        ('results1_linked_data.csv', 'linked_data.csv'),
        ('results1_matched_not_linked.csv', 'matched_not_linked_data.csv'),
    ]
    for expected_name, produced_name in comparisons:
        expected = os.path.join(work_path, expected_name)
        produced = os.path.join(work_path, task_uuid, produced_name)
        # shallow=False forces a content comparison, not just os.stat metadata.
        assert filecmp.cmp(expected, produced, shallow=False)
| StarcoderdataPython |
1615981 | <reponame>stormi/tsunami
# -*-coding:Utf-8 -*
# Copyright (c) 2010 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'qui'.
"""
from primaires.interpreteur.commande.commande import Commande
from primaires.format.fonctions import format_nb, supprimer_accents
class CmdQui(Commande):

    """Command 'qui' (English alias: 'who').

    Displays the list of players currently connected to the MUD,
    highlighting immortals and flagging AFK players (with their AFK
    reason when one was given).
    """

    def __init__(self):
        """Build the command with its French/English names and help text."""
        Commande.__init__(self, "qui", "who")
        self.schema = ""
        self.aide_courte = "affiche les joueurs connectés"
        self.aide_longue = \
            "Cette commande permet d'afficher la liste des joueurs " \
            "actuellement connectés au MUD."

    def interpreter(self, personnage, dic_masques):
        """Interpret the command: render a framed table of connected players.

        Players are sorted by accent-stripped name; a footer summarizes
        the total count and the number of immortals.
        """
        joueurs = type(self).importeur.connex.joueurs_connectes
        if not joueurs:
            personnage.envoyer("Aucun joueur ne semble être présent, mais " \
                    "qui es-tu alors ?")
        else:
            noms_joueurs = {}
            for joueur in joueurs:
                imm = 0
                if joueur.est_immortel():
                    nom = "|cyc|~ " + joueur.nom + " ~|ff|"
                    # The color codes are invisible when rendered; widen the
                    # padding by their length (9 chars) so columns stay aligned.
                    imm = 9
                else:
                    nom = " " + joueur.nom
                if joueur.afk:
                    raison = ""
                    # BUG FIX: the original used `joueur.afk is not "afk"`,
                    # an identity test against a string literal whose result
                    # is implementation-dependent (and a SyntaxWarning on
                    # modern CPython). Compare by value instead.
                    if joueur.afk != "afk":
                        raison = " '" + joueur.afk + "'"
                    nom += " (|rgc|AFK" + raison + "|ff|)"
                    noms_joueurs[joueur] = nom.ljust(48 + imm) + "|"
                else:
                    noms_joueurs[joueur] = nom.ljust(39 + imm) + "|"
            res = "+" + "-" * 40 + "+\n"
            res += "| |tit|Joueurs présents|ff|".ljust(50) + "|\n"
            res += "+" + "-" * 40 + "+"
            # Sort players alphabetically, ignoring accents.
            for j, nom in sorted(noms_joueurs.items(),
                    key=lambda c: supprimer_accents(c[0].nom)):
                res += "\n| " + nom
            res += "\n+" + "-" * 40 + "+\n"
            nb_joueurs = len(joueurs)
            nb_imms = len([j for j in noms_joueurs.keys() if j.est_immortel()])
            imms = ""
            if nb_imms > 0:
                # French pluralization: "un immortel" / "N immortels".
                s = "s" if nb_imms > 1 else ""
                nb = "un" if nb_imms == 1 else str(nb_imms)
                imms = ", dont |jn|{nb} immortel{s}|ff|".format(nb=nb, s=s)
            res += format_nb(nb_joueurs,
                    "{{nb}} joueur{{s}} connecté{{s}}{}.".format(imms))
            personnage << res
| StarcoderdataPython |
351712 | <reponame>flavioribeiro/brave
import time, pytest, inspect
from utils import *
def test_initial_state_option_on_startup(run_brave, create_config_file):
    '''
    Test that if 'initial_state' is set as a property in the startup config,
    it is honored. It can be set for inputs, outputs and mixers.
    '''
    # Fix: the original created a second image location that was never used.
    output_image_location = create_output_image_location()
    # One entity of each kind per state; order here fixes the index order
    # expected in the API response below.
    states = ['PLAYING', 'PAUSED', 'READY', 'NULL']
    config = {
        'default_inputs': [
            {'type': 'test_video',
             'props': {'pattern': pattern, 'initial_state': state}}
            for pattern, state in zip(range(4, 8), states)
        ],
        'default_mixers': [
            {'props': {'initial_state': state}} for state in states
        ],
        'default_outputs': [
            {'type': 'image',
             'props': {'location': output_image_location,
                       'initial_state': state}}
            for state in states
        ]
    }

    config_file = create_config_file(config)
    run_brave(config_file.name)
    time.sleep(2)
    check_brave_is_running()

    response = api_get('/api/all')
    assert response.status_code == 200
    details = response.json()
    # Every input/mixer/output must report exactly the state it was
    # configured with, in creation order.
    for entity_type in ('inputs', 'mixers', 'outputs'):
        for index, state in enumerate(states):
            assert details[entity_type][index]['state'] == state
def test_initial_state_option_via_api(run_brave):
    '''
    Test that if 'initial_state' is set as a property when an element is
    created via the API, it is honored. It can be set for inputs, outputs
    and (once the API supports creating them) mixers.
    '''
    run_brave()
    check_brave_is_running()
    response = api_get('/api/all')
    assert response.status_code == 200
    assert_everything_in_playing_state(response.json())
    output_image_location = create_output_image_location()
    add_input({'type': 'test_audio', 'props': {'initial_state': 'NULL'}})
    add_input({'type': 'test_audio', 'props': {'initial_state': 'READY'}})
    add_input({'type': 'test_audio', 'props': {'initial_state': 'PAUSED'}})
    add_input({'type': 'test_audio', 'props': {'initial_state': 'PLAYING'}})
    # TODO Uncomment when the API adds support for creating new mixers
    # add_mixer({'props': {'initial_state': 'NULL'}})
    # add_mixer({'props': {'initial_state': 'READY'}})
    # add_mixer({'props': {'initial_state': 'PAUSED'}})
    # add_mixer({'props': {'initial_state': 'PLAYING'}})
    add_output({'type': 'image', 'props': {'location': output_image_location, 'initial_state': 'NULL'}})
    add_output({'type': 'image', 'props': {'location': output_image_location, 'initial_state': 'READY'}})
    add_output({'type': 'image', 'props': {'location': output_image_location, 'initial_state': 'PAUSED'}})
    add_output({'type': 'image', 'props': {'location': output_image_location, 'initial_state': 'PLAYING'}})
    # Give brave a moment to apply the requested states before asserting.
    time.sleep(1)
    response = api_get('/api/all')
    assert response.status_code == 200
    details = response.json()
    # Inputs and outputs were both created in the same state order.
    expected_states = ['NULL', 'READY', 'PAUSED', 'PLAYING']
    for index, state in enumerate(expected_states):
        assert details['inputs'][index]['state'] == state
        assert details['outputs'][index]['state'] == state
    # Mixer 0 is the default one created at startup:
    assert details['mixers'][0]['state'] == 'PLAYING'
    # TODO Uncomment when the API adds support for creating new mixers
    # assert details['mixers'][1]['state'] == 'NULL'
    # assert details['mixers'][2]['state'] == 'READY'
    # assert details['mixers'][3]['state'] == 'PAUSED'
    # assert details['mixers'][4]['state'] == 'PLAYING'
| StarcoderdataPython |
1798352 | <filename>lambdarado/_wrap_handler_default.py
# SPDX-FileCopyrightText: (c) 2021 <NAME> <github.com/rtmigo>
# SPDX-License-Identifier: MIT
import json
import os
from aws_lambda_context import LambdaContext
from typing import Dict
from lambdarado._common import AwsHandlerFunc
def _is_true_environ(key: str, default: bool = False) -> bool:
    """Interpret the environment variable *key* as a boolean flag.

    Numeric strings are truthy when non-zero; 'true'/'false' (any case,
    surrounding whitespace ignored) map to True/False.  An unset variable
    or any other value yields *default*.
    """
    raw = os.environ.get(key)
    if raw is None:
        return default
    text = raw.strip()
    if text.isdigit():
        # Any non-zero number counts as "on".
        return int(text) != 0
    lowered = text.lower()
    if lowered in ('true', 'false'):
        return lowered == 'true'
    return default
def wrap_aws_handler_default(handler: AwsHandlerFunc) -> AwsHandlerFunc:
    """Used as a default value for start(wrap_handler=...).

    Returns a handler that dumps the JSON request and/or response to stdout
    (i.e. to the CloudWatch logs).  Logging is enabled per direction by the
    LOG_LAMBDA_REQUESTS / LOG_LAMBDA_RESPONSES environment variables; when
    neither is set, the original handler is returned unwrapped.
    """
    # todo test it (locally, without aws)
    log_requests = _is_true_environ("LOG_LAMBDA_REQUESTS", default=False)
    log_responses = _is_true_environ("LOG_LAMBDA_RESPONSES", default=False)
    print(f"wrap_aws_handler_default: log_requests={log_requests}")
    print(f"wrap_aws_handler_default: log_responses={log_responses}")

    if not (log_requests or log_responses):
        # Nothing to log: hand back the original handler untouched.
        return handler

    def wrapper(event: Dict, context: LambdaContext) -> Dict:
        if log_requests:
            print("-- request start -----------------------------------")
            print(json.dumps(event, indent=2, sort_keys=True))
            print("-- request end -------------------------------------")
        response = handler(event, context)
        if log_responses:
            print("-- response start ----------------------------------")
            print(json.dumps(response, indent=2, sort_keys=True))
            print("-- response end ------------------------------------")
        return response

    return wrapper
# wrap_aws_handler_default.log_requests = None
# wrap_aws_handler_default.log_responses = None
| StarcoderdataPython |
4837867 | <reponame>opencv/deep-person-reid
import argparse
import os
import re
import tempfile
from pathlib import Path
from subprocess import run # nosec
import json
import numpy as np
from ruamel.yaml import YAML
def get_lr_sets(model_name: str):
    """Return per-dataset learning rates appropriate for *model_name*.

    Keys are dataset identifiers (COCO, VOC, NUS, VG500); values are the
    hand-tuned starting learning rates for that backbone family.  Unknown
    backbones fall back to a conservative uniform LR of 1e-4.
    """
    if "mobilenet" in model_name:
        return {"COCO": 0.0001, "VOC": 0.0002, "NUS": 0.0001, "VG500": 0.0001}
    if "efficientnetv2" in model_name:
        return {"COCO": 0.0001, "VOC": 0.0001, "NUS": 0.0001, "VG500": 0.0001}
    # Fixed typo in the user-facing message ("stadart" -> "standard").
    print("Unknown model. Use standard predefined lrs")
    return {"COCO": 0.0001, "VOC": 0.0001, "NUS": 0.0001, "VG500": 0.0001}
def read_config(yaml: YAML, config_path: str):
yaml.default_flow_style = True
with open(config_path, 'r') as f:
cfg = yaml.load(f)
return cfg
def dump_config(yaml: YAML, config_path: str, cfg: dict):
with open(config_path, 'w') as f:
yaml.default_flow_style = True
yaml.dump(cfg, f)
def main():
    """Sequentially train/evaluate a multi-label model on each selected dataset.

    For every requested domain the base YAML config is patched (dataset
    roots/names/types, learning rate, save dir), dumped to a temporary file,
    and handed to ``tools/main.py`` in a subprocess.  When --dump-results is
    set, the per-run logs are merged and summarized into ``combine_all.txt``.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--root', type=str, required=False, default='/ssd/datasets', help='path to folder with datasets')
    parser.add_argument('--config', type=str, required=False, help='path to config file')
    parser.add_argument('--path-to-main', type=str, default='./tools/main.py', required=False, help='path to main.py file')
    parser.add_argument('--gpu-num', type=int, default=1, help='Number of GPUs for training. 0 is for CPU mode')
    parser.add_argument('--use-hardcoded-lr', action='store_true')
    parser.add_argument('-d', '--domains', nargs='+', help='On what domains to train', required=False, default=['all'])
    parser.add_argument('-lrs', '--lr-sets', type=json.loads, default='{"COCO": 0.0001, "VOC": 0.0002,'
                                                                      '"NUS": 0.0001, "VG500": 0.0001}')
    # Bug fix: the original used type=bool, which makes any non-empty string
    # (including "False") parse as True.  Parse common false-y spellings instead;
    # the CLI shape (--dump-results VALUE) and the default are unchanged.
    parser.add_argument('--dump-results', type=lambda s: s.lower() not in ('false', '0', 'no'),
                        default=True, help='whether or not to dump results of the experiment')
    args = parser.parse_args()
    yaml = YAML()
    # Dataset registry: annotation files (relative to --root), registered
    # dataset names, task types, and the source/target split identifiers.
    datasets = dict(
        coco=dict(
            roots=['coco/train.json', 'coco/val.json'],
            names=['coco_train', 'coco_val'],
            types=['multilabel_classification', 'multilabel_classification'],
            sources='coco_train',
            targets='coco_val',
        ),
        voc=dict(
            roots=['mlc_voc_2007/train.json', 'mlc_voc_2007/val.json'],
            names=['voc_train', 'voc_val'],
            types=['multilabel_classification', 'multilabel_classification'],
            sources='voc_train',
            targets='voc_val',
        ),
        vg500=dict(
            roots=['VG500/train.json', 'VG500/val.json'],
            names=['VG500_train', 'VG500_val'],
            types=['multilabel_classification', 'multilabel_classification'],
            sources='VG500_train',
            targets='VG500_val',
        ),
        nus_wide=dict(
            roots=['nus_wide/train.json', 'nus_wide/val.json'],
            names=['nus_wide_train', 'nus_wide_val'],
            types=['multilabel_classification', 'multilabel_classification'],
            sources='nus_wide_train',
            targets='nus_wide_val',
        ),
        pets=dict(
            roots=['oxford_pets/train.json', 'oxford_pets/val.json'],
            names=['oxford_pets_train', 'oxford_pets_val'],
            types=['multilabel_classification', 'multilabel_classification'],
            sources='oxford_pets_train',
            targets='oxford_pets_val',
        ),
        bbcd=dict(
            roots=['BBCD/train.json', 'BBCD/val.json'],
            names=['bbcd_train', 'bbcd_val'],
            types=['multilabel_classification', 'multilabel_classification'],
            sources='bbcd_train',
            targets='bbcd_val',
        ),
        aerial_maritime=dict(
            roots=['Aerial_Maritime/train.json', 'Aerial_Maritime/val.json'],
            names=['aerial_maritime_train', 'aerial_maritime_val'],
            types=['multilabel_classification', 'multilabel_classification'],
            sources='aerial_maritime_train',
            targets='aerial_maritime_val',
        ),
    )
    path_to_base_cfg = args.config
    domains = args.domains
    if 'all' in domains:
        domains = set(datasets.keys())
    for key in domains:
        params = datasets[key]
        # Re-read the base config each iteration so per-dataset patches
        # never leak into the next run.
        cfg = read_config(yaml, path_to_base_cfg)
        lrs_dict = get_lr_sets(cfg["model"]["name"])
        path_to_exp_folder = cfg['data']['save_dir']
        name_train = params['names'][0]
        name_val = params['names'][1]
        type_train = params['types'][0]
        type_val = params['types'][1]
        root_train = args.root + os.sep + params['roots'][0]
        root_val = args.root + os.sep + params['roots'][1]
        if args.use_hardcoded_lr:
            print("WARNING: Using hardcoded LR")
            if key in lrs_dict:
                # A known LR exists: skip the automatic LR finder.
                cfg['lr_finder']["enable"] = False
                cfg["train"]["lr"] = lrs_dict[key]
            else:
                cfg['lr_finder']["enable"] = True
        cfg['custom_datasets']['roots'] = [root_train, root_val]
        cfg['custom_datasets']['types'] = [type_train, type_val]
        cfg['custom_datasets']['names'] = [name_train, name_val]
        cfg['data']['save_dir'] = path_to_exp_folder + f"/{key}"
        source = params['sources']
        targets = params['targets']
        cfg['data']['sources'] = [source]
        cfg['data']['targets'] = [targets]
        # Dump the patched config to a temp file for the subprocess, and
        # always clean it up afterwards.
        fd, tmp_path_to_cfg = tempfile.mkstemp(suffix='.yml')
        try:
            with os.fdopen(fd, 'w') as tmp:
                yaml.default_flow_style = True
                yaml.dump(cfg, tmp)
                tmp.close()
            # run training
            run(['python', f'{str(args.path_to_main)}',
                 '--config', f'{tmp_path_to_cfg}',
                 '--gpu-num', f'{int(args.gpu_num)}'],
                shell=False)
        finally:
            os.remove(tmp_path_to_cfg)
    # after training combine all outputs in one file
    if args.dump_results:
        path_to_bash = str(Path.cwd() / 'tools/classification/parse_output.sh')
        run(['bash', f'{path_to_bash}', f'{path_to_exp_folder}'], shell=False)
        saver = dict()
        path_to_file = f"{path_to_exp_folder}/combine_all.txt"
        # Parse the combined log: a dataset-name line starts a section, and
        # subsequent metric lines are collected under it.  NOTE(review): a
        # metric line appearing before any dataset header would raise
        # NameError on next_dataset — assumed not to happen in practice.
        with open(path_to_file, 'r') as f:
            for line in f:
                if line.strip() in datasets.keys():
                    next_dataset = line.strip()
                    saver[next_dataset] = dict()
                    continue
                else:
                    for metric in ['mAP', 'F_O', 'mean_F_C']:
                        if line.strip().startswith(metric):
                            if metric not in saver[next_dataset]:
                                saver[next_dataset][metric] = []
                            # Raw string fixes the invalid '\d' escape in the
                            # original pattern.
                            pattern = re.search(r'\d+\.\d+', line.strip())
                            if pattern:
                                saver[next_dataset][metric].append(
                                    float(pattern.group(0))
                                )
        # Append a summary row: dataset names, then per-dataset
        # "mAP;F_O;mean_F_C;" triplets (or -1 placeholders) at the epoch
        # with the best F_O.
        names = ''
        values = ''
        with open(path_to_file, 'a') as f:
            for key in sorted(datasets.keys()):
                names += key + ' '
                if key in saver:
                    best_top_1_idx = np.argmax(saver[key]['F_O'])
                    fo = str(saver[key]['F_O'][best_top_1_idx])
                    mAP = str(saver[key]['mAP'][best_top_1_idx])
                    fc = str(saver[key]['mean_F_C'][best_top_1_idx])
                    values += mAP + ';' + fo + ';' + fc + ';'
                else:
                    values += '-1;-1;-1;'
            f.write(f"\n{names}\n{values}")
| StarcoderdataPython |
3404539 | <gh_stars>10-100
#!/usr/bin/env python3.8
# Copyright 2020, Schweitzer Engineering Laboratories, Inc
# SEL Confidential
import random
import json
import itertools
from ..init import (
uut,
UTLOG,
LOGID,
nose,
)
from .. import tools
@tools.setup(progress_bar=True)
def test_str_base_resolution():
    """Verify str_base property resolution

    For every ordered pair of supported bases, resolving the two operands'
    str_base must yield a base both already share (10, 8 or 2); any
    disagreement falls back to 16.
    """
    # All supported string-conversion bases (keys of uut.properties.StrConv).
    bases = tuple(x for x in uut.properties.StrConv.keys())
    permutations = [
        (
            {'str_base': x},
            {'str_base': y},
        )
        for x, y in itertools.product(bases, repeat=2)
    ]
    # Instances to be used
    x = uut.FixedPoint(0)
    y = uut.FixedPoint(0)
    UUT = uut.properties.PropertyResolver()
    # Run the regressions
    for xkwargs, ykwargs in tools.test_iterator(permutations):
        UTLOG.debug("x: %s\ny: %s",
            json.dumps(xkwargs, indent=2),
            json.dumps(ykwargs, indent=2),
            **LOGID
        )
        # safe_retain presumably restores each instance's original properties
        # when the context exits, giving every permutation a clean slate —
        # TODO confirm against FixedPoint.__call__.
        with x(safe_retain=True, **xkwargs), y(safe_retain=True, **ykwargs):
            result = UUT.str_base(x, y)
            if result == 10:
                nose.tools.assert_equal(x.str_base, 10)
                nose.tools.assert_equal(y.str_base, 10)
            elif result == 8:
                nose.tools.assert_equal(x.str_base, 8)
                nose.tools.assert_equal(y.str_base, 8)
            elif result == 2:
                nose.tools.assert_equal(x.str_base, 2)
                nose.tools.assert_equal(y.str_base, 2)
            else:
                nose.tools.assert_equal(result, 16)
@tools.setup(progress_bar=True)
def test_implicit_cast_alert_resolution():
    """Verify implicit_cast_alert property resolution

    Exercises every combination of implicit_cast_alert and mismatch_alert on
    both operands.  A mismatch_alert of 'error' raises; 'warning' emits an
    error/warning message pair per mismatched property; 'ignore' is silent.
    The resolved alert severity prefers 'warning', then 'error', then 'ignore'.
    """
    # All possible permutations of implicit_cast_alerts and mismatch_alerts.
    alerts = tuple(x.name for x in uut.properties.Alert)
    permutations = [
        (
            {'implicit_cast_alert': xi, 'mismatch_alert': xm},
            {'implicit_cast_alert': yi, 'mismatch_alert': ym},
        )
        for xi, yi, xm, ym in itertools.product(alerts, repeat=4)
    ]
    # Error and warning messages.  The mismatch_alert message wins when the
    # two mismatch_alerts themselves disagree; otherwise the property-specific
    # message is expected.
    merrmsg = r"Non-matching mismatch_alert behaviors \[.*'error'.*\]."
    ierrmsg = r"Non-matching implicit_cast_alert behaviors"
    mwarnmsg = r"Using 'warning'\."
    def iwarnmsg(x, y):
        # Expected resolution announcement: 'warning' > 'error' > 'ignore'.
        A = (x.implicit_cast_alert, y.implicit_cast_alert)
        return r'Using %r\.' % (
            'warning' if 'warning' in A else
            'error' if 'error' in A else 'ignore'
        )
    # Instances to be used
    x = uut.FixedPoint(0)
    y = uut.FixedPoint(0)
    UUT = uut.properties.PropertyResolver()
    # Run the regressions
    for xkwargs, ykwargs in tools.test_iterator(permutations):
        UTLOG.debug("x: %s\ny: %s",
            json.dumps(xkwargs, indent=2),
            json.dumps(ykwargs, indent=2),
            **LOGID
        )
        with x(safe_retain=True, **xkwargs), y(safe_retain=True, **ykwargs):
            args = (x, y)
            M = (x.mismatch_alert, y.mismatch_alert)
            if 'error' in M:
                try:
                    result = UUT.implicit_cast_alert(*args)
                except uut.MismatchError as exc:
                    if x.mismatch_alert != y.mismatch_alert:
                        nose.tools.assert_regex(str(exc), merrmsg)
                    else:
                        nose.tools.assert_regex(str(exc), ierrmsg)
                    continue
                else:
                    # No exception means the properties already matched.
                    nose.tools.assert_equal(x.implicit_cast_alert, y.implicit_cast_alert)
            elif 'warning' in M:
                with tools.CaptureWarnings() as warn:
                    result = UUT.implicit_cast_alert(*args)
                UTLOG.debug('Warning logs (%d): %s',
                    len(logs := warn.logs),
                    json.dumps(logs, indent=2),
                    **LOGID
                )
                if x.implicit_cast_alert != y.implicit_cast_alert:
                    # Warnings come in (description, resolution) pairs; the
                    # mismatch_alert pair (if any) precedes the
                    # implicit_cast_alert pair, which is always last.
                    if x.mismatch_alert != y.mismatch_alert:
                        nose.tools.assert_regex(logs[0], merrmsg.replace('error', 'warning'))
                        nose.tools.assert_regex(logs[1], mwarnmsg)
                    nose.tools.assert_regex(logs[-2], ierrmsg)
                    nose.tools.assert_regex(logs[-1], iwarnmsg(*args))
                else:
                    nose.tools.assert_equal(len(logs), 0)
            else:
                # mismatch_alert == 'ignore' on both: no warnings at all.
                with tools.CaptureWarnings() as warn:
                    result = UUT.implicit_cast_alert(*args)
                nose.tools.assert_equal(len(warn.logs), 0)
            # Validate property resolution
            if result == 'ignore':
                nose.tools.assert_not_equal(x.implicit_cast_alert, 'error')
                nose.tools.assert_not_equal(y.implicit_cast_alert, 'error')
                nose.tools.assert_not_equal(x.implicit_cast_alert, 'warning')
                nose.tools.assert_not_equal(y.implicit_cast_alert, 'warning')
            elif result == 'error':
                nose.tools.assert_not_equal(x.implicit_cast_alert, 'warning')
                nose.tools.assert_not_equal(y.implicit_cast_alert, 'warning')
@tools.setup(progress_bar=True)
def test_mismatch_alert_resolution():
    """Verify mismatch_alert property resolution

    Resolving mismatch_alert itself: 'error' on either side raises on any
    disagreement, 'warning' emits a message pair, and the resolved value
    prefers 'warning' over 'error' over 'ignore'.
    """
    # All possible permutations of mismatch_alerts
    alerts = tuple(x.name for x in uut.properties.Alert)
    permutations = [
        (
            {'mismatch_alert': xm},
            {'mismatch_alert': ym},
        )
        for xm, ym in itertools.product(alerts, repeat=2)
    ]
    # Error and warning messages.
    merrmsg = r"Non-matching mismatch_alert behaviors \[.*'error'.*\]."
    mwarnmsg = r"Using 'warning'\."
    # Instances to be used
    x = uut.FixedPoint(0)
    y = uut.FixedPoint(0)
    UUT = uut.properties.PropertyResolver()
    # Run the regressions
    for xkwargs, ykwargs in tools.test_iterator(permutations):
        UTLOG.debug("x: %s\ny: %s",
            json.dumps(xkwargs, indent=2),
            json.dumps(ykwargs, indent=2),
            **LOGID
        )
        with x(safe_retain=True, **xkwargs), y(safe_retain=True, **ykwargs):
            args = (x, y)
            M = (x.mismatch_alert, y.mismatch_alert)
            if 'error' in M:
                try:
                    result = UUT.mismatch_alert(*args)
                except uut.MismatchError as exc:
                    nose.tools.assert_regex(str(exc), merrmsg)
                    nose.tools.assert_not_equal(*M)
                    continue
                else:
                    nose.tools.assert_equal(*M)
            elif 'warning' in M:
                with tools.CaptureWarnings() as warn:
                    result = UUT.mismatch_alert(*args)
                UTLOG.debug('Warning logs (%d): %s',
                    len(logs := warn.logs),
                    json.dumps(logs, indent=2),
                    **LOGID
                )
                if x.mismatch_alert != y.mismatch_alert:
                    # NOTE(review): this nested check repeats the enclosing
                    # condition and is always true here — redundant but
                    # harmless; kept byte-identical.
                    if x.mismatch_alert != y.mismatch_alert:
                        nose.tools.assert_regex(logs[0], merrmsg.replace('error', 'warning'))
                        nose.tools.assert_regex(logs[1], mwarnmsg)
                else:
                    nose.tools.assert_equal(len(logs), 0)
            else:
                # Both 'ignore': resolution must stay silent.
                with tools.CaptureWarnings() as warn:
                    result = UUT.mismatch_alert(*args)
                nose.tools.assert_equal(len(warn.logs), 0)
            # Validate property resolution
            if result == 'ignore':
                nose.tools.assert_not_equal(x.mismatch_alert, 'error')
                nose.tools.assert_not_equal(y.mismatch_alert, 'error')
                nose.tools.assert_not_equal(x.mismatch_alert, 'warning')
                nose.tools.assert_not_equal(y.mismatch_alert, 'warning')
            elif result == 'warning':
                nose.tools.assert_not_equal(x.mismatch_alert, 'error')
                nose.tools.assert_not_equal(y.mismatch_alert, 'error')
@tools.setup(progress_bar=True)
def test_overflow_alert_resolution():
    """Verify overflow_alert property resolution

    Exercises every combination of overflow_alert and mismatch_alert on both
    operands.  Unlike implicit_cast_alert, the resolved severity here prefers
    'error', then 'warning', then 'ignore' (the stricter alert wins).
    """
    # All possible permutations of overflow_alerts and mismatch_alerts
    alerts = tuple(x.name for x in uut.properties.Alert)
    permutations = [
        (
            {'overflow_alert': xo, 'mismatch_alert': xm},
            {'overflow_alert': yo, 'mismatch_alert': ym},
        )
        for xo, yo, xm, ym in itertools.product(alerts, repeat=4)
    ]
    # Error and warning messages.  The mismatch_alert message wins when the
    # two mismatch_alerts themselves disagree; otherwise the overflow_alert
    # message is expected.
    merrmsg = r"Non-matching mismatch_alert behaviors \[.*'error'.*\]."
    oerrmsg = r"Non-matching overflow_alert behaviors"
    mwarnmsg = r"Using 'warning'\."
    def owarnmsg(x, y):
        # Expected resolution announcement: 'error' > 'warning' > 'ignore'.
        A = (x.overflow_alert, y.overflow_alert)
        return r'Using %r\.' % (
            'error' if 'error' in A else
            'warning' if 'warning' in A else 'ignore'
        )
    # Instances to be used
    x = uut.FixedPoint(0)
    y = uut.FixedPoint(0)
    UUT = uut.properties.PropertyResolver()
    # Run the regressions
    for xkwargs, ykwargs in tools.test_iterator(permutations):
        UTLOG.debug("x: %s\ny: %s",
            json.dumps(xkwargs, indent=2),
            json.dumps(ykwargs, indent=2),
            **LOGID
        )
        with x(safe_retain=True, **xkwargs), y(safe_retain=True, **ykwargs):
            args = (x, y)
            M = (x.mismatch_alert, y.mismatch_alert)
            if 'error' in M:
                try:
                    result = UUT.overflow_alert(*args)
                except uut.MismatchError as exc:
                    if x.mismatch_alert != y.mismatch_alert:
                        nose.tools.assert_regex(str(exc), merrmsg)
                    else:
                        nose.tools.assert_regex(str(exc), oerrmsg)
                    continue
                else:
                    nose.tools.assert_equal(x.overflow_alert, y.overflow_alert)
            elif 'warning' in M:
                with tools.CaptureWarnings() as warn:
                    result = UUT.overflow_alert(*args)
                UTLOG.debug('Warning logs (%d): %s',
                    len(logs := warn.logs),
                    json.dumps(logs, indent=2),
                    **LOGID
                )
                if x.overflow_alert != y.overflow_alert:
                    # Warnings come in (description, resolution) pairs; the
                    # overflow_alert pair is always last.
                    if x.mismatch_alert != y.mismatch_alert:
                        nose.tools.assert_regex(logs[0], merrmsg.replace('error', 'warning'))
                        nose.tools.assert_regex(logs[1], mwarnmsg)
                    nose.tools.assert_regex(logs[-2], oerrmsg)
                    nose.tools.assert_regex(logs[-1], owarnmsg(*args))
                else:
                    nose.tools.assert_equal(len(logs), 0)
            else:
                with tools.CaptureWarnings() as warn:
                    result = UUT.overflow_alert(*args)
                nose.tools.assert_equal(len(warn.logs), 0)
            # Validate property resolution
            if result == 'ignore':
                nose.tools.assert_not_equal(x.overflow_alert, 'error')
                nose.tools.assert_not_equal(y.overflow_alert, 'error')
                nose.tools.assert_not_equal(x.overflow_alert, 'warning')
                nose.tools.assert_not_equal(y.overflow_alert, 'warning')
            elif result == 'warning':
                nose.tools.assert_not_equal(x.overflow_alert, 'error')
                nose.tools.assert_not_equal(y.overflow_alert, 'error')
@tools.setup(progress_bar=True)
def test_rounding_resolution():
    """Verify rounding property resolution

    Exercises every combination of rounding, mismatch_alert and signedness.
    The resolved rounding follows the priority chain
    convergent/nearest > down > in > out > up, where convergent beats
    nearest when either operand is signed and nearest beats convergent when
    both are unsigned.
    """
    # All possible permutations of roundings and mismatch_alerts
    roundings = tuple(x.name for x in uut.properties.Rounding)
    alerts = tuple(x.name for x in uut.properties.Alert)
    permutations = [
        (
            {'rounding': x, 'mismatch_alert': xm, 'signed': xs},
            {'rounding': y, 'mismatch_alert': ym, 'signed': ys},
        )
        for x, y in itertools.product(roundings, repeat=2)
        for xm, ym in itertools.product(alerts, repeat=2)
        for xs, ys in itertools.product((True, False), repeat=2)
    ]
    # Error and warning messages.
    merrmsg = r"Non-matching mismatch_alert behaviors \[.*'error'.*\]."
    rerrmsg = r"Non-matching rounding behaviors"
    mwarnmsg = r"Using 'warning'\."
    def rwarnmsg(x, y):
        # Expected resolution announcement; mirrors the signed/unsigned
        # priority chain documented above.
        R = (x.rounding, y.rounding)
        return r"Using %r\." % (
            (
                (
                    'convergent' if 'convergent' in R else
                    'nearest' if 'nearest' in R else
                    'down' if 'down' in R else
                    'in' if 'in' in R else
                    'out' if 'out' in R else 'up'
                ) if x.signed or y.signed else (
                    'nearest' if 'nearest' in R else
                    'convergent' if 'convergent' in R else
                    'down' if 'down' in R else
                    'in' if 'in' in R else
                    'out' if 'out' in R else 'up'
                )
            )
        )
    # Instances to be used (m=1 so the sign bit can toggle without error)
    x = uut.FixedPoint(0, m=1)
    y = uut.FixedPoint(0, m=1)
    UUT = uut.properties.PropertyResolver()
    # Run the regressions
    for xkwargs, ykwargs in tools.test_iterator(permutations):
        UTLOG.debug("x: %s\ny: %s",
            json.dumps(xkwargs, indent=2),
            json.dumps(ykwargs, indent=2),
            **LOGID
        )
        with x(safe_retain=True, **xkwargs), y(safe_retain=True, **ykwargs):
            args = (x, y)
            if any(a.mismatch_alert == 'error' for a in args):
                try:
                    result = UUT.rounding(x, y)
                except uut.MismatchError as exc:
                    if x.mismatch_alert != y.mismatch_alert:
                        nose.tools.assert_regex(str(exc), merrmsg)
                    else:
                        nose.tools.assert_regex(str(exc), rerrmsg)
                    continue
                else:
                    nose.tools.assert_equal(x.rounding, y.rounding)
            elif any(a.mismatch_alert == 'warning' for a in args):
                with tools.CaptureWarnings() as warn:
                    result = UUT.rounding(x, y)
                UTLOG.debug('Warning logs (%d): %s',
                    len(logs := warn.logs),
                    json.dumps(logs, indent=2),
                    **LOGID
                )
                if x.rounding != y.rounding:
                    # Warnings come in (description, resolution) pairs; the
                    # rounding pair is always last.
                    if x.mismatch_alert != y.mismatch_alert:
                        nose.tools.assert_regex(logs[0], merrmsg.replace('error', 'warning'))
                        nose.tools.assert_regex(logs[1], mwarnmsg)
                    nose.tools.assert_regex(logs[-2], rerrmsg)
                    nose.tools.assert_regex(logs[-1], rwarnmsg(x, y))
                else:
                    nose.tools.assert_equal(len(logs), 0)
            else:
                with tools.CaptureWarnings() as warn:
                    result = UUT.rounding(x, y)
                nose.tools.assert_equal(len(warn.logs), 0)
            # Validate property resolution: each result must only occur when
            # no higher-priority rounding was present on either operand.
            R = (x.rounding, y.rounding)
            signed = (x.signed, y.signed)
            if result == 'up':
                nose.tools.assert_not_in('out', R)
                nose.tools.assert_not_in('in', R)
                nose.tools.assert_not_in('down', R)
                nose.tools.assert_not_in('nearest', R)
                nose.tools.assert_not_in('convergent', R)
            elif result == 'out':
                nose.tools.assert_not_in('in', R)
                nose.tools.assert_not_in('down', R)
                nose.tools.assert_not_in('nearest', R)
                nose.tools.assert_not_in('convergent', R)
            elif result == 'in':
                nose.tools.assert_not_in('down', R)
                nose.tools.assert_not_in('nearest', R)
                nose.tools.assert_not_in('convergent', R)
            elif result == 'down':
                nose.tools.assert_not_in('nearest', R)
                nose.tools.assert_not_in('convergent', R)
            elif result == 'nearest' and any(signed):
                nose.tools.assert_not_in('convergent', R)
            elif result == 'convergent' and not any(signed):
                nose.tools.assert_not_in('nearest', R)
@tools.setup(progress_bar=True)
def test_overflow_resolution():
    """Verify overflow property resolution

    Exercises every combination of overflow and mismatch_alert on both
    operands.  The resolved overflow is 'clamp' unless both operands are
    already 'wrap' (the safer behavior wins).
    """
    # All possible permutations of overflows and mismatch_alerts
    overflows = tuple(x.name for x in uut.properties.Overflow)
    alerts = tuple(x.name for x in uut.properties.Alert)
    permutations = [
        (
            {'overflow': x, 'mismatch_alert': xx},
            {'overflow': y, 'mismatch_alert': yy},
        )
        for x, y in itertools.product(overflows, repeat=2)
        for xx, yy in itertools.product(alerts, repeat=2)
    ]
    # Error and warning messages.
    merrmsg = r"Non-matching mismatch_alert behaviors \[.*'error'.*\]."
    oerrmsg = r"Non-matching overflow behaviors"
    mwarnmsg = r"Using 'warning'\."
    owarnmsg = r"Using 'clamp'\."
    # Instances to be used
    x = uut.FixedPoint(0)
    y = uut.FixedPoint(0)
    UUT = uut.properties.PropertyResolver()
    # Run the regressions
    for xkwargs, ykwargs in tools.test_iterator(permutations):
        UTLOG.debug("x: %s\ny: %s",
            json.dumps(xkwargs, indent=2),
            json.dumps(ykwargs, indent=2),
            **LOGID
        )
        with x(safe_retain=True, **xkwargs), y(safe_retain=True, **ykwargs):
            args = (x, y)
            if any(a.mismatch_alert == 'error' for a in args):
                try:
                    result = UUT.overflow(x, y)
                except uut.MismatchError as exc:
                    if x.mismatch_alert != y.mismatch_alert:
                        nose.tools.assert_regex(str(exc), merrmsg)
                    else:
                        nose.tools.assert_regex(str(exc), oerrmsg)
                    continue
                else:
                    nose.tools.assert_equal(x.overflow, y.overflow)
            elif any(a.mismatch_alert == 'warning' for a in args):
                with tools.CaptureWarnings() as warn:
                    result = UUT.overflow(x, y)
                UTLOG.debug('Warning logs (%d): %s',
                    len(logs := warn.logs),
                    json.dumps(logs, indent=2),
                    **LOGID
                )
                if x.overflow != y.overflow:
                    # Warnings come in (description, resolution) pairs; the
                    # overflow pair is always last.
                    if x.mismatch_alert != y.mismatch_alert:
                        nose.tools.assert_regex(logs[0], merrmsg.replace('error', 'warning'))
                        nose.tools.assert_regex(logs[1], mwarnmsg)
                    nose.tools.assert_regex(logs[-2], oerrmsg)
                    nose.tools.assert_regex(logs[-1], owarnmsg)
                else:
                    nose.tools.assert_equal(len(logs), 0)
            else:
                with tools.CaptureWarnings() as warn:
                    result = UUT.overflow(x, y)
                nose.tools.assert_equal(len(warn.logs), 0)
            # Validate property resolution: 'wrap' only wins unanimously.
            if result == 'wrap':
                nose.tools.assert_equal(x.overflow, 'wrap')
                nose.tools.assert_equal(y.overflow, 'wrap')
            else:
                nose.tools.assert_equal(result, 'clamp')
@tools.setup(progress_bar=True)
def test_all():
    """Verify resolution of all properties

    Randomized regression over PropertyResolver.all(): picks random values
    for every property on two instances, then checks that the raised error
    (mismatch_alert='error'), the ordered (description, resolution) warning
    pairs (mismatch_alert='warning'), and the resolved property dict all
    agree with the per-property resolution rules verified above.
    """
    # Per-property error messages and resolution announcements.
    mmerrmsg = r"Non-matching mismatch_alert behaviors \[.*'error'.*\]."
    moerrmsg = r"Non-matching overflow behaviors"
    mrerrmsg = r"Non-matching rounding behaviors"
    maerrmsg = r"Non-matching overflow_alert behaviors"
    mierrmsg = r"Non-matching implicit_cast_alert behaviors"
    mmwarnmsg = r"Using 'warning'\."
    mowarnmsg = r"Using 'clamp'\."
    mrwarnmsg = r"Using %r\."
    mawarnmsg = r"Using %r\."
    miwarnmsg = r"Using %r\."
    str_bases = tuple(uut.properties.StrConv.keys())
    alerts = tuple(x.name for x in uut.properties.Alert)
    overflows = tuple(x.name for x in uut.properties.Overflow)
    roundings = tuple(x.name for x in uut.properties.Rounding)
    x = uut.FixedPoint('0b0110', 0, 2, 2)
    y = uut.FixedPoint('0b0110', 0, 2, 2)
    UUT = uut.properties.PropertyResolver()
    for _ in tools.test_iterator():
        # Randomize every property on both instances; each tuple (M, A, I,
        # O, R, S) records the pair of values for later verification.
        x.signed, y.signed = signed = random.randrange(2), random.randrange(2)
        x.str_base, y.str_base = S = random.choices(str_bases, k=2)
        x.mismatch_alert, y.mismatch_alert = M = random.choices(alerts, k=2)
        x.overflow_alert, y.overflow_alert = A = random.choices(alerts, k=2)
        x.implicit_cast_alert, y.implicit_cast_alert = I = random.choices(alerts, k=2)
        x.overflow, y.overflow = O = random.choices(overflows, k=2)
        x.rounding, y.rounding = R = random.choices(roundings, k=2)
        UTLOG.debug("x: %s\ny: %s",
            json.dumps(UUT.all(x), indent=2),
            json.dumps(UUT.all(y), indent=2),
            **LOGID
        )
        if 'error' in M:
            # Only the FIRST mismatched property (in resolution order:
            # mismatch_alert, overflow, rounding, overflow_alert,
            # implicit_cast_alert) is reported before the error is raised.
            if len(set(M)) != 1:
                errmsg = mmerrmsg
            elif len(set(O)) != 1:
                errmsg = moerrmsg
            elif len(set(R)) != 1:
                errmsg = mrerrmsg
            elif len(set(A)) != 1:
                errmsg = maerrmsg
            elif len(set(I)) != 1:
                errmsg = mierrmsg
            else:
                errmsg = 'no mismatches!'
            try:
                result = UUT.all(x, y)
            except uut.MismatchError as exc:
                nose.tools.assert_regex(str(exc), errmsg)
                continue
            else:
                # No exception is only acceptable when nothing mismatched.
                nose.tools.assert_equal(errmsg, 'no mismatches!')
        elif 'warning' in M:
            # Build the expected (description, resolution) message pair for
            # every mismatched property, in resolution order.
            errmsg, warnmsg = [], []
            if len(set(M)) != 1:
                errmsg.append(mmerrmsg.replace('error', 'warning'))
                warnmsg.append(mmwarnmsg)
            if len(set(O)) != 1:
                errmsg.append(moerrmsg)
                warnmsg.append(mowarnmsg)
            if len(set(R)) != 1:
                errmsg.append(mrerrmsg)
                warnmsg.append(mrwarnmsg % (
                    (
                        'convergent' if 'convergent' in R else
                        'nearest' if 'nearest' in R else
                        'down' if 'down' in R else
                        'in' if 'in' in R else 'out'
                    ) if any(signed) else (
                        'nearest' if 'nearest' in R else
                        'convergent' if 'convergent' in R else
                        'down' if 'down' in R else
                        'in' if 'in' in R else 'out'
                    )
                )
                )
            if len(set(A)) != 1:
                errmsg.append(maerrmsg)
                warnmsg.append(mawarnmsg % ('error' if 'error' in A else 'warning'))
            if len(set(I)) != 1:
                errmsg.append(mierrmsg)
                warnmsg.append(miwarnmsg % ('warning' if 'warning' in I else 'error'))
            with tools.CaptureWarnings() as warn:
                result = UUT.all(x, y)
            # Captured logs alternate description/resolution, so even
            # indices check errmsg[i//2] and odd indices warnmsg[i//2].
            for i, log in enumerate(warn.logs):
                nose.tools.assert_regex(log, (warnmsg if i % 2 else errmsg)[i // 2],
                    f'{i}:\n{json.dumps(errmsg, indent=2)}\n{json.dumps(warnmsg, indent=2)}\n')
        # mismatch_alert set to 'ignore'
        else:
            with tools.CaptureWarnings() as warn:
                result = UUT.all(x, y)
            nose.tools.assert_equal(len(warn.logs), 0)
        # Verify results against the per-property resolution rules.
        if (rM := result['mismatch_alert']) == 'ignore':
            nose.tools.assert_not_in('warning', M)
            nose.tools.assert_not_in('error', M)
        elif rM == 'error':
            nose.tools.assert_not_in('warning', M)
        if result['overflow'] == 'wrap':
            nose.tools.assert_not_in('clamp', O)
        if (rR := result['rounding']) == 'up':
            nose.tools.assert_not_in('out', R)
            nose.tools.assert_not_in('in', R)
            nose.tools.assert_not_in('down', R)
            nose.tools.assert_not_in('nearest', R)
            nose.tools.assert_not_in('convergent', R)
        elif rR == 'out':
            nose.tools.assert_not_in('in', R)
            nose.tools.assert_not_in('down', R)
            nose.tools.assert_not_in('nearest', R)
            nose.tools.assert_not_in('convergent', R)
        elif rR == 'in':
            nose.tools.assert_not_in('down', R)
            nose.tools.assert_not_in('nearest', R)
            nose.tools.assert_not_in('convergent', R)
        elif rR == 'down':
            nose.tools.assert_not_in('nearest', R)
            nose.tools.assert_not_in('convergent', R)
        elif rR == 'nearest' and any(signed):
            nose.tools.assert_not_in('convergent', R)
        elif rR == 'convergent' and not any(signed):
            nose.tools.assert_not_in('nearest', R)
        if (rA := result['overflow_alert']) == 'ignore':
            nose.tools.assert_not_in('warning', A)
            nose.tools.assert_not_in('error', A)
        elif rA == 'warning':
            nose.tools.assert_not_in('error', A)
        if (rI := result['implicit_cast_alert']) == 'ignore':
            nose.tools.assert_not_in('warning', I)
            nose.tools.assert_not_in('error', I)
        elif rI == 'error':
            nose.tools.assert_not_in('warning', I)
        if (rS := result['str_base']) != 16:
            nose.tools.assert_not_in(16, S)
| StarcoderdataPython |
3314946 | from __future__ import print_function
# Adapted from https://github.com/benthor/remotty/blob/master/socketclient.py
import sys
import tty
import fcntl
import os
import termios
import threading
import errno
import logging
log = logging.getLogger(__name__)
class SocketClient:
    """Bridge the local terminal (stdin/stdout/stderr) to socket endpoints.

    Adapted from https://github.com/benthor/remotty/blob/master/socketclient.py

    stdin is pumped into ``socket_in`` while ``socket_out`` / ``socket_err``
    are pumped to stdout / stderr on daemon threads.  Use as a context
    manager so the terminal settings are restored on exit.
    """

    def __init__(self,
                 socket_in=None,
                 socket_out=None,
                 socket_err=None,
                 raw=True,
                 ):
        self.socket_in = socket_in
        self.socket_out = socket_out
        self.socket_err = socket_err
        # When True, the controlling tty is put into raw mode in create().
        self.raw = raw
        self.stdin_fileno = sys.stdin.fileno()

    def __enter__(self):
        self.create()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Parameters renamed from (type, value, trace): 'type' shadowed the
        # builtin.  Positional protocol call, so callers are unaffected.
        self.destroy()

    def create(self):
        """Save terminal settings and switch the std fds into streaming mode."""
        if os.isatty(sys.stdin.fileno()):
            self.settings = termios.tcgetattr(sys.stdin.fileno())
        else:
            # Nothing to restore later (see destroy()).
            self.settings = None
        if self.socket_in is not None:
            self.set_blocking(sys.stdin, False)
        self.set_blocking(sys.stdout, True)
        self.set_blocking(sys.stderr, True)
        if self.raw:
            tty.setraw(sys.stdin.fileno())

    def set_blocking(self, file, blocking):
        """Set (blocking=True) or clear O_NONBLOCK on *file*'s descriptor."""
        fd = file.fileno()
        flags = fcntl.fcntl(fd, fcntl.F_GETFL)
        flags = (flags & ~os.O_NONBLOCK) if blocking else (flags | os.O_NONBLOCK)
        fcntl.fcntl(fd, fcntl.F_SETFL, flags)

    def run(self):
        """Start the pump threads and block until the receive sides finish."""
        if self.socket_in is not None:
            self.start_background_thread(target=self.send, args=(self.socket_in, sys.stdin))
        recv_threads = []
        if self.socket_out is not None:
            recv_threads.append(self.start_background_thread(target=self.recv, args=(self.socket_out, sys.stdout)))
        if self.socket_err is not None:
            recv_threads.append(self.start_background_thread(target=self.recv, args=(self.socket_err, sys.stderr)))
        for t in recv_threads:
            t.join()

    def start_background_thread(self, **kwargs):
        """Spawn and return a daemon thread (won't block interpreter exit)."""
        thread = threading.Thread(**kwargs)
        thread.daemon = True
        thread.start()
        return thread

    def recv(self, socket, stream):
        """Copy everything received on *socket* to *stream* until EOF/error."""
        try:
            while True:
                chunk = socket.recv(4096)
                if chunk:
                    stream.write(chunk)
                    stream.flush()
                else:
                    break
        except Exception as e:
            log.debug(e)

    def send(self, socket, stream):
        """Copy *stream* to *socket* one character at a time until EOF."""
        while True:
            chunk = stream.read(1)
            # Bug fix: EOF was detected with `chunk == ''`, which never
            # matches the b'' returned by binary streams and caused a busy
            # loop at EOF.  An empty read of either type means EOF.
            if not chunk:
                socket.close()
                break
            else:
                try:
                    socket.send(chunk)
                except Exception as e:
                    # A broken pipe just means the peer went away.
                    if hasattr(e, 'errno') and e.errno == errno.EPIPE:
                        break
                    else:
                        raise e

    def destroy(self):
        """Restore the saved terminal settings (if any) and flush stdout."""
        if self.settings is not None:
            termios.tcsetattr(self.stdin_fileno, termios.TCSADRAIN, self.settings)
        sys.stdout.flush()
if __name__ == '__main__':
    # Manual smoke test: bridge the local terminal to a websocket endpoint.
    import websocket
    if len(sys.argv) != 2:
        sys.stderr.write("Usage: python socketclient.py WEBSOCKET_URL\n")
        sys.exit(1)
    url = sys.argv[1]
    socket = websocket.create_connection(url)
    print("connected\r")
    # Bug fix: SocketClient has no 'interactive' parameter, so the original
    # call raised TypeError.  The websocket feeds stdin via 'socket_in'.
    # NOTE(review): to also print server replies, socket_out=socket would be
    # needed — confirm intended direction against the remotty original.
    with SocketClient(socket_in=socket) as client:
        client.run()
| StarcoderdataPython |
5051979 | import argparse
import datetime
import gym
import numpy as np
import itertools
import torch
import imageio
import envs
from torch.utils.data import DataLoader, ConcatDataset
from padding_onehot.replay_memory_dataset import ReplayMemoryDataset
from padding_onehot.skeleton_encoder import SkeletonEncoder
from padding_onehot.motion_encoder import MotionEncoder
from padding_onehot.motion_decoder import MotionDecoder
from padding_onehot.model import VAE
# Command-line interface.
# NOTE(review): the description and several help strings ("Soft Actor-Critic",
# "checkpoint training model every # steps") look copied from an SAC training
# script and do not describe what this evaluation/rendering script does.
parser = argparse.ArgumentParser(description='PyTorch Soft Actor-Critic Args')
parser.add_argument('--env1-name', default="ant",
                    help='Mujoco Gym environment (default: HalfCheetah-v2)')
parser.add_argument('--env2-name', default="ant3",
                    help='Mujoco Gym environment (default: HalfCheetah-v2)')
parser.add_argument('--model_path',
                    help='model path')
parser.add_argument('--seed', type=int, default=123456, metavar='N',
                    help='random seed (default: 123456)')
parser.add_argument('--hidden_dim', type=int, default=256, metavar='N',
                    help='hidden size (default: 256)')
parser.add_argument('--latent_dim', type=int, default=10,
                    help='Encoder latent dimension')
parser.add_argument('--cuda', action="store_true",
                    help='run on CUDA (default: False)')
parser.add_argument('--actor_path',
                    help='checkpoint training model every # steps')
parser.add_argument('--agent_memory1', default='data/ant.memory',
                    help='Path for saved replay memory')
parser.add_argument('--video_file_name',
                    help='output file name')
args = parser.parse_args()
# Environment
# env = NormalizedActions(gym.make(args.env_name))
env1 = envs.load(args.env1_name)  # environment used for rendering below
env2 = envs.load(args.env2_name)  # NOTE(review): loaded but only closed at the end -- unused otherwise
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# Agent
device = torch.device("cuda" if args.cuda else "cpu")
# Replay memory for env1; the tensor tags every sample with the one-hot label
# [1, 0] -- presumably identifying skeleton/morphology 1. TODO confirm.
dataset1 = ReplayMemoryDataset(args.agent_memory1, torch.tensor([1., 0.]))
state_size = env1.observation_space.shape[0]
# VAE pieces: a motion encoder over raw states, a skeleton encoder over the
# 2-dim label, and a decoder that consumes both latents concatenated.
motion_encoder = MotionEncoder(state_size,
                               hidden_dim=args.hidden_dim,
                               latent_dim=args.latent_dim).to(device=device)
skeleton_encoder = SkeletonEncoder(2,
                                   hidden_dim=args.hidden_dim,
                                   latent_dim=args.latent_dim).to(device=device)
decoder = MotionDecoder(args.latent_dim * 2,
                        hidden_dim=args.hidden_dim,
                        output_dim=state_size).to(device=device)
model = VAE(motion_encoder, skeleton_encoder, decoder)
model.load_model(args.model_path)
# Evaluation loop
total_numsteps = 0  # NOTE(review): never updated below
avg_reward = 0.     # NOTE(review): never updated below
render_env = env1
state = render_env.reset()
# Reconstruct each stored state through the VAE, push the reconstruction into
# the simulator, and record the rendered frames (first ~1000 samples).
with imageio.get_writer(args.video_file_name, fps=30) as video:
    for idx, x, in enumerate(dataset1):
        state = x[0]  # stored observation
        label = x[5]  # presumably the skeleton one-hot -- verify dataset layout
        x_hat, _, _ = model((state, label))
        render_env.set_to_observation(x_hat.detach().numpy())
        video.append_data(render_env.render('rgb_array'))
        if idx > 1000:
            break
env1.close()
env2.close()
| StarcoderdataPython |
def response(hey_bob):
    """Return Bob's reply to a remark.

    Bob answers "Sure." to questions, "Whoa, chill out!" to shouting,
    "Calm down, I know what I'm doing!" to shouted questions,
    "Fine. Be that way!" to silence, and "Whatever." to anything else.
    """
    remark = hey_bob.strip()
    if not remark:
        return "Fine. Be that way!"
    is_question = remark.endswith('?')
    is_shout = remark.isupper()  # False when there are no cased characters
    if is_question and is_shout:
        return "Calm down, I know what I'm doing!"
    if is_question:
        return "Sure."
    if is_shout:
        return "Whoa, chill out!"
    return "Whatever."
# The key was the string method `isupper()`. It handles most edge cases perfectly.
# Link: <https://www.w3schools.com/python/ref_string_isupper.asp>
| StarcoderdataPython |
5013962 | from datetime import datetime
from typing import List
from fastapi_mqtt import FastMQTT
from app import logger
from app.cache.model import Product as CacheProduct, Coupon as CacheCoupon
from app.config.config import COUPON_PREDICTION_TOPIC_NAME
from app.event_emitters.model import Customer, PredictionResult, RecommendedCoupon
class PredictionProducer:
    """Publishes coupon-prediction results to the configured MQTT topic."""

    def __init__(self, mqtt: FastMQTT):
        # Keep a handle on the shared MQTT client and the topic from config.
        self._mqtt = mqtt
        self._topic_name = COUPON_PREDICTION_TOPIC_NAME

    async def publish(self, customer_id: str, coupon_info: CacheCoupon, products: List[CacheProduct]):
        """Serialize a prediction for `customer_id` and publish it."""
        payload = self._create_message(customer_id, coupon_info, products)
        logger.info(f'Publishing message: {payload}')
        self._mqtt.publish(self._topic_name, payload)

    def _create_message(self, customer_id: str, coupon_info: CacheCoupon, products: List[CacheProduct]) -> str:
        """Build the JSON payload: customer + recommended coupon + UTC timestamp."""
        coupon = RecommendedCoupon(
            products=products,
            **coupon_info.dict()
        )
        prediction = PredictionResult(
            customer=Customer(customer_id=customer_id),
            coupon=coupon,
            ts=datetime.utcnow().timestamp())
        return prediction.json()
| StarcoderdataPython |
5194135 | <reponame>nortti/trump-tweet-reception-predictor
#!/usr/bin/env python
import os
import sys
import json
import urllib
import urllib.parse  # guarantee the `parse` submodule is bound for urlencode()

from requests_oauthlib import OAuth1Session
def get_tweets_json(screen_name):
    """Fetch up to ~3200 recent original tweets for `screen_name`.

    Reads OAuth credentials from the `client_key`/`client_secret` environment
    variables, pages through the Twitter user_timeline API, and returns the
    combined tweet list serialized as a JSON string. Exits the process on
    missing user / empty timeline / API errors.
    """
    client_key = os.environ['client_key']
    client_secret = os.environ['client_secret']
    twitter = OAuth1Session(client_key, client_secret)
    base_url = 'https://api.twitter.com/1.1/statuses/user_timeline.json?'
    params = {'screen_name': screen_name,
              'count': 200,  # Maximum for each request
              'exclude_replies': True,  # Don't need replies
              'include_rts': False}  # Don't want retweets
    url = base_url + urllib.parse.urlencode(params)
    json_data = twitter.get(url).json()
    if not json_data:
        sys.exit('User has no tweets')
    if 'errors' in json_data:
        error = json_data.get('errors')[0]
        # BUG FIX: was `is 34`, which compares identity and only worked via
        # CPython's small-int cache; value equality is what's meant here.
        if error.get('code') == 34:
            sys.exit('User does not exist')
        else:
            sys.exit(error)
    print('Getting tweets from %s...' % screen_name)
    # A single response includes up to 200 tweets. We can get as many as the
    # API lets us (up to 3200) by continually making requests with the max_id
    # parameter set to the id of the last tweet in the previous response - 1,
    # as described in
    # https://developer.twitter.com/en/docs/tweets/timelines/guides/working-with-timelines
    while True:
        max_id = json_data[-1]['id'] - 1
        next_url = url + '&max_id=' + str(max_id)
        tweet_batch = twitter.get(next_url).json()
        if not tweet_batch:
            break
        else:
            json_data += tweet_batch
    return json.dumps(json_data)
| StarcoderdataPython |
188196 | <gh_stars>0
from configparser import ConfigParser
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
# Build the SQLAlchemy engine/session from db.ini at import time.
config = ConfigParser()
config.read('db.ini')  # silently yields an empty config if the file is missing
# [database] type selects the backend; each backend has its own section.
if config['database']['type'] == 'sqlite':
    path = config['sqlite']['path']
    engine = create_engine("sqlite:///" + path, echo=False, pool_recycle=7200)
elif config['database']['type'] == 'mysql':
    db_conf = config['mysql']
    # NOTE(review): credentials are interpolated straight into the URL;
    # special characters in the password would need URL-escaping.
    engine = create_engine(f"mysql://{db_conf['user']}:{db_conf['password']}@{db_conf['host']}/{db_conf['database']}",
                           echo=False, pool_recycle=7200)
else:
    raise Exception("You should configure database correctly")
# Declarative base plus a module-level session shared by importers.
Base = declarative_base()
Session = sessionmaker(bind=engine)
session = Session()
| StarcoderdataPython |
3426592 | <gh_stars>1-10
# TOOL gunzip.py: "Extract .gz file" (Extract a gzip file, which usually has a file extension .gz)
# INPUT input_file: "Gzip file" TYPE GENERIC (Gzip compressed file)
# OUTPUT output_file: "Extracted file"
import gzip
import shutil
from tool_utils import *
def main():
    """Extract the gzip file 'input_file' to 'output_file' and set the
    output dataset name to the input name minus its '.gz' suffix."""
    # BUG FIX: the input handle was opened without a context manager and
    # leaked if the copy raised; both files are now closed deterministically.
    with gzip.open('input_file', 'rb') as infile:
        with open('output_file', 'wb') as outfile:
            # copy in chunks so large archives don't load into memory at once
            shutil.copyfileobj(infile, outfile)

    # set dataset name from the original input's name
    input_name = read_input_definitions()['input_file']
    output_names = {'output_file': remove_postfix(input_name, '.gz')}
    write_output_definitions(output_names)

if __name__ == "__main__":
    main()
| StarcoderdataPython |
105114 | import numpy as np
from scipy.interpolate import RectBivariateSpline
def TemplateCorrection(T, It1, rect, p0 = np.zeros(2)):
    """Translation-only Lucas-Kanade alignment of template ``T`` inside ``It1``.

    Iteratively refines a 2-D translation until the squared parameter update
    falls below a threshold, then shifts ``rect`` in place by the found offset.

    Args:
        T: grayscale template patch (2-D array).
        It1: grayscale search image (2-D array).
        rect: [x1, y1, x2, y2] template box in It1 coordinates; MUTATED in place.
        p0: initial translation guess (dx, dy).
            NOTE(review): mutable default np.zeros(2) is shared across calls and
            is mutated below -- callers should pass p0 explicitly.
    """
    threshold = 0.1  # stop once the squared parameter update is this small
    x1_t, y1_t, x2_t, y2_t = rect[0], rect[1], rect[2], rect[3]
    Iy, Ix = np.gradient(It1)
    rows_img, cols_img = It1.shape
    rows_rect, cols_rect = T.shape
    # Seed dp large so the while-loop condition is true on the first pass.
    dp = [[cols_img], [rows_img]]
    # what can be precomputed: splines for sub-pixel sampling of the image
    # and its gradients.
    y = np.arange(0, rows_img, 1)
    x = np.arange(0, cols_img, 1)
    spline1 = RectBivariateSpline(y, x, It1)
    spline_gx = RectBivariateSpline(y, x, Ix)
    spline_gy = RectBivariateSpline(y, x, Iy)
    # Jacobian of a pure-translation warp is the identity.
    jac = np.array([[1,0],[0,1]])
    while np.square(dp).sum() > threshold:
        # Shift the template box by the current translation estimate.
        x1_w, y1_w = x1_t + p0[0], y1_t + p0[1]
        x2_w, y2_w = x2_t + p0[0], y2_t + p0[1]
        cw = np.linspace(x1_w, x2_w, cols_rect)
        rw = np.linspace(y1_w, y2_w, rows_rect)
        ccw, rrw = np.meshgrid(cw, rw)
        # Sample the warped window at sub-pixel locations.
        warpImg = spline1.ev(rrw, ccw)
        #compute error image
        err = T - warpImg
        errImg = err.reshape(-1,1)
        #compute gradient at the warped locations
        Ix_w = spline_gx.ev(rrw, ccw)
        Iy_w = spline_gy.ev(rrw, ccw)
        #I is (n,2)
        I = np.vstack((Ix_w.ravel(),Iy_w.ravel())).T
        #computer Hessian (Gauss-Newton approximation)
        delta = I @ jac
        #H is (2,2)
        H = delta.T @ delta
        #compute dp
        #dp is (2,2)@(2,n)@(n,1) = (2,1)
        dp = np.linalg.inv(H) @ (delta.T) @ errImg
        #update parameters (accumulated translation)
        p0[0] += dp[0,0]
        p0[1] += dp[1,0]
    # Apply the final accumulated translation to the caller's rect in place.
    rect[0] += p0[0]
    rect[1] += p0[1]
    rect[2] += p0[0]
    rect[3] += p0[1]
6431393 | import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from matplotlib import patheffects, patches
def show_img(img, figsize=None, fig=None, ax=None):
    """Show `img` on `ax` (creating a new figure/axes when none given),
    hide both axis decorations, and return (fig, ax)."""
    if not ax:
        fig, ax = plt.subplots(figsize=figsize)
    ax.imshow(img)
    for axis in (ax.get_xaxis(), ax.get_yaxis()):
        axis.set_visible(False)
    return fig, ax
def draw_outline(obj, line_width):
    """Give a matplotlib artist a black outline of `line_width`, keeping the
    normal rendering on top so light text/edges stay readable on any image."""
    effects = [
        patheffects.Stroke(linewidth=line_width, foreground='black'),
        patheffects.Normal(),
    ]
    obj.set_path_effects(effects)
def draw_rect(ax, box):
    """Draw `box` = (x, y, w, h) as an outlined white rectangle on `ax`."""
    xy = box[:2]
    size = box[-2:]
    rectangle = patches.Rectangle(xy, *size, fill=False, edgecolor='white', lw=2)
    patch = ax.add_patch(rectangle)
    draw_outline(patch, 4)
def draw_text(ax, xy, txt, sz=14):
    """Render bold white text with a black outline at `xy` on `ax`."""
    label = ax.text(
        *xy, txt, verticalalignment='top', color='white', fontsize=sz, weight='bold'
    )
    draw_outline(label, 1)
| StarcoderdataPython |
246323 | <filename>Fancy_aggregations/supervised_MPA.py
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 30 13:30:43 2020
@author: javi-
"""
import numpy as np
import sklearn.linear_model
from . import penalties as pn
from . import binary_parser as bp
# =============================================================================
# MPA ALPHA FORWARDS
# =============================================================================
def mpa_aggregation(logits, agg1, agg2, alpha, keepdims=False):
    """Two-phase multimodal penalty aggregation.

    Phase 1 collapses each modality's (bands, samples, classes) logits over
    the band axis with agg1 and its per-modality alpha[i]; phase 2 fuses the
    per-modality results with agg2 and alpha[-1].
    """
    num_modalities = len(logits)
    _, samples, classes = logits[0].shape
    fused = np.zeros((num_modalities, samples, classes))
    for idx, modality_logits in enumerate(logits):
        fused[idx] = agg1(modality_logits, axis=0, keepdims=False, alpha=alpha[idx])
    return agg2(fused, axis=0, keepdims=keepdims, alpha=alpha[-1])
def logistic_alpha_forward(X, cost_convex, clf):
    '''
    Penalty-aggregate X using a per-sample alpha predicted by a fitted model.

    X shape: (bandas, samples, clases)
    out shape: (samples, clases)

    cost_convex: cost function of signature (real, yhat, axis, alpha).
    clf: fitted regressor (see learn_model) that maps a sample's flattened
         (bands * classes) logits to a single alpha.
    '''
    # (bands, samples, classes) -> (samples, bands*classes) feature matrix,
    # matching the layout used at training time in learn_model.
    reformed_X = np.swapaxes(X, 0, 1)
    reformed_X = reformed_X.reshape((reformed_X.shape[0], reformed_X.shape[1]*reformed_X.shape[2]))
    alphas = clf.predict(reformed_X)
    result = np.zeros((X.shape[1], X.shape[2]))
    for sample in range(X.shape[1]):
        # Freeze this sample's predicted alpha inside the cost, then
        # penalty-aggregate its (bands, classes) slice over the band axis.
        alpha_cost = lambda real, yhat, axis: cost_convex(real, yhat, axis, alphas[sample])
        result[sample] = pn.penalty_aggregation(X[:, sample, :], [bp.parse(x) for x in bp.classic_aggs], axis=0, keepdims=False, cost=alpha_cost)
    return result
def multimodal_alpha_forward(X, cost, cost2, alpha, agg_set=bp.classic_aggs):
    '''
    Two-phase MPA over a list of modalities.

    X shape: list of n arrays (bandas, samples, clases)
    alpha: one alpha per modality plus a final one for phase 2.
    out shape: (samples, clases)
    '''
    base_aggs = [bp.parse(name) for name in agg_set]

    def phase_one(X0, alpha, keepdims=False, axis=0):
        # Per-modality aggregation with the phase-1 cost at this alpha.
        return pn.penalty_aggregation(
            X0, base_aggs, axis=axis, keepdims=keepdims,
            cost=lambda real, yhat, axis: cost(real, yhat, axis, alpha=alpha))

    def phase_two(X0, alpha, keepdims=False, axis=0):
        # Cross-modality fusion with the phase-2 cost at this alpha.
        return pn.penalty_aggregation(
            X0, base_aggs, axis=axis, keepdims=keepdims,
            cost=lambda real, yhat, axis: cost2(real, yhat, axis, alpha=alpha))

    return mpa_aggregation(X, phase_one, phase_two, alpha, keepdims=False)
# =============================================================================
# LEARN ALPHA - VARIABILITY MAX ALGORITHM UNI-MODAL
# =============================================================================
def generate_real_alpha(X, y, aggs, cost, opt=1):
    """Grid-search a target alpha for one sample's logits.

    Scans alpha over [0.01, 1.01) in steps of 0.1; `a` keeps the FIRST alpha
    whose penalty aggregation classifies X as label y, `b` the LAST such alpha
    (b stays None unless at least two alphas succeed). Missing values fall
    back to 0.5.

    opt == 1: return whichever of a/b lies closer to 0.5;
    opt == 2: return their midpoint.
    NOTE(review): any other opt value falls through and returns None.
    """
    a = None
    b = None
    for alpha in np.arange(0.01, 1.01, 0.1):
        alpha_cost = lambda X, yhat, axis: cost(X, yhat, axis, alpha)
        pagg = pn.penalty_aggregation(X, [bp.parse(x) for x in aggs], axis=0, keepdims=False, cost=alpha_cost)
        if np.argmax(pagg) == y:
            if a is None:
                a = alpha
            else:
                b = alpha
    # Default both endpoints to the neutral alpha when never (or once) correct.
    if a is None:
        a = 0.5
    if b is None:
        b = 0.5
    # Distances to 0.5 decide which endpoint to keep under opt == 1.
    d1 = np.abs(a - 0.5)
    d2 = np.abs(b - 0.5)
    if opt == 1:
        if d1 <= d2:
            return a
        else:
            return b
    elif opt == 2:
        return (a + b) / 2
def generate_train_data_alpha(logits, labels, aggs=bp.classic_aggs, cost=pn.cost_functions[0], opt=1):
    '''
    Build per-sample target alphas for training an alpha-prediction model.

    logits: array (bands, samples, classes); labels: (samples,).
    Returns a (samples,) array of alphas from generate_real_alpha.
    '''
    _, samples, _ = logits.shape
    targets = np.zeros((samples,))
    for idx in range(samples):
        targets[idx] = generate_real_alpha(logits[:, idx, :], labels[idx], aggs, cost, opt=opt)
    return targets
def learn_model(X, y, cost, aggs=bp.classic_aggs, opt=1):
    '''
    Fit a linear regressor mapping flattened per-sample logits to target alphas.

    X shape: (bands, samples, classes); y: labels (samples,).
    Returns the fitted sklearn LinearRegression.
    '''
    # (bands, samples, classes) -> (samples, bands*classes) feature matrix.
    samples_first = np.swapaxes(X, 0, 1)
    features = samples_first.reshape((samples_first.shape[0], samples_first.shape[1] * samples_first.shape[2]))
    # Supervision: the grid-searched "best" alpha per sample.
    targets = generate_train_data_alpha(X, y, aggs=aggs, cost=cost, opt=opt)
    return sklearn.linear_model.LinearRegression().fit(features, targets)
# =============================================================================
# MULTIMODAL ALPHA OPTIMIZATION - LEAST SQAURES VARAIBLITY + ACC ALGORITHM
# =============================================================================
def eval_alpha(alpha_v, y_hat, y):
    '''
    Score an alpha configuration: the mean of (a) how far each alpha sits from
    the extremes 0/1 (mean of min(alpha, 1-alpha)) and (b) classification
    accuracy of y_hat against y.
    '''
    variability = np.mean(np.minimum(alpha_v, 1 - alpha_v))
    accuracy = np.mean(np.equal(y_hat, y))
    return (variability + accuracy) / 2
def eval_conf(X, alpha, y, agg1, agg2):
    '''
    Run the two-phase MPA defined by (agg1, agg2, alpha) over X and score it.

    X: list of per-modality logit arrays (bands, samples, classes).
    alpha: alpha vector (one per modality plus one for the final phase).
    y: ground-truth labels, shape (samples,).
    Returns the combined variability/accuracy score from eval_alpha.
    '''
    predictions = np.argmax(mpa_aggregation(X, agg1, agg2, alpha), axis=1)
    return eval_alpha(alpha, predictions, y)
def gen_all_good_alpha_mff(X, y, costs, aggs=bp.choquet_family + bp.sugeno_family + bp.overlap, opt=1, four_class=False):
    '''
    Optimize the multimodal MPA alpha vector by bounded least squares,
    maximizing the combined variability/accuracy score of eval_conf.

    X: list of per-modality logit arrays; y: labels; costs: one cost callable
    per aggregation phase, each of signature (real, yhat, axis, alpha=...).
    Returns the optimized alpha vector (one entry per modality + one final).
    '''
    from scipy.optimize import least_squares
    # BUG FIXES vs. the original:
    #  * `range(costs)` raised TypeError (costs is a sequence) -> range(len(costs)).
    #  * the lambdas captured the loop variable `ix` late-bound, so every phase
    #    used the last cost; bind the cost per-iteration via a default argument.
    agg_phases = [
        (lambda X0, alpha, keepdims=False, axis=0, _cost=costs[ix]:
            pn.penalty_aggregation(X0, aggs, axis=axis, keepdims=keepdims,
                                   cost=lambda real, yhat, axis: _cost(real, yhat, axis, alpha=alpha)))
        for ix in range(len(costs))
    ]
    # BUG FIX: eval_conf takes separate phase-1/phase-2 aggregations, but the
    # original passed the whole list (arity error). NOTE(review): using the
    # first entry for phase 1 and the last for phase 2 -- confirm this matches
    # the intended phase/cost pairing.
    optimize_lambda = lambda alpha: -eval_conf(X, alpha, y, agg_phases[0], agg_phases[-1])  # negate: least_squares minimizes
    x0_alpha = np.array([0.5] * len(X) + [0.5])  # one alpha per modality + the phase-2 alpha
    res_1 = least_squares(optimize_lambda, x0_alpha, bounds=[0.0001, 0.9999])
    return res_1.x
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.