hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a02853e5bb0ab7ebe63cdf1b789207013fbe2bb
| 2,334
|
py
|
Python
|
estimagic/tests/differentiation/test_differentiation.py
|
Pascalheid/estimagic
|
2503eaf9553c9e09fab9014aa54e84ed83d563fa
|
[
"BSD-3-Clause"
] | null | null | null |
estimagic/tests/differentiation/test_differentiation.py
|
Pascalheid/estimagic
|
2503eaf9553c9e09fab9014aa54e84ed83d563fa
|
[
"BSD-3-Clause"
] | null | null | null |
estimagic/tests/differentiation/test_differentiation.py
|
Pascalheid/estimagic
|
2503eaf9553c9e09fab9014aa54e84ed83d563fa
|
[
"BSD-3-Clause"
] | null | null | null |
from itertools import product
from pathlib import Path
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from pandas.testing import assert_series_equal
from estimagic.differentiation.differentiation import gradient
from estimagic.differentiation.differentiation import hessian
from estimagic.differentiation.differentiation import jacobian
from estimagic.examples.logit import logit_loglike
from estimagic.examples.logit import logit_loglikeobs
@pytest.fixture()
def statsmodels_fixtures():
    """Load pre-computed statsmodels results used as expected values.

    The pickle bundles the logit data (`y`, `x`), the parameter vector and
    the expected gradient / jacobian / hessian.
    """
    fixtures_path = Path(__file__).resolve().parent / "diff_fixtures.pickle"
    fixtures = pd.read_pickle(fixtures_path)
    fixtures["params"] = fixtures["params"].to_frame()
    fixtures["gradient"].name = "gradient"
    return fixtures
to_test = list(product(["forward", "central", "backward"], [True, False]))


@pytest.mark.parametrize("method, extrapolation", to_test)
def test_gradient(statsmodels_fixtures, method, extrapolation):
    """Numerical gradient of the logit loglike matches the statsmodels result."""
    fix = statsmodels_fixtures
    calculated = gradient(
        logit_loglike,
        fix["params"],
        method=method,
        extrapolation=extrapolation,
        func_kwargs={"y": fix["y"], "x": fix["x"]},
        step_options={"step_ratio": 2.0},
    )
    assert_series_equal(calculated, fix["gradient"])
@pytest.mark.parametrize("method, extrapolation", to_test)
def test_jacobian(statsmodels_fixtures, method, extrapolation):
    """Numerical jacobian of the observation-level loglike matches statsmodels."""
    fix = statsmodels_fixtures
    calculated = jacobian(
        logit_loglikeobs,
        params=fix["params"],
        method=method,
        extrapolation=extrapolation,
        func_kwargs={"y": fix["y"], "x": fix["x"]},
        step_options={"step_ratio": 2.0},
    )
    assert_frame_equal(calculated, fix["jacobian"])
# Only the central method is implemented for second derivatives.
to_test_hess = [("central", True), ("central", False)]


@pytest.mark.parametrize("method, extrapolation", to_test_hess)
def test_hessian(statsmodels_fixtures, method, extrapolation):
    """Numerical hessian of the logit loglike matches the statsmodels result."""
    fix = statsmodels_fixtures
    calculated = hessian(
        logit_loglike,
        fix["params"],
        method=method,
        extrapolation=extrapolation,
        func_kwargs={"y": fix["y"], "x": fix["x"]},
        step_options={"step_ratio": 2.0},
    )
    assert_frame_equal(calculated, fix["hessian"])
| 29.923077
| 74
| 0.702656
|
4a0286f28a0983db381a730b8c8a3c02d2ffc624
| 2,130
|
py
|
Python
|
dbt_cloud/command/job/run.py
|
ernestoongaro/dbt-cloud-cli
|
4a6e88564d16991a7a6ae4f76820e58674dc353d
|
[
"Apache-2.0"
] | null | null | null |
dbt_cloud/command/job/run.py
|
ernestoongaro/dbt-cloud-cli
|
4a6e88564d16991a7a6ae4f76820e58674dc353d
|
[
"Apache-2.0"
] | null | null | null |
dbt_cloud/command/job/run.py
|
ernestoongaro/dbt-cloud-cli
|
4a6e88564d16991a7a6ae4f76820e58674dc353d
|
[
"Apache-2.0"
] | null | null | null |
import os
import requests
from typing import Optional, List
from pydantic import Field, validator
from dbt_cloud.command.command import DbtCloudCommand
from dbt_cloud.field import JOB_ID_FIELD
class DbtCloudJobRunCommand(DbtCloudCommand):
    """Triggers a dbt Cloud job run and returns a status JSON response."""

    # Pydantic model fields; each ``description`` doubles as user-facing help
    # text. Optional fields default to None and are presumably omitted from
    # the request payload when unset — confirm against get_payload().
    job_id: int = JOB_ID_FIELD
    cause: str = Field(
        default="Triggered via API",
        description="A text description of the reason for running this job",
    )
    git_sha: Optional[str] = Field(
        description="The git sha to check out before running this job"
    )
    git_branch: Optional[str] = Field(
        description="The git branch to check out before running this job"
    )
    schema_override: Optional[str] = Field(
        description="Override the destination schema in the configured target for this job"
    )
    dbt_version_override: Optional[str] = Field(
        description="Override the version of dbt used to run this job"
    )
    threads_override: Optional[int] = Field(
        description="Override the number of threads used to run this job"
    )
    target_name_override: Optional[str] = Field(
        description="Override the target.name context variable used when running this job"
    )
    generate_docs_override: Optional[bool] = Field(
        description="Override whether or not this job generates docs (true=yes, false=no)"
    )
    timeout_seconds_override: Optional[int] = Field(
        description="Override the timeout in seconds for this job"
    )
    steps_override: Optional[List[str]] = Field(
        description="Override the list of steps for this job"
    )

    @validator("steps_override")
    def check_steps_override_is_none_if_empty(cls, value):
        # Normalize an empty steps list to None so it behaves like "not set".
        return value or None

    @property
    def api_url(self) -> str:
        """Full endpoint URL for triggering a run of this job."""
        api_url = f"{super().api_url}/jobs/{self.job_id}/run/"
        return api_url

    def execute(self) -> requests.Response:
        """POST the trigger request and return the raw HTTP response."""
        response = requests.post(
            url=self.api_url,
            headers=self.request_headers,
            json=self.get_payload(),
        )
        return response
| 34.918033
| 91
| 0.682629
|
4a028714477c1cf639ea0944ac9355e57a0236b5
| 392
|
py
|
Python
|
examples/introduction.to.programming.with.turtle/for_all/5-2-3.Outer.triangle.py
|
strakam/PyEasyGraphics
|
57a586aa92385d26725d4ec3d61b2bbbe970195d
|
[
"BSD-3-Clause"
] | 5
|
2019-09-23T05:15:47.000Z
|
2021-01-17T08:06:47.000Z
|
examples/introduction.to.programming.with.turtle/for_all/5-2-3.Outer.triangle.py
|
strakam/PyEasyGraphics
|
57a586aa92385d26725d4ec3d61b2bbbe970195d
|
[
"BSD-3-Clause"
] | 3
|
2019-05-03T05:25:17.000Z
|
2021-04-15T04:53:16.000Z
|
examples/introduction.to.programming.with.turtle/for_all/5-2-3.Outer.triangle.py
|
strakam/PyEasyGraphics
|
57a586aa92385d26725d4ec3d61b2bbbe970195d
|
[
"BSD-3-Clause"
] | 4
|
2019-05-04T13:42:40.000Z
|
2021-04-15T10:38:48.000Z
|
from easygraphics.turtle import *
def outward_tri(size, level):
    """Draw a triangle with smaller outward triangles recursively on each edge.

    Recursion stops when ``level`` reaches zero; each recursive call halves
    the edge length.
    """
    if level == 0:
        return
    half = size / 2
    for _ in range(3):
        # First half of the edge, then the outward sub-triangle, then the rest.
        forward(half)
        lt(120)
        outward_tri(half, level - 1)
        rt(120)
        forward(half)
        rt(120)
def main():
    """Open the drawing canvas, draw the fractal, and wait for the user."""
    create_world(800, 600)
    set_speed(100)
    outward_tri(100, 4)
    pause()  # keep the window open until the user interacts
    close_world()
# Hand control to the easygraphics runtime, which calls main() for us.
easy_run(main)
| 17.818182
| 40
| 0.556122
|
4a0287a86b2fbb9578b7c13333948d663626f7ec
| 4,052
|
py
|
Python
|
entities/models.py
|
AdirthaBorgohain/Agency-CRM
|
9e9f377c5967fdd20230ab8b558623dc2a1a6403
|
[
"MIT"
] | null | null | null |
entities/models.py
|
AdirthaBorgohain/Agency-CRM
|
9e9f377c5967fdd20230ab8b558623dc2a1a6403
|
[
"MIT"
] | null | null | null |
entities/models.py
|
AdirthaBorgohain/Agency-CRM
|
9e9f377c5967fdd20230ab8b558623dc2a1a6403
|
[
"MIT"
] | null | null | null |
from django.db import models
from datetime import datetime
# Create your models here.
# Choice tuples for CharField(choices=...): (stored value, human-readable label).
CATEGORY_CHOICES = (
    ("Newspaper", "Newspaper"),
    ("Magazine", "Magazine")
)
LANGUAGE_CHOICES = (
    ("Assamese", "Assamese"),
    ("English", "English"),
    ("Hindi", "Hindi"),
    ("Bengali", "Bengali"),
    ("Others", "Others")
)
class Customer(models.Model):
    """A subscriber who receives publications and is billed via invoices."""

    # Custom string primary key; may contain '/' (see encoded_id).
    id = models.CharField(max_length=8, primary_key=True)
    name = models.CharField(max_length=100)
    address = models.CharField(max_length=100)
    contact = models.CharField(max_length=12, unique=True)

    def __str__(self):
        return self.name

    @property
    def encoded_id(self):
        # URL-safe form of the id: '/' would break URL path segments.
        return self.id.replace('/', '__')
class Agent(models.Model):
    """A delivery agent who earns a commission and is billed via bills."""

    # Custom string primary key; may contain '/' (see encoded_id).
    id = models.CharField(max_length=8, primary_key=True)
    name = models.CharField(max_length=100)
    address = models.CharField(max_length=100)
    contact = models.CharField(max_length=12, unique=True)
    commission = models.DecimalField(max_digits=10, decimal_places=2)

    def __str__(self):
        return self.name

    @property
    def encoded_id(self):
        # URL-safe form of the id: '/' would break URL path segments.
        return self.id.replace('/', '__')
class Product(models.Model):
    """A publication (newspaper or magazine) that can be ordered."""

    name = models.CharField(max_length=20, unique=True)
    language = models.CharField(
        max_length=10, choices=LANGUAGE_CHOICES, default="Assamese")
    category = models.CharField(
        max_length=10, choices=CATEGORY_CHOICES, default="Newspaper")
    price = models.DecimalField(max_digits=10, decimal_places=2)

    def __str__(self):
        return self.name
class Invoice(models.Model):
    """A customer-facing bill covering deliveries between two dates."""

    customer = models.ForeignKey(Customer, on_delete=models.CASCADE)
    create_date = models.DateField()
    start_date = models.DateField()
    end_date = models.DateField()
    additional_charges = models.DecimalField(max_digits=10, decimal_places=2, default=0)
    grand_total = models.DecimalField(max_digits=10, decimal_places=2)
    paid_amount = models.DecimalField(max_digits=10, decimal_places=2)
    is_paid = models.BooleanField(default=False)

    def __str__(self):
        # e.g. "John Doe (January)"
        return f'{self.customer.name} ({self.start_date.strftime("%B")})'

    @property
    def bill_period(self):
        """Human-readable billing period, e.g. '01/01/2021 -- 31/01/2021'."""
        start = self.start_date.strftime("%d/%m/%Y")
        end = self.end_date.strftime("%d/%m/%Y")
        return f'{start} -- {end}'
class Bill(models.Model):
    """An agent-facing bill covering deliveries between two dates."""

    agent = models.ForeignKey(Agent, on_delete=models.CASCADE)
    create_date = models.DateField()
    start_date = models.DateField()
    end_date = models.DateField()
    deductions = models.DecimalField(max_digits=10, decimal_places=2, default=0)
    prev_balance = models.DecimalField(max_digits=10, decimal_places=2)
    grand_total = models.DecimalField(max_digits=10, decimal_places=2)
    paid_amount = models.DecimalField(max_digits=10, decimal_places=2)
    is_paid = models.BooleanField(default=False)

    def __str__(self):
        # e.g. "Agent Name (January)"
        return f'{self.agent.name} ({self.start_date.strftime("%B")})'

    @property
    def bill_period(self):
        """Human-readable billing period, e.g. '01/01/2021 -- 31/01/2021'."""
        start = self.start_date.strftime("%d/%m/%Y")
        end = self.end_date.strftime("%d/%m/%Y")
        return f'{start} -- {end}'
class OrderDetails(models.Model):
    """One product line on an invoice: product, quantity and prices."""

    invoice = models.ForeignKey(Invoice, on_delete=models.CASCADE)
    product = models.ForeignKey(Product, on_delete=models.DO_NOTHING)
    quantity = models.IntegerField()
    price = models.DecimalField(max_digits=10, decimal_places=2)
    net_price = models.DecimalField(max_digits=10, decimal_places=2)

    def __str__(self):
        # e.g. "John Doe (January)-The Daily"
        month = self.invoice.start_date.strftime("%B")
        return f'{self.invoice.customer.name} ({month})-{self.product.name}'
class BillDetails(models.Model):
    """One product line on an agent bill: product, quantity and prices."""

    bill = models.ForeignKey(Bill, on_delete=models.CASCADE)
    product = models.ForeignKey(Product, on_delete=models.DO_NOTHING)
    quantity = models.IntegerField()
    price = models.DecimalField(max_digits=10, decimal_places=2)
    net_price = models.DecimalField(max_digits=10, decimal_places=2)

    def __str__(self):
        # e.g. "Agent Name (January)-The Daily"
        month = self.bill.start_date.strftime("%B")
        return f'{self.bill.agent.name} ({month})-{self.product.name}'
| 34.931034
| 121
| 0.695459
|
4a0287f13e8f3080817e0cf7cf336e72d3888f0a
| 14,316
|
py
|
Python
|
person-detection-yolo-master/preprocessing.py
|
Jeevananthamcse/Palanisamy
|
9b62f7dbcb9f7a747e5ef5b722e07111a6648b3c
|
[
"Unlicense"
] | 2
|
2021-08-30T08:04:04.000Z
|
2021-09-27T06:01:05.000Z
|
person-detection-yolo-master/preprocessing.py
|
Jeevananthamcse/Palanisamy
|
9b62f7dbcb9f7a747e5ef5b722e07111a6648b3c
|
[
"Unlicense"
] | 1
|
2022-02-08T00:01:16.000Z
|
2022-02-08T00:01:16.000Z
|
person-detection-yolo-master/preprocessing.py
|
Jeevananthamcse/Palanisamy
|
9b62f7dbcb9f7a747e5ef5b722e07111a6648b3c
|
[
"Unlicense"
] | 1
|
2021-09-13T07:03:11.000Z
|
2021-09-13T07:03:11.000Z
|
import os
import cv2
import copy
import numpy as np
import imgaug as ia
from imgaug import augmenters as iaa
from keras import Sequence
import xml.etree.ElementTree as ET
from utils import BoundBox, bbox_iou
def parse_annotation(ann_dir, img_dir, labels=None):
    """Parse all Pascal-VOC style XML annotation files in *ann_dir*.

    Args:
        ann_dir: directory containing the annotation XML files. Filenames are
            joined by plain string concatenation, so it must end with a path
            separator.
        img_dir: prefix prepended to each image filename found in the XML.
        labels: optional list of label names to keep. Objects with other
            names are skipped. ``None`` or an empty list keeps every object.

    Returns:
        Tuple ``(all_imgs, seen_labels)``: ``all_imgs`` is a list of dicts with
        keys ``filename``, ``width``, ``height`` and ``object`` (a list of
        per-object dicts with ``name``/``xmin``/``ymin``/``xmax``/``ymax``);
        ``seen_labels`` maps every label name encountered (even filtered ones)
        to its occurrence count.
    """
    # Avoid the mutable-default-argument pitfall of the original signature.
    labels = [] if labels is None else labels
    all_imgs = []
    seen_labels = {}
    for ann in sorted(os.listdir(ann_dir)):
        img = {'object': []}
        tree = ET.parse(ann_dir + ann)
        # Substring matching on tags tolerates namespaced tag names.
        for elem in tree.iter():
            if 'filename' in elem.tag:
                img['filename'] = img_dir + elem.text
            if 'width' in elem.tag:
                img['width'] = int(elem.text)
            if 'height' in elem.tag:
                img['height'] = int(elem.text)
            if 'object' in elem.tag or 'part' in elem.tag:
                obj = {}
                for attr in list(elem):
                    if 'name' in attr.tag:
                        obj['name'] = attr.text
                        # Count every label, even ones filtered out below.
                        seen_labels[obj['name']] = seen_labels.get(obj['name'], 0) + 1
                        if len(labels) > 0 and obj['name'] not in labels:
                            # Unwanted label: stop parsing this object entirely.
                            break
                        else:
                            # The dict is appended before its bndbox is filled in;
                            # the coordinates are added in place below.
                            img['object'] += [obj]
                    if 'bndbox' in attr.tag:
                        for dim in list(attr):
                            if 'xmin' in dim.tag:
                                obj['xmin'] = int(round(float(dim.text)))
                            if 'ymin' in dim.tag:
                                obj['ymin'] = int(round(float(dim.text)))
                            if 'xmax' in dim.tag:
                                obj['xmax'] = int(round(float(dim.text)))
                            if 'ymax' in dim.tag:
                                obj['ymax'] = int(round(float(dim.text)))
        # Keep only images that ended up with at least one accepted object.
        if len(img['object']) > 0:
            all_imgs += [img]
    return all_imgs, seen_labels
class BatchGenerator(Sequence):
    """Keras ``Sequence`` producing augmented YOLO training batches.

    Each item is ``([x_batch, b_batch], y_batch)``:
      * ``x_batch`` — input images (normalized if ``norm`` is given),
      * ``b_batch`` — raw ground-truth boxes (for the custom YOLO loss),
      * ``y_batch`` — the desired network output grid.
    """

    def __init__(self, images,
                       config,
                       shuffle=True,
                       jitter=True,
                       norm=None):
        # images: list of annotation dicts as returned by parse_annotation.
        # config: dict of model hyper-parameters (ANCHORS, GRID_H/W, BATCH_SIZE, ...).
        # jitter: enable random scale/translate/flip augmentation.
        # norm: optional callable applied to each image; when None the images
        #       are left raw and debug boxes are drawn on them instead.
        self.generator = None
        self.images = images
        self.config = config
        self.shuffle = shuffle
        self.jitter = jitter
        self.norm = norm
        # Anchors are stored as BoundBoxes at the origin (width/height only).
        self.anchors = [BoundBox(0, 0, config['ANCHORS'][2*i], config['ANCHORS'][2*i+1]) for i in range(int(len(config['ANCHORS'])//2))]
        ### augmentors by https://github.com/aleju/imgaug
        sometimes = lambda aug: iaa.Sometimes(0.5, aug)
        # Define our sequence of augmentation steps that will be applied to every image
        # All augmenters with per_channel=0.5 will sample one value _per image_
        # in 50% of all cases. In all other cases they will sample new values
        # _per channel_.
        self.aug_pipe = iaa.Sequential(
            [
                # apply the following augmenters to most images
                #iaa.Fliplr(0.5), # horizontally flip 50% of all images
                #iaa.Flipud(0.2), # vertically flip 20% of all images
                #sometimes(iaa.Crop(percent=(0, 0.1))), # crop images by 0-10% of their height/width
                sometimes(iaa.Affine(
                    #scale={"x": (0.8, 1.2), "y": (0.8, 1.2)}, # scale images to 80-120% of their size, individually per axis
                    #translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)}, # translate by -20 to +20 percent (per axis)
                    #rotate=(-5, 5), # rotate by -45 to +45 degrees
                    #shear=(-5, 5), # shear by -16 to +16 degrees
                    #order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)
                    #cval=(0, 255), # if mode is constant, use a cval between 0 and 255
                    #mode=ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples)
                )),
                # execute 0 to 5 of the following (less important) augmenters per image
                # don't execute all of them, as that would often be way too strong
                iaa.SomeOf((0, 5),
                    [
                        #sometimes(iaa.Superpixels(p_replace=(0, 1.0), n_segments=(20, 200))), # convert images into their superpixel representation
                        iaa.OneOf([
                            iaa.GaussianBlur((0, 3.0)), # blur images with a sigma between 0 and 3.0
                            iaa.AverageBlur(k=(2, 7)), # blur image using local means with kernel sizes between 2 and 7
                            iaa.MedianBlur(k=(3, 11)), # blur image using local medians with kernel sizes between 2 and 7
                        ]),
                        iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)), # sharpen images
                        #iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)), # emboss images
                        # search either for all edges or for directed edges
                        #sometimes(iaa.OneOf([
                        #    iaa.EdgeDetect(alpha=(0, 0.7)),
                        #    iaa.DirectedEdgeDetect(alpha=(0, 0.7), direction=(0.0, 1.0)),
                        #])),
                        iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5), # add gaussian noise to images
                        iaa.OneOf([
                            iaa.Dropout((0.01, 0.1), per_channel=0.5), # randomly remove up to 10% of the pixels
                            #iaa.CoarseDropout((0.03, 0.15), size_percent=(0.02, 0.05), per_channel=0.2),
                        ]),
                        #iaa.Invert(0.05, per_channel=True), # invert color channels
                        iaa.Add((-10, 10), per_channel=0.5), # change brightness of images (by -10 to 10 of original value)
                        iaa.Multiply((0.5, 1.5), per_channel=0.5), # change brightness of images (50-150% of original value)
                        iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5), # improve or worsen the contrast
                        #iaa.Grayscale(alpha=(0.0, 1.0)),
                        #sometimes(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)), # move pixels locally around (with random strengths)
                        #sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05))) # sometimes move parts of the image around
                    ],
                    random_order=True
                )
            ],
            random_order=True
        )
        if shuffle: np.random.shuffle(self.images)

    def __len__(self):
        """Number of batches per epoch (last partial batch rounded up)."""
        return int(np.ceil(float(len(self.images))/self.config['BATCH_SIZE']))

    def num_classes(self):
        """Number of object classes the model is trained on."""
        return len(self.config['LABELS'])

    def size(self):
        """Total number of annotated images."""
        return len(self.images)

    def load_annotation(self, i):
        """Return the boxes of image *i* as an array of [xmin, ymin, xmax, ymax, class]."""
        annots = []
        for obj in self.images[i]['object']:
            annot = [obj['xmin'], obj['ymin'], obj['xmax'], obj['ymax'], self.config['LABELS'].index(obj['name'])]
            annots += [annot]
        # np.array([[]]) keeps a 2-D shape even when there are no objects.
        if len(annots) == 0: annots = [[]]
        return np.array(annots)

    def load_image(self, i):
        """Read image *i* from disk (BGR, as returned by cv2.imread)."""
        return cv2.imread(self.images[i]['filename'])

    def __getitem__(self, idx):
        """Build batch *idx*: augment images and encode their boxes for YOLO."""
        l_bound = idx*self.config['BATCH_SIZE']
        r_bound = (idx+1)*self.config['BATCH_SIZE']
        # The last batch is shifted back so every batch has full BATCH_SIZE
        # (images at the end may therefore appear in two batches).
        if r_bound > len(self.images):
            r_bound = len(self.images)
            l_bound = r_bound - self.config['BATCH_SIZE']
        instance_count = 0
        x_batch = np.zeros((r_bound - l_bound, self.config['IMAGE_H'], self.config['IMAGE_W'], 3))                         # input images
        b_batch = np.zeros((r_bound - l_bound, 1     , 1     , 1    ,  self.config['TRUE_BOX_BUFFER'], 4))   # list of self.config['TRUE_self.config['BOX']_BUFFER'] GT boxes
        y_batch = np.zeros((r_bound - l_bound, self.config['GRID_H'],  self.config['GRID_W'], self.config['BOX'], 4+1+len(self.config['LABELS'])))                # desired network output
        for train_instance in self.images[l_bound:r_bound]:
            # augment input image and fix object's position and size
            img, all_objs = self.aug_image(train_instance, jitter=self.jitter)
            # construct output from object's x, y, w, h
            true_box_index = 0
            for obj in all_objs:
                if obj['xmax'] > obj['xmin'] and obj['ymax'] > obj['ymin'] and obj['name'] in self.config['LABELS']:
                    # Box center in grid-cell units.
                    center_x = .5*(obj['xmin'] + obj['xmax'])
                    center_x = center_x / (float(self.config['IMAGE_W']) / self.config['GRID_W'])
                    center_y = .5*(obj['ymin'] + obj['ymax'])
                    center_y = center_y / (float(self.config['IMAGE_H']) / self.config['GRID_H'])
                    grid_x = int(np.floor(center_x))
                    grid_y = int(np.floor(center_y))
                    if grid_x < self.config['GRID_W'] and grid_y < self.config['GRID_H']:
                        obj_indx  = self.config['LABELS'].index(obj['name'])
                        center_w = (obj['xmax'] - obj['xmin']) / (float(self.config['IMAGE_W']) / self.config['GRID_W']) # unit: grid cell
                        center_h = (obj['ymax'] - obj['ymin']) / (float(self.config['IMAGE_H']) / self.config['GRID_H']) # unit: grid cell
                        box = [center_x, center_y, center_w, center_h]
                        # find the anchor that best predicts this box
                        best_anchor = -1
                        max_iou     = -1
                        # Compare shapes only: both anchor and box sit at the origin.
                        shifted_box = BoundBox(0,
                                               0,
                                               center_w,
                                               center_h)
                        for i in range(len(self.anchors)):
                            anchor = self.anchors[i]
                            iou    = bbox_iou(shifted_box, anchor)
                            if max_iou < iou:
                                best_anchor = i
                                max_iou     = iou
                        # assign ground truth x, y, w, h, confidence and class probs to y_batch
                        y_batch[instance_count, grid_y, grid_x, best_anchor, 0:4] = box
                        y_batch[instance_count, grid_y, grid_x, best_anchor, 4  ] = 1.
                        y_batch[instance_count, grid_y, grid_x, best_anchor, 5+obj_indx] = 1
                        # assign the true box to b_batch
                        b_batch[instance_count, 0, 0, 0, true_box_index] = box
                        true_box_index += 1
                        # Ring buffer: overwrite the oldest box past the buffer size.
                        true_box_index = true_box_index % self.config['TRUE_BOX_BUFFER']
            # assign input image to x_batch
            if self.norm != None:
                x_batch[instance_count] = self.norm(img)
            else:
                # plot image and bounding boxes for sanity check
                for obj in all_objs:
                    if obj['xmax'] > obj['xmin'] and obj['ymax'] > obj['ymin']:
                        cv2.rectangle(img[:,:,::-1], (obj['xmin'],obj['ymin']), (obj['xmax'],obj['ymax']), (255,0,0), 3)
                        cv2.putText(img[:,:,::-1], obj['name'],
                                    (obj['xmin']+2, obj['ymin']+12),
                                    0, 1.2e-3 * img.shape[0],
                                    (0,255,0), 2)
                x_batch[instance_count] = img
            # increase instance counter in current batch
            instance_count += 1
        #print(' new batch created', idx)
        return [x_batch, b_batch], y_batch

    def on_epoch_end(self):
        """Reshuffle the dataset between epochs when shuffling is enabled."""
        if self.shuffle: np.random.shuffle(self.images)

    def aug_image(self, train_instance, jitter):
        """Load one image, optionally jitter it, and adjust its boxes to match.

        Returns the resized RGB image and the (deep-copied) adjusted objects.
        """
        image_name = train_instance['filename']
        image = cv2.imread(image_name)
        if image is None: print('Cannot find ', image_name)
        h, w, c = image.shape
        all_objs = copy.deepcopy(train_instance['object'])
        if jitter:
            ### scale the image
            scale = np.random.uniform() / 10. + 1.
            image = cv2.resize(image, (0,0), fx = scale, fy = scale)
            ### translate the image
            max_offx = (scale-1.) * w
            max_offy = (scale-1.) * h
            offx = int(np.random.uniform() * max_offx)
            offy = int(np.random.uniform() * max_offy)
            # Crop back to the original size after the scale/translate.
            image = image[offy : (offy + h), offx : (offx + w)]
            ### flip the image
            flip = np.random.binomial(1, .5)
            if flip > 0.5: image = cv2.flip(image, 1)
            image = self.aug_pipe.augment_image(image)
        # resize the image to standard size
        # NOTE(review): cv2.resize expects (width, height); IMAGE_H is passed
        # first here, which only matches if IMAGE_H == IMAGE_W — confirm.
        image = cv2.resize(image, (self.config['IMAGE_H'], self.config['IMAGE_W']))
        image = image[:,:,::-1]  # BGR -> RGB
        # fix object's position and size
        for obj in all_objs:
            for attr in ['xmin', 'xmax']:
                if jitter: obj[attr] = int(obj[attr] * scale - offx)
                obj[attr] = int(obj[attr] * float(self.config['IMAGE_W']) / w)
                obj[attr] = max(min(obj[attr], self.config['IMAGE_W']), 0)
            for attr in ['ymin', 'ymax']:
                if jitter: obj[attr] = int(obj[attr] * scale - offy)
                obj[attr] = int(obj[attr] * float(self.config['IMAGE_H']) / h)
                obj[attr] = max(min(obj[attr], self.config['IMAGE_H']), 0)
            # Mirror the box coordinates when the image was flipped.
            if jitter and flip > 0.5:
                xmin = obj['xmin']
                obj['xmin'] = self.config['IMAGE_W'] - obj['xmax']
                obj['xmax'] = self.config['IMAGE_W'] - xmin
        return image, all_objs
| 47.092105
| 186
| 0.475552
|
4a0288ab30c23ec7533cd0cd74423c0de03523d6
| 9,784
|
py
|
Python
|
src/pytezos/michelson/macros.py
|
konchunas/pytezos
|
65576d18bdf1956fae8ea21241b6c43a38921b83
|
[
"MIT"
] | 98
|
2019-02-07T16:33:38.000Z
|
2022-03-31T15:53:41.000Z
|
src/pytezos/michelson/macros.py
|
konchunas/pytezos
|
65576d18bdf1956fae8ea21241b6c43a38921b83
|
[
"MIT"
] | 152
|
2019-05-20T16:38:56.000Z
|
2022-03-30T14:24:38.000Z
|
src/pytezos/michelson/macros.py
|
konchunas/pytezos
|
65576d18bdf1956fae8ea21241b6c43a38921b83
|
[
"MIT"
] | 34
|
2019-07-25T12:03:51.000Z
|
2021-11-11T22:23:38.000Z
|
import functools
import re
from collections import namedtuple
from typing import Tuple
from pytezos.michelson.tags import prim_tags
# Frequently reused Micheline primitive expressions.
COMPARE = dict(prim='COMPARE')
UNIT = dict(prim='UNIT')
FAILWITH = dict(prim='FAILWITH')
DUP = dict(prim='DUP')
SWAP = dict(prim='SWAP')
CAR = dict(prim='CAR')
CDR = dict(prim='CDR')
# CAR/CDR variants carrying the '@%%' annotation (propagate field names).
CAR__ = dict(prim='CAR', annots=['@%%'])
CDR__ = dict(prim='CDR', annots=['@%%'])
DROP = dict(prim='DROP')
# Code sequence that always fails: push Unit, then FAILWITH.
FAIL = [[UNIT, FAILWITH]]
# Registry of (compiled regexp, handler) pairs, populated by @macro.
macros = []
# Node of the parse tree built for P[PAI]+R / UNP[PAI]+R macros.
PxrNode = namedtuple('PxrNode', ['depth', 'annots', 'args', 'is_root'])
def macro(regexp):
    """Decorator factory registering a macro expander.

    The decorated function is appended to the module-level ``macros`` registry
    keyed by the compiled *regexp*, and is returned behaviorally unchanged.
    """
    def register_macro(func):
        macros.append((re.compile(regexp), func))

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)

        return wrapper

    return register_macro
def seq(instr=None) -> list:
    """Normalize *instr* into a code sequence (a list of instructions)."""
    if isinstance(instr, list):
        return instr
    return [] if instr is None else [instr]
def expand_macro(prim, annots, args, internal=False):
    """ Expands Michelson macro.
    :param prim: macro name
    :param annots: annotations (optional)
    :param args: arguments (optional)
    :param internal: this function is called during another macro expansion
    :returns: Code sequence (Micheline expression)
    """
    assert isinstance(annots, list)
    assert isinstance(args, list)
    if prim in prim_tags:
        # Not a macro: a genuine Michelson primitive, pass through as-is.
        return expr(prim=prim, annots=annots, args=args)
    # Dispatch to the first registered handler whose regexp matches.
    for regexp, handler in macros:
        groups = regexp.findall(prim)
        if groups:
            assert len(groups) == 1
            res = handler(groups[0], annots, args)
            # Internal (recursive) expansions keep the raw result; top-level
            # expansions are normalized into a code sequence.
            return res if internal else seq(res)
    assert False, f'unknown primitive `{prim}`'
def get_field_annots(annots):
    """Return only the field annotations (strings starting with '%')."""
    return [a for a in annots if isinstance(a, str) and a[0] == '%']
def get_var_annots(annots):
    """Return only the variable annotations (strings starting with '@')."""
    return [a for a in annots if isinstance(a, str) and a[0] == '@']
def skip_nones(array):
    """Drop ``None`` entries from *array*, preserving order."""
    return [item for item in array if item is not None]
def expr(**kwargs) -> dict:
    """Build a Micheline expression dict, dropping falsy values.

    Empty ``annots``/``args`` lists (and ``None``) are omitted entirely so the
    resulting JSON stays minimal.
    """
    return {key: value for key, value in kwargs.items() if value}
def dip_n(instr, depth=1):
    """Wrap *instr* in a DIP (or DIP n) protecting *depth* stack slots.

    Depth zero or negative returns the instructions unwrapped.
    """
    if depth <= 0:
        return instr
    if depth == 1:
        return expr(prim='DIP', args=[seq(instr)])
    return expr(prim='DIP', args=[{'int': str(depth)}, seq(instr)])
@macro(r'^CMP(EQ|NEQ|LT|GT|LE|GE)$')
def expand_cmpx(prim, annots, args) -> list:
    """CMP{X} -> COMPARE ; {X}."""
    assert not args
    return [COMPARE,
            expr(prim=prim, annots=annots)]


@macro(r'^IF(EQ|NEQ|LT|GT|LE|GE)$')
def expand_ifx(prim, annots, args) -> list:
    """IF{X} bt bf -> {X} ; IF bt bf."""
    assert len(args) == 2
    return [expr(prim=prim, annots=annots),
            expr(prim='IF', args=args)]


@macro(r'^IFCMP(EQ|NEQ|LT|GT|LE|GE)$')
def expand_ifcmpx(prim, annots, args) -> list:
    """IFCMP{X} bt bf -> [COMPARE ; {X}] ; IF bt bf."""
    assert len(args) == 2
    return [[COMPARE, expr(prim=prim, annots=annots)],
            expr(prim='IF', args=args)]


@macro(r'^FAIL$')
def expand_fail(prim, annots, args) -> list:
    """FAIL -> UNIT ; FAILWITH."""
    assert not annots
    assert not args
    return [UNIT, FAILWITH]
@macro(r'^ASSERT$')
def expand_assert(prim, annots, args) -> dict:
    """ASSERT -> IF {} FAIL."""
    assert not annots
    assert not args
    return expr(prim='IF', args=[[], FAIL])


@macro(r'^ASSERT_(EQ|NEQ|LT|LE|GT|GE)$')
def expand_assert_x(prim, annots, args) -> list:
    """ASSERT_{X} -> IF{X} {} FAIL."""
    assert not args
    assert not annots  # TODO: ask why
    return expand_ifx(prim, annots=[], args=[[], FAIL])


@macro(r'^ASSERT_CMP(EQ|NEQ|LT|LE|GT|GE)$')
def expand_assert_cmpx(prim, annots, args) -> list:
    """ASSERT_CMP{X} -> IFCMP{X} {} FAIL."""
    assert not args
    assert not annots  # TODO: ask why
    return expand_ifcmpx(prim, annots=[], args=[[], FAIL])


@macro(r'^ASSERT_NONE$')
def expand_assert_none(prim, annots, args) -> dict:
    """ASSERT_NONE -> IF_NONE {} FAIL."""
    assert not annots
    assert not args
    return expr(prim='IF_NONE', args=[[], FAIL])


@macro('^ASSERT_SOME$')
def expand_assert_some(prim, annots, args) -> dict:
    """ASSERT_SOME -> IF_NONE FAIL {RENAME}; annotations go to the RENAME."""
    assert not args
    return expr(prim='IF_NONE',
                args=[FAIL, [expr(prim='RENAME', annots=annots)]])


@macro('^ASSERT_LEFT$')
def expand_assert_left(prim, annots, args) -> dict:
    """ASSERT_LEFT -> IF_LEFT {RENAME} FAIL."""
    assert not args
    return expr(prim='IF_LEFT',
                args=[[expr(prim='RENAME', annots=annots)], FAIL])


@macro('^ASSERT_RIGHT$')
def expand_assert_right(prim, annots, args) -> dict:
    """ASSERT_RIGHT -> IF_LEFT FAIL {RENAME}."""
    assert not args
    return expr(prim='IF_LEFT',
                args=[FAIL, [expr(prim='RENAME', annots=annots)]])
@macro(r'^D(II+)P$')
def expand_dixp(prim, annots, args) -> dict:
    """DII+P code -> DIP n code; prim is the captured 'II+' so n = len(prim)."""
    assert not annots
    assert len(args) == 1
    return dip_n(args, depth=len(prim))


@macro(r'^D(UU+)P$')
def expand_duxp(prim, annots, args) -> dict:
    """DUU+P -> DUP n; prim is the captured 'UU+' so n = len(prim)."""
    assert not args
    depth = len(prim)
    return expr(prim='DUP', annots=annots, args=[{'int': str(depth)}])
def build_pxr_tree(pxr_macro, pxr_annots) -> PxrNode:
def parse(prim, annots, depth=0, is_root=False):
letter, prim = prim[0], prim[1:]
if letter == 'P':
dip_depth = depth
left, l_annot, prim, annots, depth = parse(prim, annots, depth)
right, r_annot, prim, annots, depth = parse(prim, annots, depth)
return PxrNode(dip_depth, [l_annot, r_annot], [left, right], is_root), None, prim, annots, depth
else:
annot, annots = (annots[0], annots[1:]) if annots else (None, [])
return letter, annot, prim, annots, depth + 1
root, _, _, _, _ = parse(pxr_macro, pxr_annots, is_root=True)
return root
def traverse_pxr_tree(prim, annots, produce):
res = []
def walk(node):
if isinstance(node, PxrNode):
res.insert(0, dip_n(produce(node), depth=node.depth))
_ = list(map(walk, node.args))
walk(build_pxr_tree(prim, annots))
return res
@macro(r'^P[PAI]{3,}R$')
def expand_pxr(prim, annots, args) -> list:
    """P[PAI]+R -> sequence of DIP-wrapped PAIR instructions."""
    def produce(node: PxrNode):
        # Field annotations attach to each PAIR; variable annotations only
        # to the root (the pair that ends up on top of the stack).
        pair_annots = [node.annots[0] or '%', node.annots[1]] if any(node.annots) else []
        if node.is_root:
            pair_annots.extend(get_var_annots(annots))
        return expr(prim='PAIR', annots=skip_nones(pair_annots))
    assert not args
    return traverse_pxr_tree(prim, get_field_annots(annots), produce)


@macro(r'^UN(P[PAI]{3,}R)$')
def expand_unpxr(prim, annots, args) -> list:
    """UNP[PAI]+R -> reversed sequence of DIP-wrapped UNPAIR instructions."""
    def produce(node: PxrNode):
        return [expr(prim='UNPAIR', annots=skip_nones(node.annots))]
    assert not args
    return list(reversed(traverse_pxr_tree(prim, annots, produce)))
def expand_cxr(prim, annots) -> list:
    """Recursively expand the remaining C[AD]+R part of a CA..R/CD..R macro."""
    return seq(expand_macro(prim=f'C{prim}R', annots=annots, args=[], internal=True))


@macro(r'^CA([AD]+)R$')
def expand_caxr(prim, annots, args) -> list:
    """CA..R -> CAR followed by the expansion of the remaining C..R."""
    assert not args
    return [CAR, *expand_cxr(prim, annots)]


@macro(r'^CD([AD]+)R$')
def expand_cdxr(prim, annots, args) -> list:
    """CD..R -> CDR followed by the expansion of the remaining C..R."""
    assert not args
    return [CDR, *expand_cxr(prim, annots)]
@macro(r'^IF_SOME$')
def expand_if_some(prim, annots, args) -> dict:
    """IF_SOME bt bf -> IF_NONE bf bt (branches swapped)."""
    assert not annots
    assert len(args) == 2
    return expr(prim='IF_NONE', args=list(reversed(args)))


@macro(r'^IF_RIGHT$')
def expand_if_right(prim, annots, args) -> dict:
    """IF_RIGHT bt bf -> IF_LEFT bf bt (branches swapped)."""
    assert not annots
    assert len(args) == 2
    return expr(prim='IF_LEFT', args=list(reversed(args)))


@macro(r'^SET_CAR$')
def expand_set_car(prim, annots, args) -> list:
    """SET_CAR -> SWAP ; UPDATE 1 (replace the left component of a pair)."""
    assert not args
    return [SWAP, expr(prim='UPDATE', args=[{'int': '1'}], annots=annots)]


@macro(r'^SET_CDR$')
def expand_set_cdr(prim, annots, args) -> list:
    """SET_CDR -> SWAP ; UPDATE 2 (replace the right component of a pair)."""
    assert not args
    return [SWAP, expr(prim='UPDATE', args=[{'int': '2'}], annots=annots)]
def expand_set_cxr(prim, annots):
    # Shared helper for SET_CA..R / SET_CD..R: expand the inner SET_C[AD]+R
    # macro and build the PAIR that reassembles the outer pair afterwards.
    set_cxr = expand_macro(prim=f'SET_C{prim}R', annots=get_field_annots(annots), args=[], internal=True)
    pair = expr(prim='PAIR', annots=['%@', '%@'] + get_var_annots(annots))
    return set_cxr, pair


@macro(r'^SET_CA([AD]+)R$')
def expand_set_caxr(prim, annots, args) -> list:
    """SET_CA..R: rewrite the left subtree of a pair, then re-pair."""
    assert not args
    set_cxr, pair = expand_set_cxr(prim, annots)
    return [DUP,
            dip_n([CAR__, set_cxr]),
            CDR__,
            SWAP,
            pair]


@macro(r'^SET_CD([AD]+)R$')
def expand_set_cdxr(prim, annots, args) -> list:
    """SET_CD..R: rewrite the right subtree of a pair, then re-pair."""
    assert not args
    set_cxr, pair = expand_set_cxr(prim, annots)
    return [DUP,
            dip_n([CDR__, set_cxr]),
            CAR__,
            pair]
def get_map_cxr_annots(annots) -> Tuple[str, list]:
    """Split MAP_C[AD]R annotations into (pair field annot, var annots).

    A single field annotation '%x' becomes ('%x', ['@x']); with no field
    annotation the default ('%', []) is returned.
    """
    field_annots = get_field_annots(annots)
    if not field_annots:
        return '%', []
    assert len(field_annots) == 1
    return field_annots[0], [f'@{field_annots[0][1:]}']
@macro(r'^MAP_CAR$')
def expand_map_car(prim, annots, args) -> list:
    """MAP_CAR code: apply *code* to the left component, then re-pair."""
    car_annot, var_annots = get_map_cxr_annots(annots)
    return [DUP,
            CDR__,
            dip_n([expr(prim='CAR', annots=var_annots), *args]),
            SWAP,
            expr(prim='PAIR', annots=[car_annot, '%@'])]


@macro(r'^MAP_CDR$')
def expand_map_cdr(prim, annots, args) -> list:
    """MAP_CDR code: apply *code* to the right component, then re-pair."""
    cdr_annot, var_annots = get_map_cxr_annots(annots)
    return [DUP,
            expr(prim='CDR', annots=var_annots),
            *args,
            SWAP,
            CAR__,
            expr(prim='PAIR', annots=['%@', cdr_annot])]


def expand_map_cxr(prim, annots, args):
    # Shared helper for MAP_CA..R / MAP_CD..R: expand the inner MAP_C[AD]+R
    # macro and build the PAIR that reassembles the outer pair afterwards.
    set_cxr = expand_macro(prim=f'MAP_C{prim}R', annots=get_field_annots(annots), args=args, internal=True)
    pair = expr(prim='PAIR', annots=['%@', '%@'] + get_var_annots(annots))
    return set_cxr, pair


@macro(r'^MAP_CA([AD]+)R$')
def expand_map_caxr(prim, annots, args) -> list:
    """MAP_CA..R code: map over the left subtree of a pair, then re-pair."""
    map_cxr, pair = expand_map_cxr(prim, annots, args)
    return [DUP,
            dip_n([CAR__, map_cxr]),
            CDR__,
            SWAP,
            pair]


@macro(r'^MAP_CD([AD]+)R$')
def expand_map_cdxr(prim, annots, args) -> list:
    """MAP_CD..R code: map over the right subtree of a pair, then re-pair."""
    map_cxr, pair = expand_map_cxr(prim, annots, args)
    return [DUP,
            dip_n([CDR__, map_cxr]),
            CAR__,
            pair]
| 27.560563
| 108
| 0.621525
|
4a02890f12d30c602a1055b4693274cf48e82cfa
| 1,382
|
py
|
Python
|
unet/resizeData.py
|
MartimChaves/ret_detect
|
774521a079be4324d542a841c7b3be808c18356b
|
[
"MIT"
] | null | null | null |
unet/resizeData.py
|
MartimChaves/ret_detect
|
774521a079be4324d542a841c7b3be808c18356b
|
[
"MIT"
] | null | null | null |
unet/resizeData.py
|
MartimChaves/ret_detect
|
774521a079be4324d542a841c7b3be808c18356b
|
[
"MIT"
] | null | null | null |
import cv2.cv2 as cv2
import numpy as np
import os
def myShowImage(img, name="from_show_function"):
    """Display *img* in a window titled *name*, blocking until a key is pressed."""
    cv2.imshow(name, img)
    # Block until the user presses any key, then tear down all OpenCV windows.
    cv2.waitKey(0)
    cv2.destroyAllWindows()
# Resize the 40 IDRiD optic-disc (OD) training masks down to a 256x256
# canvas for U-Net training. Paths are machine-specific; the per-iteration
# os.chdir calls are kept so the cwd side effects match the original script.
BASE_DIR = "C:/Users/Martim_Pc/Desktop/DACO/PROJECT_DACO/convNet/Unet/"
OUT_DIR = BASE_DIR + "masks/train"
SCALE_PERCENT = 5.95  # percent of the original image size

for i in range(1, 41):
    os.chdir(BASE_DIR)
    # NOTE(review): the original also built an unused '.jpg' image path here;
    # only the '_OD.tif' mask is actually read, so the dead variable is gone.
    mask_path = 'Datasets/IDRID training/IDRiD_' + str(i).zfill(2) + '_OD.tif'
    img = cv2.imread(mask_path, cv2.CV_8UC1)

    # Target dimensions at SCALE_PERCENT of the original size.
    width = int(img.shape[1] * SCALE_PERCENT / 100)
    height = int(img.shape[0] * SCALE_PERCENT / 100)
    resized = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA)

    # Stretch intensities ([0, 230] -> [-28, 255]) and clip negatives to 0.
    resized = np.subtract(np.multiply(resized, (255 / 230)), 28)
    resized[resized < 0] = 0
    resized = resized.astype(np.uint8)

    # Letterbox the resized mask (rows 43:212, cols 0:255) into a square
    # 256x256 canvas; untouched border pixels stay zero.
    canvas = np.zeros([256, 256])
    canvas[43:212, 0:255] = np.array(resized)
    canvas = np.multiply(canvas, 255 / 56)  # rescale mask labels to full range
    canvas = canvas.astype(np.uint8)

    os.chdir(OUT_DIR)
    cv2.imwrite(str(i) + ".png", canvas)
    # myShowImage(canvas)
| 34.55
| 101
| 0.656295
|
4a0289da758f294dd6c6ae6dd31118eb4b001ff1
| 85,751
|
py
|
Python
|
dbReports/iondb/rundb/migrations/0143_auto__del_field_dmfilestat_filesys_status__add_field_globalconfig_auto.py
|
konradotto/TS
|
bf088bd8432b1e3f4b8c8c083650a30d9ef2ae2e
|
[
"Apache-2.0"
] | 125
|
2015-01-22T05:43:23.000Z
|
2022-03-22T17:15:59.000Z
|
dbReports/iondb/rundb/migrations/0143_auto__del_field_dmfilestat_filesys_status__add_field_globalconfig_auto.py
|
konradotto/TS
|
bf088bd8432b1e3f4b8c8c083650a30d9ef2ae2e
|
[
"Apache-2.0"
] | 59
|
2015-02-10T09:13:06.000Z
|
2021-11-11T02:32:38.000Z
|
dbReports/iondb/rundb/migrations/0143_auto__del_field_dmfilestat_filesys_status__add_field_globalconfig_auto.py
|
konradotto/TS
|
bf088bd8432b1e3f4b8c8c083650a30d9ef2ae2e
|
[
"Apache-2.0"
] | 98
|
2015-01-17T01:25:10.000Z
|
2022-03-18T17:29:42.000Z
|
# Copyright (C) 2013 Ion Torrent Systems, Inc. All Rights Reserved
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Update state for any existing DMFileStat objects
for filestat in orm.dmfilestat.objects.all():
if filestat.action_state in ['U', 'I', 'D']:
filestat.action_state = 'L'
filestat.save()
# Deleting field 'DMFileStat.filesys_status'
db.delete_column('rundb_dmfilestat', 'filesys_status')
# Adding field 'GlobalConfig.auto_archive_enable'
db.add_column('rundb_globalconfig', 'auto_archive_enable',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Adding field 'DMFileStat.filesys_status'
db.add_column('rundb_dmfilestat', 'filesys_status',
self.gf('django.db.models.fields.CharField')(default='L', max_length=4),
keep_default=False)
# Deleting field 'GlobalConfig.auto_archive_enable'
db.delete_column('rundb_globalconfig', 'auto_archive_enable')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'rundb.analysismetrics': {
'Meta': {'object_name': 'AnalysisMetrics'},
'amb': ('django.db.models.fields.IntegerField', [], {}),
'bead': ('django.db.models.fields.IntegerField', [], {}),
'dud': ('django.db.models.fields.IntegerField', [], {}),
'empty': ('django.db.models.fields.IntegerField', [], {}),
'excluded': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored': ('django.db.models.fields.IntegerField', [], {}),
'keypass_all_beads': ('django.db.models.fields.IntegerField', [], {}),
'lib': ('django.db.models.fields.IntegerField', [], {}),
'libFinal': ('django.db.models.fields.IntegerField', [], {}),
'libKp': ('django.db.models.fields.IntegerField', [], {}),
'libLive': ('django.db.models.fields.IntegerField', [], {}),
'libMix': ('django.db.models.fields.IntegerField', [], {}),
'lib_pass_basecaller': ('django.db.models.fields.IntegerField', [], {}),
'lib_pass_cafie': ('django.db.models.fields.IntegerField', [], {}),
'live': ('django.db.models.fields.IntegerField', [], {}),
'pinned': ('django.db.models.fields.IntegerField', [], {}),
'report': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'analysismetrics_set'", 'to': "orm['rundb.Results']"}),
'sysCF': ('django.db.models.fields.FloatField', [], {}),
'sysDR': ('django.db.models.fields.FloatField', [], {}),
'sysIE': ('django.db.models.fields.FloatField', [], {}),
'tf': ('django.db.models.fields.IntegerField', [], {}),
'tfFinal': ('django.db.models.fields.IntegerField', [], {}),
'tfKp': ('django.db.models.fields.IntegerField', [], {}),
'tfLive': ('django.db.models.fields.IntegerField', [], {}),
'tfMix': ('django.db.models.fields.IntegerField', [], {}),
'washout': ('django.db.models.fields.IntegerField', [], {}),
'washout_ambiguous': ('django.db.models.fields.IntegerField', [], {}),
'washout_dud': ('django.db.models.fields.IntegerField', [], {}),
'washout_library': ('django.db.models.fields.IntegerField', [], {}),
'washout_live': ('django.db.models.fields.IntegerField', [], {}),
'washout_test_fragment': ('django.db.models.fields.IntegerField', [], {})
},
'rundb.applproduct': {
'Meta': {'object_name': 'ApplProduct'},
'applType': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rundb.RunType']"}),
'defaultBarcodeKitName': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'defaultChipType': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'defaultControlSeqKit': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'controlSeqKit_applProduct_set'", 'null': 'True', 'to': "orm['rundb.KitInfo']"}),
'defaultFlowCount': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'defaultGenomeRefName': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'defaultHotSpotRegionBedFileName': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'defaultIonChefPrepKit': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ionChefPrepKit_applProduct_set'", 'null': 'True', 'to': "orm['rundb.KitInfo']"}),
'defaultLibraryKit': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'libKit_applProduct_set'", 'null': 'True', 'to': "orm['rundb.KitInfo']"}),
'defaultPairedEndAdapterKit': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'peAdapterKit_applProduct_set'", 'null': 'True', 'to': "orm['rundb.KitInfo']"}),
'defaultPairedEndLibraryKit': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'peLibKit_applProduct_set'", 'null': 'True', 'to': "orm['rundb.KitInfo']"}),
'defaultPairedEndSequencingKit': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'peSeqKit_applProduct_set'", 'null': 'True', 'to': "orm['rundb.KitInfo']"}),
'defaultSamplePrepKit': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'samplePrepKit_applProduct_set'", 'null': 'True', 'to': "orm['rundb.KitInfo']"}),
'defaultSequencingKit': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'seqKit_applProduct_set'", 'null': 'True', 'to': "orm['rundb.KitInfo']"}),
'defaultTargetRegionBedFileName': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'defaultTemplateKit': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'templateKit_applProduct_set'", 'null': 'True', 'to': "orm['rundb.KitInfo']"}),
'defaultVariantFrequency': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isActive': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'isDefault': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'isDefaultBarcoded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'isDefaultPairedEnd': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'isHotspotRegionBEDFileSuppported': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'isPairedEndSupported': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'isVisible': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'productCode': ('django.db.models.fields.CharField', [], {'default': "'any'", 'unique': 'True', 'max_length': '64'}),
'productName': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'rundb.backup': {
'Meta': {'object_name': 'Backup'},
'backupDate': ('django.db.models.fields.DateTimeField', [], {}),
'backupName': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'}),
'backupPath': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rundb.Experiment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isBackedUp': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'rundb.backupconfig': {
'Meta': {'object_name': 'BackupConfig'},
'backup_directory': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '256', 'blank': 'True'}),
'backup_threshold': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}),
'bandwidth_limit': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'grace_period': ('django.db.models.fields.IntegerField', [], {'default': '72'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keepTN': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rundb.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_to_backup': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}),
'online': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'timeout': ('django.db.models.fields.IntegerField', [], {'blank': 'True'})
},
'rundb.chip': {
'Meta': {'object_name': 'Chip'},
'analysisargs': ('django.db.models.fields.CharField', [], {'max_length': '5000', 'blank': 'True'}),
'basecallerargs': ('django.db.models.fields.CharField', [], {'max_length': '5000', 'blank': 'True'}),
'beadfindargs': ('django.db.models.fields.CharField', [], {'max_length': '5000', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'prebasecallerargs': ('django.db.models.fields.CharField', [], {'max_length': '5000', 'blank': 'True'}),
'prethumbnailbasecallerargs': ('django.db.models.fields.CharField', [], {'max_length': '5000', 'blank': 'True'}),
'slots': ('django.db.models.fields.IntegerField', [], {}),
'thumbnailanalysisargs': ('django.db.models.fields.CharField', [], {'max_length': '5000', 'blank': 'True'}),
'thumbnailbasecallerargs': ('django.db.models.fields.CharField', [], {'max_length': '5000', 'blank': 'True'}),
'thumbnailbeadfindargs': ('django.db.models.fields.CharField', [], {'max_length': '5000', 'blank': 'True'})
},
'rundb.content': {
'Meta': {'object_name': 'Content'},
'contentupload': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contents'", 'to': "orm['rundb.ContentUpload']"}),
'file': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contents'", 'to': "orm['rundb.Publisher']"})
},
'rundb.contentupload': {
'Meta': {'object_name': 'ContentUpload'},
'file_path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'blank': 'True'}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rundb.Publisher']", 'null': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'rundb.cruncher': {
'Meta': {'object_name': 'Cruncher'},
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rundb.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'prefix': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'rundb.dm_prune_field': {
'Meta': {'object_name': 'dm_prune_field'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rule': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'})
},
'rundb.dm_prune_group': {
'Meta': {'object_name': 'dm_prune_group'},
'editable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}),
'ruleNums': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': "''", 'max_length': '128', 'blank': 'True'})
},
'rundb.dm_reports': {
'Meta': {'object_name': 'dm_reports'},
'autoAge': ('django.db.models.fields.IntegerField', [], {'default': '90'}),
'autoPrune': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'autoType': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'pruneLevel': ('django.db.models.fields.CharField', [], {'default': "'No-op'", 'max_length': '128'})
},
'rundb.dmfileset': {
'Meta': {'object_name': 'DMFileSet'},
'auto_action': ('django.db.models.fields.CharField', [], {'default': "'OFF'", 'max_length': '8'}),
'auto_trigger_age': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'auto_trigger_usage': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'backup_directory': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'bandwidth_limit': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'del_empty_dir': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'exclude': ('iondb.rundb.separatedValuesField.SeparatedValuesField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'include': ('iondb.rundb.separatedValuesField.SeparatedValuesField', [], {'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '48'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '8'})
},
'rundb.dmfilestat': {
'Meta': {'object_name': 'DMFileStat'},
'action_state': ('django.db.models.fields.CharField', [], {'default': "'L'", 'max_length': '8'}),
'archivepath': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'diskspace': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dmfileset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rundb.DMFileSet']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'result': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rundb.Results']", 'null': 'True', 'blank': 'True'})
},
'rundb.dnabarcode': {
'Meta': {'object_name': 'dnaBarcode'},
'adapter': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'annotation': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'floworder': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_str': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'index': ('django.db.models.fields.IntegerField', [], {}),
'length': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'score_cutoff': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'score_mode': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'sequence': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'})
},
'rundb.downloadmonitor': {
'Meta': {'object_name': 'DownloadMonitor'},
'celery_task_id': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'local_dir': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'progress': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'size': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60'}),
'tags': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '2000'})
},
'rundb.emailaddress': {
'Meta': {'object_name': 'EmailAddress'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'selected': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'rundb.eventlog': {
'Meta': {'object_name': 'EventLog'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_eventlog'", 'to': "orm['contenttypes.ContentType']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_pk': ('django.db.models.fields.PositiveIntegerField', [], {}),
'text': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'username': ('django.db.models.fields.CharField', [], {'default': "'ION'", 'max_length': '32', 'blank': 'True'})
},
'rundb.experiment': {
'Meta': {'object_name': 'Experiment'},
'autoAnalyze': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'baselineRun': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'chipBarcode': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'chipType': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'cycles': ('django.db.models.fields.IntegerField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'diskusage': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'displayName': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}),
'expCompInfo': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expDir': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'expName': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'flows': ('django.db.models.fields.IntegerField', [], {}),
'flowsInOrder': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'ftpStatus': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isReverseRun': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'log': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'blank': 'True'}),
'metaData': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'blank': 'True'}),
'notes': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'pgmName': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'plan': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'experiment'", 'unique': 'True', 'null': 'True', 'to': "orm['rundb.PlannedExperiment']"}),
'rawdatastyle': ('django.db.models.fields.CharField', [], {'default': "'single'", 'max_length': '24', 'null': 'True', 'blank': 'True'}),
'reagentBarcode': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'resultDate': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'reverse_primer': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'runMode': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
'seqKitBarcode': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'sequencekitbarcode': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'sequencekitname': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'star': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'storageHost': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'storage_options': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '200'}),
'unique': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '512'}),
'usePreBeadfind': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user_ack': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '24'})
},
'rundb.experimentanalysissettings': {
'Meta': {'object_name': 'ExperimentAnalysisSettings'},
'barcodeKitName': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'barcodedSamples': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'eas_set'", 'null': 'True', 'to': "orm['rundb.Experiment']"}),
'hotSpotRegionBedFile': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isEditable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'isOneTimeOverride': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'libraryKey': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'libraryKitBarcode': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'libraryKitName': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'selectedPlugins': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'targetRegionBedFile': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'threePrimeAdapter': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'})
},
'rundb.fileserver': {
'Meta': {'object_name': 'FileServer'},
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'filesPrefix': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rundb.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'percentfull': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'blank': 'True'})
},
'rundb.globalconfig': {
'Meta': {'object_name': 'GlobalConfig'},
'auto_archive_ack': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_archive_enable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'barcode_args': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'blank': 'True'}),
'base_recalibrate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_flow_order': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'default_library_key': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'default_plugin_script': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'default_storage_options': ('django.db.models.fields.CharField', [], {'default': "'D'", 'max_length': '500', 'blank': 'True'}),
'default_test_fragment_key': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'enable_auto_pkg_dl': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'fasta_path': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mark_duplicates': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'plugin_folder': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'plugin_output_folder': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'records_to_display': ('django.db.models.fields.IntegerField', [], {'default': '20', 'blank': 'True'}),
'reference_path': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'selected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'site_name': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'ts_update_status': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'web_root': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'})
},
'rundb.kitinfo': {
'Meta': {'unique_together': "(('kitType', 'name'),)", 'object_name': 'KitInfo'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '3024', 'blank': 'True'}),
'flowCount': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instrumentType': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
'isActive': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'kitType': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '512'}),
'nucleotideType': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
'runMode': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
'uid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'})
},
'rundb.kitpart': {
'Meta': {'unique_together': "(('barcode',),)", 'object_name': 'KitPart'},
'barcode': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '7'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rundb.KitInfo']"})
},
'rundb.libmetrics': {
'Genome_Version': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'Index_Version': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'Meta': {'object_name': 'LibMetrics'},
'align_sample': ('django.db.models.fields.IntegerField', [], {}),
'aveKeyCounts': ('django.db.models.fields.FloatField', [], {}),
'cf': ('django.db.models.fields.FloatField', [], {}),
'dr': ('django.db.models.fields.FloatField', [], {}),
'extrapolated_100q10_reads': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_100q17_reads': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_100q20_reads': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_100q47_reads': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_100q7_reads': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_200q10_reads': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_200q17_reads': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_200q20_reads': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_200q47_reads': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_200q7_reads': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_300q10_reads': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_300q17_reads': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_300q20_reads': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_300q47_reads': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_300q7_reads': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_400q10_reads': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_400q17_reads': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_400q20_reads': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_400q47_reads': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_400q7_reads': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_50q10_reads': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_50q17_reads': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_50q20_reads': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_50q47_reads': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_50q7_reads': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_from_number_of_sampled_reads': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_mapped_bases_in_q10_alignments': ('django.db.models.fields.BigIntegerField', [], {}),
'extrapolated_mapped_bases_in_q17_alignments': ('django.db.models.fields.BigIntegerField', [], {}),
'extrapolated_mapped_bases_in_q20_alignments': ('django.db.models.fields.BigIntegerField', [], {}),
'extrapolated_mapped_bases_in_q47_alignments': ('django.db.models.fields.BigIntegerField', [], {}),
'extrapolated_mapped_bases_in_q7_alignments': ('django.db.models.fields.BigIntegerField', [], {}),
'extrapolated_q10_alignments': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_q10_coverage_percentage': ('django.db.models.fields.FloatField', [], {}),
'extrapolated_q10_longest_alignment': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_q10_mean_alignment_length': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_q10_mean_coverage_depth': ('django.db.models.fields.FloatField', [], {}),
'extrapolated_q17_alignments': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_q17_coverage_percentage': ('django.db.models.fields.FloatField', [], {}),
'extrapolated_q17_longest_alignment': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_q17_mean_alignment_length': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_q17_mean_coverage_depth': ('django.db.models.fields.FloatField', [], {}),
'extrapolated_q20_alignments': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_q20_coverage_percentage': ('django.db.models.fields.FloatField', [], {}),
'extrapolated_q20_longest_alignment': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_q20_mean_alignment_length': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_q20_mean_coverage_depth': ('django.db.models.fields.FloatField', [], {}),
'extrapolated_q47_alignments': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_q47_coverage_percentage': ('django.db.models.fields.FloatField', [], {}),
'extrapolated_q47_longest_alignment': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_q47_mean_alignment_length': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_q47_mean_coverage_depth': ('django.db.models.fields.FloatField', [], {}),
'extrapolated_q7_alignments': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_q7_coverage_percentage': ('django.db.models.fields.FloatField', [], {}),
'extrapolated_q7_longest_alignment': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_q7_mean_alignment_length': ('django.db.models.fields.IntegerField', [], {}),
'extrapolated_q7_mean_coverage_depth': ('django.db.models.fields.FloatField', [], {}),
'genome': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'genomelength': ('django.db.models.fields.IntegerField', [], {}),
'genomesize': ('django.db.models.fields.BigIntegerField', [], {}),
'i100Q10_reads': ('django.db.models.fields.IntegerField', [], {}),
'i100Q17_reads': ('django.db.models.fields.IntegerField', [], {}),
'i100Q20_reads': ('django.db.models.fields.IntegerField', [], {}),
'i100Q47_reads': ('django.db.models.fields.IntegerField', [], {}),
'i100Q7_reads': ('django.db.models.fields.IntegerField', [], {}),
'i150Q10_reads': ('django.db.models.fields.IntegerField', [], {}),
'i150Q17_reads': ('django.db.models.fields.IntegerField', [], {}),
'i150Q20_reads': ('django.db.models.fields.IntegerField', [], {}),
'i150Q47_reads': ('django.db.models.fields.IntegerField', [], {}),
'i150Q7_reads': ('django.db.models.fields.IntegerField', [], {}),
'i200Q10_reads': ('django.db.models.fields.IntegerField', [], {}),
'i200Q17_reads': ('django.db.models.fields.IntegerField', [], {}),
'i200Q20_reads': ('django.db.models.fields.IntegerField', [], {}),
'i200Q47_reads': ('django.db.models.fields.IntegerField', [], {}),
'i200Q7_reads': ('django.db.models.fields.IntegerField', [], {}),
'i250Q10_reads': ('django.db.models.fields.IntegerField', [], {}),
'i250Q17_reads': ('django.db.models.fields.IntegerField', [], {}),
'i250Q20_reads': ('django.db.models.fields.IntegerField', [], {}),
'i250Q47_reads': ('django.db.models.fields.IntegerField', [], {}),
'i250Q7_reads': ('django.db.models.fields.IntegerField', [], {}),
'i300Q10_reads': ('django.db.models.fields.IntegerField', [], {}),
'i300Q17_reads': ('django.db.models.fields.IntegerField', [], {}),
'i300Q20_reads': ('django.db.models.fields.IntegerField', [], {}),
'i300Q47_reads': ('django.db.models.fields.IntegerField', [], {}),
'i300Q7_reads': ('django.db.models.fields.IntegerField', [], {}),
'i350Q10_reads': ('django.db.models.fields.IntegerField', [], {}),
'i350Q17_reads': ('django.db.models.fields.IntegerField', [], {}),
'i350Q20_reads': ('django.db.models.fields.IntegerField', [], {}),
'i350Q47_reads': ('django.db.models.fields.IntegerField', [], {}),
'i350Q7_reads': ('django.db.models.fields.IntegerField', [], {}),
'i400Q10_reads': ('django.db.models.fields.IntegerField', [], {}),
'i400Q17_reads': ('django.db.models.fields.IntegerField', [], {}),
'i400Q20_reads': ('django.db.models.fields.IntegerField', [], {}),
'i400Q47_reads': ('django.db.models.fields.IntegerField', [], {}),
'i400Q7_reads': ('django.db.models.fields.IntegerField', [], {}),
'i450Q10_reads': ('django.db.models.fields.IntegerField', [], {}),
'i450Q17_reads': ('django.db.models.fields.IntegerField', [], {}),
'i450Q20_reads': ('django.db.models.fields.IntegerField', [], {}),
'i450Q47_reads': ('django.db.models.fields.IntegerField', [], {}),
'i450Q7_reads': ('django.db.models.fields.IntegerField', [], {}),
'i500Q10_reads': ('django.db.models.fields.IntegerField', [], {}),
'i500Q17_reads': ('django.db.models.fields.IntegerField', [], {}),
'i500Q20_reads': ('django.db.models.fields.IntegerField', [], {}),
'i500Q47_reads': ('django.db.models.fields.IntegerField', [], {}),
'i500Q7_reads': ('django.db.models.fields.IntegerField', [], {}),
'i50Q10_reads': ('django.db.models.fields.IntegerField', [], {}),
'i50Q17_reads': ('django.db.models.fields.IntegerField', [], {}),
'i50Q20_reads': ('django.db.models.fields.IntegerField', [], {}),
'i50Q47_reads': ('django.db.models.fields.IntegerField', [], {}),
'i50Q7_reads': ('django.db.models.fields.IntegerField', [], {}),
'i550Q10_reads': ('django.db.models.fields.IntegerField', [], {}),
'i550Q17_reads': ('django.db.models.fields.IntegerField', [], {}),
'i550Q20_reads': ('django.db.models.fields.IntegerField', [], {}),
'i550Q47_reads': ('django.db.models.fields.IntegerField', [], {}),
'i550Q7_reads': ('django.db.models.fields.IntegerField', [], {}),
'i600Q10_reads': ('django.db.models.fields.IntegerField', [], {}),
'i600Q17_reads': ('django.db.models.fields.IntegerField', [], {}),
'i600Q20_reads': ('django.db.models.fields.IntegerField', [], {}),
'i600Q47_reads': ('django.db.models.fields.IntegerField', [], {}),
'i600Q7_reads': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ie': ('django.db.models.fields.FloatField', [], {}),
'q10_alignments': ('django.db.models.fields.IntegerField', [], {}),
'q10_coverage_percentage': ('django.db.models.fields.FloatField', [], {}),
'q10_longest_alignment': ('django.db.models.fields.IntegerField', [], {}),
'q10_mapped_bases': ('django.db.models.fields.BigIntegerField', [], {}),
'q10_mean_alignment_length': ('django.db.models.fields.IntegerField', [], {}),
'q10_qscore_bases': ('django.db.models.fields.BigIntegerField', [], {}),
'q17_alignments': ('django.db.models.fields.IntegerField', [], {}),
'q17_coverage_percentage': ('django.db.models.fields.FloatField', [], {}),
'q17_longest_alignment': ('django.db.models.fields.IntegerField', [], {}),
'q17_mapped_bases': ('django.db.models.fields.BigIntegerField', [], {}),
'q17_mean_alignment_length': ('django.db.models.fields.IntegerField', [], {}),
'q17_qscore_bases': ('django.db.models.fields.BigIntegerField', [], {}),
'q20_alignments': ('django.db.models.fields.IntegerField', [], {}),
'q20_coverage_percentage': ('django.db.models.fields.FloatField', [], {}),
'q20_longest_alignment': ('django.db.models.fields.IntegerField', [], {}),
'q20_mapped_bases': ('django.db.models.fields.BigIntegerField', [], {}),
'q20_mean_alignment_length': ('django.db.models.fields.IntegerField', [], {}),
'q20_qscore_bases': ('django.db.models.fields.BigIntegerField', [], {}),
'q47_alignments': ('django.db.models.fields.IntegerField', [], {}),
'q47_coverage_percentage': ('django.db.models.fields.FloatField', [], {}),
'q47_longest_alignment': ('django.db.models.fields.IntegerField', [], {}),
'q47_mapped_bases': ('django.db.models.fields.BigIntegerField', [], {}),
'q47_mean_alignment_length': ('django.db.models.fields.IntegerField', [], {}),
'q47_qscore_bases': ('django.db.models.fields.BigIntegerField', [], {}),
'q7_alignments': ('django.db.models.fields.IntegerField', [], {}),
'q7_coverage_percentage': ('django.db.models.fields.FloatField', [], {}),
'q7_longest_alignment': ('django.db.models.fields.IntegerField', [], {}),
'q7_mapped_bases': ('django.db.models.fields.BigIntegerField', [], {}),
'q7_mean_alignment_length': ('django.db.models.fields.IntegerField', [], {}),
'q7_qscore_bases': ('django.db.models.fields.BigIntegerField', [], {}),
'r100Q10': ('django.db.models.fields.IntegerField', [], {}),
'r100Q17': ('django.db.models.fields.IntegerField', [], {}),
'r100Q20': ('django.db.models.fields.IntegerField', [], {}),
'r200Q10': ('django.db.models.fields.IntegerField', [], {}),
'r200Q17': ('django.db.models.fields.IntegerField', [], {}),
'r200Q20': ('django.db.models.fields.IntegerField', [], {}),
'r50Q10': ('django.db.models.fields.IntegerField', [], {}),
'r50Q17': ('django.db.models.fields.IntegerField', [], {}),
'r50Q20': ('django.db.models.fields.IntegerField', [], {}),
'rCoverage': ('django.db.models.fields.FloatField', [], {}),
'rLongestAlign': ('django.db.models.fields.IntegerField', [], {}),
'rMeanAlignLen': ('django.db.models.fields.IntegerField', [], {}),
'rNumAlignments': ('django.db.models.fields.IntegerField', [], {}),
'raw_accuracy': ('django.db.models.fields.FloatField', [], {}),
'report': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'libmetrics_set'", 'to': "orm['rundb.Results']"}),
's100Q10': ('django.db.models.fields.IntegerField', [], {}),
's100Q17': ('django.db.models.fields.IntegerField', [], {}),
's100Q20': ('django.db.models.fields.IntegerField', [], {}),
's200Q10': ('django.db.models.fields.IntegerField', [], {}),
's200Q17': ('django.db.models.fields.IntegerField', [], {}),
's200Q20': ('django.db.models.fields.IntegerField', [], {}),
's50Q10': ('django.db.models.fields.IntegerField', [], {}),
's50Q17': ('django.db.models.fields.IntegerField', [], {}),
's50Q20': ('django.db.models.fields.IntegerField', [], {}),
'sCoverage': ('django.db.models.fields.FloatField', [], {}),
'sLongestAlign': ('django.db.models.fields.IntegerField', [], {}),
'sMeanAlignLen': ('django.db.models.fields.IntegerField', [], {}),
'sNumAlignments': ('django.db.models.fields.IntegerField', [], {}),
'sampled_100q10_reads': ('django.db.models.fields.IntegerField', [], {}),
'sampled_100q17_reads': ('django.db.models.fields.IntegerField', [], {}),
'sampled_100q20_reads': ('django.db.models.fields.IntegerField', [], {}),
'sampled_100q47_reads': ('django.db.models.fields.IntegerField', [], {}),
'sampled_100q7_reads': ('django.db.models.fields.IntegerField', [], {}),
'sampled_200q10_reads': ('django.db.models.fields.IntegerField', [], {}),
'sampled_200q17_reads': ('django.db.models.fields.IntegerField', [], {}),
'sampled_200q20_reads': ('django.db.models.fields.IntegerField', [], {}),
'sampled_200q47_reads': ('django.db.models.fields.IntegerField', [], {}),
'sampled_200q7_reads': ('django.db.models.fields.IntegerField', [], {}),
'sampled_300q10_reads': ('django.db.models.fields.IntegerField', [], {}),
'sampled_300q17_reads': ('django.db.models.fields.IntegerField', [], {}),
'sampled_300q20_reads': ('django.db.models.fields.IntegerField', [], {}),
'sampled_300q47_reads': ('django.db.models.fields.IntegerField', [], {}),
'sampled_300q7_reads': ('django.db.models.fields.IntegerField', [], {}),
'sampled_400q10_reads': ('django.db.models.fields.IntegerField', [], {}),
'sampled_400q17_reads': ('django.db.models.fields.IntegerField', [], {}),
'sampled_400q20_reads': ('django.db.models.fields.IntegerField', [], {}),
'sampled_400q47_reads': ('django.db.models.fields.IntegerField', [], {}),
'sampled_400q7_reads': ('django.db.models.fields.IntegerField', [], {}),
'sampled_50q10_reads': ('django.db.models.fields.IntegerField', [], {}),
'sampled_50q17_reads': ('django.db.models.fields.IntegerField', [], {}),
'sampled_50q20_reads': ('django.db.models.fields.IntegerField', [], {}),
'sampled_50q47_reads': ('django.db.models.fields.IntegerField', [], {}),
'sampled_50q7_reads': ('django.db.models.fields.IntegerField', [], {}),
'sampled_mapped_bases_in_q10_alignments': ('django.db.models.fields.BigIntegerField', [], {}),
'sampled_mapped_bases_in_q17_alignments': ('django.db.models.fields.BigIntegerField', [], {}),
'sampled_mapped_bases_in_q20_alignments': ('django.db.models.fields.BigIntegerField', [], {}),
'sampled_mapped_bases_in_q47_alignments': ('django.db.models.fields.BigIntegerField', [], {}),
'sampled_mapped_bases_in_q7_alignments': ('django.db.models.fields.BigIntegerField', [], {}),
'sampled_q10_alignments': ('django.db.models.fields.IntegerField', [], {}),
'sampled_q10_coverage_percentage': ('django.db.models.fields.FloatField', [], {}),
'sampled_q10_longest_alignment': ('django.db.models.fields.IntegerField', [], {}),
'sampled_q10_mean_alignment_length': ('django.db.models.fields.IntegerField', [], {}),
'sampled_q10_mean_coverage_depth': ('django.db.models.fields.FloatField', [], {}),
'sampled_q17_alignments': ('django.db.models.fields.IntegerField', [], {}),
'sampled_q17_coverage_percentage': ('django.db.models.fields.FloatField', [], {}),
'sampled_q17_longest_alignment': ('django.db.models.fields.IntegerField', [], {}),
'sampled_q17_mean_alignment_length': ('django.db.models.fields.IntegerField', [], {}),
'sampled_q17_mean_coverage_depth': ('django.db.models.fields.FloatField', [], {}),
'sampled_q20_alignments': ('django.db.models.fields.IntegerField', [], {}),
'sampled_q20_coverage_percentage': ('django.db.models.fields.FloatField', [], {}),
'sampled_q20_longest_alignment': ('django.db.models.fields.IntegerField', [], {}),
'sampled_q20_mean_alignment_length': ('django.db.models.fields.IntegerField', [], {}),
'sampled_q20_mean_coverage_depth': ('django.db.models.fields.FloatField', [], {}),
'sampled_q47_alignments': ('django.db.models.fields.IntegerField', [], {}),
'sampled_q47_coverage_percentage': ('django.db.models.fields.FloatField', [], {}),
'sampled_q47_longest_alignment': ('django.db.models.fields.IntegerField', [], {}),
'sampled_q47_mean_alignment_length': ('django.db.models.fields.IntegerField', [], {}),
'sampled_q47_mean_coverage_depth': ('django.db.models.fields.FloatField', [], {}),
'sampled_q7_alignments': ('django.db.models.fields.IntegerField', [], {}),
'sampled_q7_coverage_percentage': ('django.db.models.fields.FloatField', [], {}),
'sampled_q7_longest_alignment': ('django.db.models.fields.IntegerField', [], {}),
'sampled_q7_mean_alignment_length': ('django.db.models.fields.IntegerField', [], {}),
'sampled_q7_mean_coverage_depth': ('django.db.models.fields.FloatField', [], {}),
'sysSNR': ('django.db.models.fields.FloatField', [], {}),
'totalNumReads': ('django.db.models.fields.IntegerField', [], {}),
'total_mapped_reads': ('django.db.models.fields.BigIntegerField', [], {}),
'total_mapped_target_bases': ('django.db.models.fields.BigIntegerField', [], {}),
'total_number_of_sampled_reads': ('django.db.models.fields.IntegerField', [], {})
},
'rundb.librarykey': {
'Meta': {'object_name': 'LibraryKey'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'direction': ('django.db.models.fields.CharField', [], {'default': "'Forward'", 'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isDefault': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'}),
'runMode': ('django.db.models.fields.CharField', [], {'default': "'single'", 'max_length': '64', 'blank': 'True'}),
'sequence': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'rundb.librarykit': {
'Meta': {'object_name': 'LibraryKit'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '3024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'sap': ('django.db.models.fields.CharField', [], {'max_length': '7', 'blank': 'True'})
},
'rundb.location': {
'Meta': {'object_name': 'Location'},
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'defaultlocation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'rundb.message': {
'Meta': {'object_name': 'Message'},
'body': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'expires': ('django.db.models.fields.TextField', [], {'default': "'read'", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.IntegerField', [], {'default': '20'}),
'route': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'status': ('django.db.models.fields.TextField', [], {'default': "'unread'", 'blank': 'True'}),
'tags': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'rundb.plannedexperiment': {
'Meta': {'ordering': "['-id']", 'object_name': 'PlannedExperiment'},
'adapter': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'autoName': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'chipBarcode': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'controlSequencekitname': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'cycles': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'expName': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'flowsInOrder': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'irworkflow': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'isFavorite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'isPlanGroup': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'isReusable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'isReverseRun': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'isSystem': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'isSystemDefault': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'libkit': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'metaData': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'blank': 'True'}),
'pairedEndLibraryAdapterName': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'parentPlan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'childPlan_set'", 'null': 'True', 'to': "orm['rundb.PlannedExperiment']"}),
'planDisplayedName': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'planExecuted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'planExecutedDate': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'planGUID': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'planName': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'planPGM': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'planShortID': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '5', 'null': 'True', 'blank': 'True'}),
'planStatus': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'preAnalysis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'plans'", 'blank': 'True', 'to': "orm['rundb.Project']"}),
'qcValues': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['rundb.QCType']", 'null': 'True', 'through': "orm['rundb.PlannedExperimentQC']", 'symmetrical': 'False'}),
'reverse_primer': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'runMode': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
'runType': ('django.db.models.fields.CharField', [], {'default': "'GENS'", 'max_length': '512'}),
'runname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'samplePrepKitName': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'seqKitBarcode': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'storageHost': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'storage_options': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '200'}),
'templatingKitName': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'usePostBeadfind': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'usePreBeadfind': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'rundb.plannedexperimentqc': {
'Meta': {'unique_together': "(('plannedExperiment', 'qcType'),)", 'object_name': 'PlannedExperimentQC'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'plannedExperiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rundb.PlannedExperiment']"}),
'qcType': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rundb.QCType']"}),
'threshold': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'rundb.plugin': {
'Meta': {'unique_together': "(('name', 'version'),)", 'object_name': 'Plugin'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'autorun': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'autorunMutable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'config': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'majorBlock': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512', 'db_index': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'pluginsettings': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'script': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'blank': 'True'}),
'selected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '256', 'blank': 'True'}),
'userinputfields': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'rundb.pluginresult': {
'Meta': {'ordering': "['-id']", 'unique_together': "(('plugin', 'result'),)", 'object_name': 'PluginResult'},
'apikey': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'config': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'endtime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jobid': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'plugin': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rundb.Plugin']"}),
'result': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pluginresult_set'", 'to': "orm['rundb.Results']"}),
'size': ('django.db.models.fields.BigIntegerField', [], {'default': '-1'}),
'starttime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'store': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'blank': 'True'})
},
'rundb.project': {
'Meta': {'object_name': 'Project'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'rundb.publisher': {
'Meta': {'object_name': 'Publisher'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'global_meta': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'rundb.qctype': {
'Meta': {'object_name': 'QCType'},
'defaultThreshold': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'maxThreshold': ('django.db.models.fields.PositiveIntegerField', [], {'default': '100'}),
'minThreshold': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'qcName': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '512'})
},
'rundb.qualitymetrics': {
'Meta': {'object_name': 'QualityMetrics'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'q0_100bp_reads': ('django.db.models.fields.IntegerField', [], {}),
'q0_150bp_reads': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'q0_50bp_reads': ('django.db.models.fields.IntegerField', [], {}),
'q0_bases': ('django.db.models.fields.BigIntegerField', [], {}),
'q0_max_read_length': ('django.db.models.fields.IntegerField', [], {}),
'q0_mean_read_length': ('django.db.models.fields.FloatField', [], {}),
'q0_reads': ('django.db.models.fields.IntegerField', [], {}),
'q17_100bp_reads': ('django.db.models.fields.IntegerField', [], {}),
'q17_150bp_reads': ('django.db.models.fields.IntegerField', [], {}),
'q17_50bp_reads': ('django.db.models.fields.IntegerField', [], {}),
'q17_bases': ('django.db.models.fields.BigIntegerField', [], {}),
'q17_max_read_length': ('django.db.models.fields.IntegerField', [], {}),
'q17_mean_read_length': ('django.db.models.fields.FloatField', [], {}),
'q17_reads': ('django.db.models.fields.IntegerField', [], {}),
'q20_100bp_reads': ('django.db.models.fields.IntegerField', [], {}),
'q20_150bp_reads': ('django.db.models.fields.IntegerField', [], {}),
'q20_50bp_reads': ('django.db.models.fields.IntegerField', [], {}),
'q20_bases': ('django.db.models.fields.BigIntegerField', [], {}),
'q20_max_read_length': ('django.db.models.fields.FloatField', [], {}),
'q20_mean_read_length': ('django.db.models.fields.IntegerField', [], {}),
'q20_reads': ('django.db.models.fields.IntegerField', [], {}),
'report': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'qualitymetrics_set'", 'to': "orm['rundb.Results']"})
},
'rundb.referencegenome': {
'Meta': {'ordering': "['short_name']", 'object_name': 'ReferenceGenome'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identity_hash': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'index_version': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'reference_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'species': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'verbose_error': ('django.db.models.fields.CharField', [], {'max_length': '3000', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
'rundb.reportstorage': {
'Meta': {'object_name': 'ReportStorage'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'dirPath': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'webServerPath': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'rundb.results': {
'Meta': {'object_name': 'Results'},
'analysisVersion': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'autoExempt': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'diskusage': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'eas': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'results_set'", 'null': 'True', 'to': "orm['rundb.ExperimentAnalysisSettings']"}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'results_set'", 'to': "orm['rundb.Experiment']"}),
'fastqLink': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'framesProcessed': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'log': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'metaData': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'blank': 'True'}),
'parentIDs': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'processedCycles': ('django.db.models.fields.IntegerField', [], {}),
'processedflows': ('django.db.models.fields.IntegerField', [], {}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'results'", 'symmetrical': 'False', 'to': "orm['rundb.Project']"}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'reportLink': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'reportStatus': ('django.db.models.fields.CharField', [], {'default': "'Nothing'", 'max_length': '64', 'null': 'True'}),
'reportstorage': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'storage'", 'null': 'True', 'to': "orm['rundb.ReportStorage']"}),
'representative': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'resultsName': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'resultsType': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'runid': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'sffLink': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'tfFastq': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'tfSffLink': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'timeStamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'timeToComplete': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'rundb.rig': {
'Meta': {'object_name': 'Rig'},
'alarms': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'ftppassword': ('django.db.models.fields.CharField', [], {'default': "'ionguest'", 'max_length': '64'}),
'ftprootdir': ('django.db.models.fields.CharField', [], {'default': "'results'", 'max_length': '64'}),
'ftpserver': ('django.db.models.fields.CharField', [], {'default': "'192.168.201.1'", 'max_length': '128'}),
'ftpusername': ('django.db.models.fields.CharField', [], {'default': "'ionguest'", 'max_length': '64'}),
'last_clean_date': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'last_experiment': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'last_init_date': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rundb.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'primary_key': 'True'}),
'serial': ('django.db.models.fields.CharField', [], {'max_length': '24', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'updateflag': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updatehome': ('django.db.models.fields.CharField', [], {'default': "'192.168.201.1'", 'max_length': '256'}),
'version': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'blank': 'True'})
},
'rundb.runscript': {
'Meta': {'object_name': 'RunScript'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'script': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'rundb.runtype': {
'Meta': {'object_name': 'RunType'},
'barcode': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'nucleotideType': ('django.db.models.fields.CharField', [], {'default': "'dna'", 'max_length': '64', 'blank': 'True'}),
'runType': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'rundb.sample': {
'Meta': {'unique_together': "(('name', 'externalId'),)", 'object_name': 'Sample'},
'date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'displayedName': ('django.db.models.fields.CharField', [], {'max_length': '127', 'null': 'True', 'blank': 'True'}),
'experiments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'samples'", 'null': 'True', 'to': "orm['rundb.Experiment']"}),
'externalId': ('django.db.models.fields.CharField', [], {'max_length': '127', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '127', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'})
},
'rundb.sequencingkit': {
'Meta': {'object_name': 'SequencingKit'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '3024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'sap': ('django.db.models.fields.CharField', [], {'max_length': '7', 'blank': 'True'})
},
'rundb.template': {
'Meta': {'object_name': 'Template'},
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isofficial': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'sequence': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'rundb.tfmetrics': {
'HPAccuracy': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'Meta': {'object_name': 'TFMetrics'},
'Q10Histo': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'Q10Mean': ('django.db.models.fields.FloatField', [], {}),
'Q10ReadCount': ('django.db.models.fields.FloatField', [], {}),
'Q17Histo': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'Q17Mean': ('django.db.models.fields.FloatField', [], {}),
'Q17ReadCount': ('django.db.models.fields.FloatField', [], {}),
'SysSNR': ('django.db.models.fields.FloatField', [], {}),
'aveKeyCount': ('django.db.models.fields.FloatField', [], {}),
'corrHPSNR': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keypass': ('django.db.models.fields.FloatField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'number': ('django.db.models.fields.FloatField', [], {}),
'report': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tfmetrics_set'", 'to': "orm['rundb.Results']"}),
'sequence': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'rundb.threeprimeadapter': {
'Meta': {'object_name': 'ThreePrimeadapter'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'direction': ('django.db.models.fields.CharField', [], {'default': "'Forward'", 'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isDefault': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'}),
'runMode': ('django.db.models.fields.CharField', [], {'default': "'single'", 'max_length': '64', 'blank': 'True'}),
'sequence': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'rundb.usereventlog': {
'Meta': {'object_name': 'UserEventLog'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'timeStamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'upload': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': "orm['rundb.ContentUpload']"})
},
'rundb.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '93'}),
'note': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'phone_number': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "'user'", 'max_length': '256'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'rundb.variantfrequencies': {
'Meta': {'object_name': 'VariantFrequencies'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '3024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'})
}
}
complete_apps = ['rundb']
| 85.57984
| 199
| 0.566839
|
4a028aa923bfb6546f5a22b342f4fdf8b2702942
| 2,972
|
py
|
Python
|
ada/node_naming.py
|
carter-e-veldhuizen/RACK
|
8ae9b4ecd17d812966aed985d6bc51b1b7ca6e79
|
[
"BSD-3-Clause"
] | null | null | null |
ada/node_naming.py
|
carter-e-veldhuizen/RACK
|
8ae9b4ecd17d812966aed985d6bc51b1b7ca6e79
|
[
"BSD-3-Clause"
] | null | null | null |
ada/node_naming.py
|
carter-e-veldhuizen/RACK
|
8ae9b4ecd17d812966aed985d6bc51b1b7ca6e79
|
[
"BSD-3-Clause"
] | null | null | null |
"""
This module contains facilities for extracting traceability comments from an Ada program.
"""
__copyright__ = "Copyright (c) 2020, Galois, Inc."
import logging
from typing import Callable, Dict, NewType, Optional, Set
import urllib.parse
import re
import libadalang as lal
from ada_visitor import AdaVisitor
logger = logging.getLogger('ada')
# Type to abstract over what nodes we put in the graph. Can be turned into a
# concrete or abstract class if the complexity ever demands it.
GraphNode = lal.Name
def warn_about_node(node: lal.AdaNode) -> None:
    """
    Log a warning for a node whose definition site could not be resolved.
    """
    # full_sloc_image ends with ": " — drop those two characters.
    location = node.full_sloc_image[:-2]
    logger.warning(
        f"Could not resolve the name {node.text} as it appears in {location}."
    )
def safe_xref(node: lal.AdaNode) -> Optional[lal.DefiningName]:
    """
    Safe wrapper around p_gnat_xref, which can fail catastrophically
    (dereferences a null pointer).  Returns None (after warning) on failure.
    """
    try:
        resolved = node.p_gnat_xref()
    except lal.PropertyError:
        warn_about_node(node)
        return None
    return resolved
NodeDisplayName = NewType("NodeDisplayName", str)
NodeFile = NewType("NodeFile", str)
NodeIdentifier = NewType("NodeIdentifier", str)
NodeKey = NewType("NodeKey", str)
NodeURI = NewType("NodeURI", str)
def get_node_display_name(node: GraphNode) -> NodeDisplayName:
    """Name displayed for a node: its source text."""
    display = node.text
    return NodeDisplayName(display)
def get_node_file(node: GraphNode) -> Optional[NodeFile]:
    """
    Return the name of the file in which this node is defined, or None when
    the reference cannot be resolved.
    """
    resolved = safe_xref(node)
    if resolved is None:
        return None
    # full_sloc_image yields "file.ada:<line>:<column>: "; we keep only the
    # filename.  (For the full path, resolved.p_basic_decl.unit.filename
    # would work instead.)
    filename = resolved.p_basic_decl.full_sloc_image.split(":")[0]
    return NodeFile(filename)
def get_node_key(node: GraphNode) -> NodeKey:
    """
    Return a key identifying this node uniquely, preferring its resolved
    defining name and falling back to the node itself.
    """
    resolved = safe_xref(node)
    return NodeKey(str(node) if resolved is None else str(resolved))
def get_node_identifier(node: GraphNode) -> NodeIdentifier:
    """Identifier used for the node in the database: its URI."""
    uri = get_node_uri(node)
    return NodeIdentifier(uri)
def get_node_uri(node: GraphNode) -> NodeURI:
    """Computes the URI to use for a node.

    Uses the resolved fully-qualified name when reference resolution
    succeeds, falling back to the node's own canonical relative name.
    Names are percent-encoded because Ada allows operator overloading,
    so a function may be called "&" for instance.
    """
    encode = urllib.parse.quote_plus
    # Consistency fix: go through safe_xref() rather than calling
    # p_gnat_xref() directly — per safe_xref's docstring the raw property
    # can fail catastrophically; safe_xref warns and returns None instead.
    xref = safe_xref(node)
    if not xref:
        return NodeURI(f'SWCOM_{encode(node.p_relative_name.p_canonical_text)}')
    return NodeURI(f'SWCOM_{encode(xref.p_basic_decl.p_canonical_fully_qualified_name)}')
| 32.659341
| 93
| 0.702557
|
4a028b4129e3757b72078490e8e7b253610a3474
| 2,110
|
py
|
Python
|
Chapter 2/bhp_ssh_server.py
|
carloocchiena/blackhat_python_book_code
|
ce181b084e9416dff4c6fba8f4fe7976e6216ba8
|
[
"MIT"
] | 36
|
2021-11-16T07:59:39.000Z
|
2022-03-20T21:23:35.000Z
|
Chapter 2/bhp_ssh_server.py
|
carloocchiena/blackhat_python_book_code
|
ce181b084e9416dff4c6fba8f4fe7976e6216ba8
|
[
"MIT"
] | null | null | null |
Chapter 2/bhp_ssh_server.py
|
carloocchiena/blackhat_python_book_code
|
ce181b084e9416dff4c6fba8f4fe7976e6216ba8
|
[
"MIT"
] | 5
|
2022-01-16T22:59:05.000Z
|
2022-02-16T17:23:00.000Z
|
import socket
import paramiko
import threading
import sys
# using the server host key from the paramiko demo files
host_key = paramiko.RSAKey(filename="test_rsa.key")
class Server(paramiko.ServerInterface):
    """Minimal paramiko server interface: accepts "session" channels and a
    single hard-coded credential pair (root/toor)."""

    def __init__(self):
        self.event = threading.Event()

    def check_channel_request(self, kind, chanid):
        # Only interactive "session" channels are allowed.
        if kind != "session":
            return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
        return paramiko.OPEN_SUCCEEDED

    def check_auth_password(self, username, password):
        # Hard-coded demo credentials.
        authenticated = (username, password) == ("root", "toor")
        return paramiko.AUTH_SUCCESSFUL if authenticated else paramiko.AUTH_FAILED
# Script entry point: expects "<bind-host> <port>" on the command line.
server = sys.argv[1]
ssh_port = int(sys.argv[2])
try:
    # Plain TCP listener that the SSH transport will be layered on top of.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind((server, ssh_port))
    sock.listen(100)
    print("[+] Listening for connection...")
    # Blocks until one client connects; only a single client is served.
    client, addr = sock.accept()
except Exception as e:
    print(f"[-] Listen failed: {e}")
    sys.exit(1)
print("[+] Got a connection!")
try:
    # noinspection PyTypeChecker
    bh_session = paramiko.Transport(client)
    bh_session.add_server_key(host_key)
    # NOTE(review): rebinding `server` here shadows the bind-host string
    # parsed from argv above — consider a distinct name.
    server = Server()
    try:
        bh_session.start_server(server=server)
    except paramiko.SSHException:
        # NOTE(review): execution continues after a failed negotiation, so
        # accept() below may return None and crash on chan.recv — consider
        # exiting here instead.
        print("[-] SSH Negotiation failed")
    # Wait up to 20 seconds for the client to open a channel.
    chan = bh_session.accept(20)
    print("[+] Authenticated!")
    print(chan.recv(1024))
    chan.send("Welcome to bhp_ssh!")
    # Interactive loop: forward typed commands to the client, echo replies.
    while True:
        try:
            command = input("Enter command: ").strip("\n")
            if command != "exit":
                chan.send(command)
                print(chan.recv(1024).decode(errors="ignore") + "\n")
            else:
                # "exit" is forwarded, then the session is torn down by
                # raising through the outer handler.
                chan.send("exit")
                print("Exiting...")
                bh_session.close()
                raise Exception("exit")
        except KeyboardInterrupt:
            bh_session.close()
except Exception as e:
    print(f"[-] Caught exception: {str(e)}")
    bh_session.close()
finally:
    # Always terminate the process once the session ends.
    sys.exit(1)
| 27.402597
| 69
| 0.61327
|
4a028c133f6187829a7df97eec65aa0ec7da3de2
| 289
|
py
|
Python
|
blog/signals.py
|
alexnazarov090/Django_DIY_BLog
|
9daeacff780726e986247782ebccfc70bd529016
|
[
"MIT"
] | null | null | null |
blog/signals.py
|
alexnazarov090/Django_DIY_BLog
|
9daeacff780726e986247782ebccfc70bd529016
|
[
"MIT"
] | null | null | null |
blog/signals.py
|
alexnazarov090/Django_DIY_BLog
|
9daeacff780726e986247782ebccfc70bd529016
|
[
"MIT"
] | null | null | null |
from django.db.models.signals import post_delete
from django.dispatch import receiver
from blog.models import BlogAuthor
@receiver(post_delete, sender=BlogAuthor)
def delete_blogger_status(sender, instance, **kwargs):
    """Clear the blogger flag on the related user when a BlogAuthor is deleted.

    Connected to Django's post_delete signal for BlogAuthor.
    """
    # NOTE(review): `instance.username` appears to be a relation to the user
    # model (despite the name suggesting a string) — confirm against models.
    instance.username.is_blogger = False
    instance.username.save()
| 32.111111
| 54
| 0.809689
|
4a028c3cc0e5992d7d3fef54961983964ff84cf0
| 3,837
|
py
|
Python
|
twobody/utils.py
|
adrn/TwoBody
|
3af41d642e3485aec9ddc18e582e73c561c4df96
|
[
"BSD-3-Clause"
] | 3
|
2020-09-14T17:36:20.000Z
|
2022-03-05T11:18:54.000Z
|
twobody/utils.py
|
adrn/TwoBody
|
3af41d642e3485aec9ddc18e582e73c561c4df96
|
[
"BSD-3-Clause"
] | 11
|
2017-08-10T14:57:23.000Z
|
2021-07-13T19:20:58.000Z
|
twobody/utils.py
|
adrn/TwoBody
|
3af41d642e3485aec9ddc18e582e73c561c4df96
|
[
"BSD-3-Clause"
] | 3
|
2017-08-09T10:41:27.000Z
|
2020-12-11T14:17:34.000Z
|
# Third-party
from astropy.time import Time
from astropy.utils import check_broadcast
import numpy as np
__all__ = ['ArrayProcessor']
class ArrayProcessor:
    """Prepare several array-likes for element-wise C-level processing.

    The inputs are broadcast to a common shape, flattened to contiguous
    1-d float64 arrays (``prepare_arrays``), and results can be reshaped
    back to the common shape (``prepare_result``).
    """

    def __init__(self, *arrs):
        self.arrs = [np.array(arr) for arr in arrs]

    def prepare_arrays(self):
        """Make sure input arrays are all C-contiguous and have same shape.

        Returns
        -------
        list of 1-d contiguous float64 ndarrays, one per input, all of the
        same length (the product of the common broadcast shape).

        Raises
        ------
        ValueError
            If the input shapes are not mutually broadcastable.
        """
        # BUG FIX: the old code picked max_shape as the lexicographic tuple
        # maximum of the input shapes, which is not the broadcast shape in
        # general (e.g. (2,) vs (3, 1)).  np.broadcast_shapes computes the
        # true common shape and raises ValueError for incompatible inputs.
        self.max_shape = np.broadcast_shapes(*(arr.shape for arr in self.arrs))

        arrs_1d = []
        for arr in self.arrs:
            flat = np.broadcast_to(arr, self.max_shape).ravel()
            arrs_1d.append(np.ascontiguousarray(flat.astype(np.float64)))
        return arrs_1d

    def prepare_result(self, res):
        """Reshape a flat result back to the common broadcast shape."""
        return res.reshape(self.max_shape)
def format_doc(docstring, *args, **kwargs):
"""
A modified version of `astropy.utils.decorators.format_doc` that first
applies ``.format()`` with ``__doc__``, then calls ``.format()`` with the
arguments.
Replaces the docstring of the decorated object and then formats it.
The formatting works like :meth:`str.format` and if the decorated object
already has a docstring this docstring can be included in the new
documentation if you use the ``{__doc__}`` placeholder.
Its primary use is for reusing a *long* docstring in multiple functions
when it is the same or only slightly different between them.
Parameters
----------
docstring : str or object or None
The docstring that will replace the docstring of the decorated
object. If it is an object like a function or class it will
take the docstring of this object. If it is a string it will use the
string itself. One special case is if the string is ``None`` then
it will use the decorated functions docstring and formats it.
args :
passed to :meth:`str.format`.
kwargs :
passed to :meth:`str.format`. If the function has a (not empty)
docstring the original docstring is added to the kwargs with the
keyword ``'__doc__'``.
Raises
------
ValueError
If the ``docstring`` (or interpreted docstring if it was ``None``
or not a string) is empty.
IndexError, KeyError
If a placeholder in the (interpreted) ``docstring`` was not filled. see
:meth:`str.format` for more information.
Notes
-----
Using this decorator allows, for example Sphinx, to parse the
correct docstring.
"""
def set_docstring(obj):
if docstring is None:
# None means: use the objects __doc__
doc = obj.__doc__
# Delete documentation in this case so we don't end up with
# awkwardly self-inserted docs.
obj.__doc__ = None
elif isinstance(docstring, str):
# String: use the string that was given
doc = docstring
else:
# Something else: Use the __doc__ of this
doc = docstring.__doc__
if not doc:
# In case the docstring is empty it's probably not what was wanted.
raise ValueError('docstring must be a string or containing a '
'docstring that is not empty.')
# If the original has a not-empty docstring append it to the format
# kwargs.
_doc = obj.__doc__ or ''
doc = doc.format(__doc__=_doc)
obj.__doc__ = doc.format(*args, **kwargs)
return obj
return set_docstring
| 36.542857
| 79
| 0.624967
|
4a028c5056e21ab06079ebeb07df52e087c37fe0
| 7,955
|
py
|
Python
|
tellopy/_internal/protocol.py
|
cstansbu/TelloPy
|
d1b8d486866421a7a28558238eb420eb0147d95a
|
[
"Apache-2.0"
] | 3
|
2021-07-28T16:06:28.000Z
|
2021-09-10T18:07:01.000Z
|
tellopy/_internal/protocol.py
|
cstansbu/TelloPy
|
d1b8d486866421a7a28558238eb420eb0147d95a
|
[
"Apache-2.0"
] | 6
|
2020-07-27T13:04:44.000Z
|
2021-09-22T19:27:12.000Z
|
bot/lib/python3.7/site-packages/tellopy/_internal/protocol.py
|
carlosrh18/DavinciBot
|
d73a6b7f68d7bab25d134d3f85c6b63a86c206c5
|
[
"MIT"
] | 1
|
2021-03-05T19:26:15.000Z
|
2021-03-05T19:26:15.000Z
|
import datetime
from io import BytesIO
from . import crc
from . utils import *
# --- Packet framing -----------------------------------------------------
START_OF_PACKET = 0xcc
# --- Message / command identifiers (values from the Tello UDP protocol) --
WIFI_MSG = 0x1a
VIDEO_RATE_QUERY = 40
LIGHT_MSG = 53
FLIGHT_MSG = 0x56
LOG_MSG = 0x1050
VIDEO_ENCODER_RATE_CMD = 0x20
VIDEO_START_CMD = 0x25
VIDEO_MODE_CMD = 0x0031
EXPOSURE_CMD = 0x34
TIME_CMD = 70
STICK_CMD = 80
TAKEOFF_CMD = 0x0054
LAND_CMD = 0x0055
FLIP_CMD = 0x005c
PALM_LAND_CMD = 0x005e
SET_ALT_LIMIT_CMD = 0x0058
TAKE_PICTURE_COMMAND = 48
# File-transfer commands ("pt48"/"pt50" refer to the packet type used).
TELLO_CMD_FILE_SIZE = 98  # pt50
TELLO_CMD_FILE_DATA = 99  # pt50
TELLO_CMD_FILE_COMPLETE = 100  # pt48
# --- Flip directions (payload values for FLIP_CMD) -----------------------
# Flip commands taken from Go version of code
# FlipFront flips forward.
FlipFront = 0
# FlipLeft flips left.
FlipLeft = 1
# FlipBack flips backwards.
FlipBack = 2
# FlipRight flips to the right.
FlipRight = 3
# FlipForwardLeft flips forwards and to the left.
FlipForwardLeft = 4
# FlipBackLeft flips backwards and to the left.
FlipBackLeft = 5
# FlipBackRight flips backwards and to the right.
FlipBackRight = 6
# FlipForwardRight flips forwards and to the right.
FlipForwardRight = 7
class Packet(object):
    """A single packet of the Tello binary UDP protocol.

    A packet can be built three ways: from a text command (each character
    becomes one byte), from existing raw bytes, or from a numeric command id
    (which produces the standard 9-byte header, ready for payload bytes and
    a later ``fixup()``).
    """

    def __init__(self, cmd, pkt_type=0x68, payload=b''):
        if isinstance(cmd, str):
            # Text command: encode each character as one byte.
            self.buf = bytearray()
            for c in cmd:
                self.buf.append(ord(c))
        elif isinstance(cmd, (bytearray, bytes)):
            # Raw packet bytes: copy verbatim.
            self.buf = bytearray()
            self.buf[:] = cmd
        else:
            # Numeric command id: build the binary header; size, sequence
            # number and CRCs are filled in later by fixup().
            self.buf = bytearray([
                START_OF_PACKET,
                0, 0,
                0,
                pkt_type,
                (cmd & 0xff), ((cmd >> 8) & 0xff),
                0, 0])
            self.buf.extend(payload)

    def fixup(self, seq_num=0):
        """Fill in size, header CRC8, sequence number and trailing CRC16."""
        buf = self.get_buffer()
        if buf[0] == START_OF_PACKET:
            buf[1], buf[2] = le16(len(buf)+2)
            buf[1] = (buf[1] << 3)  # size field is stored shifted left 3 bits
            buf[3] = crc.crc8(buf[0:3])
            buf[7], buf[8] = le16(seq_num)
            self.add_int16(crc.crc16(buf))

    def get_buffer(self):
        """Return the underlying bytearray (not a copy)."""
        return self.buf

    def get_data(self):
        """Return the payload: everything between the header and the CRC16."""
        return self.buf[9:len(self.buf)-2]

    def add_byte(self, val):
        """Append one byte (low 8 bits of ``val``)."""
        self.buf.append(val & 0xff)

    def add_int16(self, val):
        """Append a 16-bit value, little-endian."""
        self.add_byte(val)
        self.add_byte(val >> 8)

    def add_time(self, time=None):
        """Append a timestamp (hour, minute, second, milliseconds).

        BUG FIX: the previous default ``time=datetime.datetime.now()`` was
        evaluated once at import, so every defaulted call stamped the
        module-load time instead of the current time.  ``None`` now means
        "now, at call time".
        """
        if time is None:
            time = datetime.datetime.now()
        self.add_int16(time.hour)
        self.add_int16(time.minute)
        self.add_int16(time.second)
        self.add_int16(int(time.microsecond/1000) & 0xff)
        self.add_int16((int(time.microsecond/1000) >> 8) & 0xff)

    def get_time(self, buf=None):
        """Decode a timestamp from ``buf`` (defaults to this packet's payload)."""
        if buf is None:
            buf = self.get_data()[1:]
        hour = int16(buf[0], buf[1])
        min = int16(buf[2], buf[3])
        sec = int16(buf[4], buf[5])
        # NOTE(review): buf[6], buf[8] skips buf[7] — possibly meant to be
        # buf[6], buf[7]; left unchanged pending protocol verification.
        millisec = int16(buf[6], buf[8])
        now = datetime.datetime.now()
        # NOTE(review): millisec is passed into datetime's *microsecond*
        # field — confirm the intended unit.
        return datetime.datetime(now.year, now.month, now.day, hour, min, sec, millisec)
class FlightData(object):
    """Decoded telemetry from a Tello FLIGHT_MSG payload.

    Every field defaults to 0; if the payload is shorter than 24 bytes the
    defaults are kept and no parsing is done.
    """

    def __init__(self, data):
        # Default every field to 0 so a short payload still yields a
        # fully-populated object.
        self.battery_low = 0
        self.battery_lower = 0
        self.battery_percentage = 0
        self.battery_state = 0
        self.camera_state = 0
        self.down_visual_state = 0
        self.drone_battery_left = 0
        self.drone_fly_time_left = 0
        self.drone_hover = 0
        self.em_open = 0
        self.em_sky = 0
        self.em_ground = 0
        self.east_speed = 0
        self.electrical_machinery_state = 0
        self.factory_mode = 0
        self.fly_mode = 0
        self.fly_speed = 0
        self.fly_time = 0
        self.front_in = 0
        self.front_lsc = 0
        self.front_out = 0
        self.gravity_state = 0
        self.ground_speed = 0
        self.height = 0
        self.imu_calibration_state = 0
        self.imu_state = 0
        self.light_strength = 0
        self.north_speed = 0
        self.outage_recording = 0
        self.power_state = 0
        self.pressure_state = 0
        self.smart_video_exit_mode = 0
        self.temperature_height = 0
        self.throw_fly_timer = 0
        self.wifi_disturb = 0
        self.wifi_strength = 0
        self.wind_state = 0
        if len(data) < 24:
            # Payload too short to contain the full telemetry record.
            return
        # 16-bit little-endian fields.
        self.height = int16(data[0], data[1])
        self.north_speed = int16(data[2], data[3])
        self.east_speed = int16(data[4], data[5])
        self.ground_speed = int16(data[6], data[7])
        self.fly_time = int16(data[8], data[9])
        # data[10]: sensor/status bit flags.
        self.imu_state = ((data[10] >> 0) & 0x1)
        self.pressure_state = ((data[10] >> 1) & 0x1)
        self.down_visual_state = ((data[10] >> 2) & 0x1)
        self.power_state = ((data[10] >> 3) & 0x1)
        self.battery_state = ((data[10] >> 4) & 0x1)
        self.gravity_state = ((data[10] >> 5) & 0x1)
        # NOTE(review): bit 6 of data[10] is unused here — confirm intended.
        self.wind_state = ((data[10] >> 7) & 0x1)
        self.imu_calibration_state = data[11]
        self.battery_percentage = data[12]
        self.drone_battery_left = int16(data[13], data[14])
        self.drone_fly_time_left = int16(data[15], data[16])
        # data[17]: flight/battery condition bit flags.
        self.em_sky = ((data[17] >> 0) & 0x1)
        self.em_ground = ((data[17] >> 1) & 0x1)
        self.em_open = ((data[17] >> 2) & 0x1)
        self.drone_hover = ((data[17] >> 3) & 0x1)
        self.outage_recording = ((data[17] >> 4) & 0x1)
        self.battery_low = ((data[17] >> 5) & 0x1)
        self.battery_lower = ((data[17] >> 6) & 0x1)
        self.factory_mode = ((data[17] >> 7) & 0x1)
        self.fly_mode = data[18]
        self.throw_fly_timer = data[19]
        self.camera_state = data[20]
        self.electrical_machinery_state = data[21]
        # data[22]: front-sensor bit flags.
        self.front_in = ((data[22] >> 0) & 0x1)
        self.front_out = ((data[22] >> 1) & 0x1)
        self.front_lsc = ((data[22] >> 2) & 0x1)
        self.temperature_height = ((data[23] >> 0) & 0x1)

    def __str__(self):
        """Compact one-line summary of the most commonly watched fields."""
        return (
            ("ALT: %2d" % self.height) +
            (" | SPD: %2d" % self.ground_speed) +
            (" | BAT: %2d" % self.battery_percentage) +
            (" | WIFI: %2d" % self.wifi_strength) +
            (" | CAM: %2d" % self.camera_state) +
            (" | MODE: %2d" % self.fly_mode) +
            # (", drone_battery_left=0x%04x" % self.drone_battery_left) +
            "")
class DownloadedFile(object):
    """Reassembles a file downloaded from the drone in 1 KB fragments.

    Fragments are grouped in chunks of 8; a bitmask per chunk tracks which
    fragments have arrived so duplicates can be ignored.
    """

    def __init__(self, filenum, size):
        self.filenum = filenum
        self.size = size
        self.bytes_recieved = 0  # (sic) name kept for API compatibility
        # One bitmask byte per group of 8 fragments.
        self.chunks_received = [0x00] * int((size / 1024 + 1) / 8 + 1)
        self.buffer = BytesIO()

    def done(self):
        """True once at least `size` bytes have been recorded."""
        return self.bytes_recieved >= self.size

    def data(self):
        """Return the bytes assembled so far."""
        return self.buffer.getvalue()

    def haveFragment(self, chunk, fragment):
        """Non-zero if this fragment of `chunk` was already recorded."""
        bit = 1 << (fragment % 8)
        return self.chunks_received[chunk] & bit

    def recvFragment(self, chunk, fragment, size, data):
        """Record one fragment.

        Duplicates are ignored (returns False).  Returns True when every
        fragment bit of `chunk` is set.
        """
        if self.haveFragment(chunk, fragment):
            return False
        # Each fragment occupies a fixed 1 KB slot in the output buffer.
        self.buffer.seek(fragment * 1024)
        self.buffer.write(data)
        self.bytes_recieved += size
        self.chunks_received[chunk] |= 1 << (fragment % 8)
        # NOTE(review): a final chunk with fewer than 8 fragments can never
        # reach 0xFF — behavior preserved as-is.
        return self.chunks_received[chunk] == 0xFF
class VideoData(object):
    # Largest fragment index seen so far.  Shared across all instances and
    # used as a running estimate of how many packets make up one frame.
    packets_per_frame = 0

    def __init__(self, data):
        """Parse the two header bytes of a video packet.

        NOTE(review): h0 appears to be a frame sequence counter (wrapping at
        256) and the low 7 bits of h1 a fragment index within the frame —
        inferred from the arithmetic in gap(); confirm against the protocol.
        """
        self.h0 = byte(data[0])
        self.h1 = byte(data[1])
        if VideoData.packets_per_frame < (self.h1 & 0x7f):
            VideoData.packets_per_frame = (self.h1 & 0x7f)

    def gap(self, video_data):
        """Estimate packets lost between `video_data` (previous) and self.

        Returns 0 when there is no previous packet or the sequence looks
        contiguous; otherwise a loss count based on the frame-counter delta
        and the packets_per_frame estimate.
        """
        if video_data is None:
            return 0
        v0 = self
        v1 = video_data
        loss = 0
        # Loss is suspected when the frame counter jumps by more than one,
        # when a new frame does not start at fragment 0, or when fragments
        # within the same frame are not consecutive.
        if ((v0.h0 != v1.h0 and v0.h0 != ((v1.h0 + 1) & 0xff))
            or (v0.h0 != v1.h0 and (v0.h1 & 0x7f) != 00)
            or (v0.h0 == v1.h0 and (v0.h1 & 0x7f) != (v1.h1 & 0x7f) + 1)):
            loss = v0.h0 - v1.h0
            if loss < 0:
                # Frame counter wrapped around 256.
                loss = loss + 256
            loss = loss * VideoData.packets_per_frame + ((v0.h1 & 0x7f) - (v1.h1 & 0x7f) - 1)
        return loss
| 31.318898
| 93
| 0.56807
|
4a028c6172cb880e5ac4035dd4fa1c536ff2cc03
| 73,173
|
py
|
Python
|
src/sage/schemes/toric/divisor.py
|
bopopescu/sagesmc
|
e8d1d31f6f598dba2d763baa2d2e804338f9e89e
|
[
"BSL-1.0"
] | 5
|
2015-01-04T07:15:06.000Z
|
2022-03-04T15:15:18.000Z
|
src/sage/schemes/toric/divisor.py
|
bopopescu/sagesmc
|
e8d1d31f6f598dba2d763baa2d2e804338f9e89e
|
[
"BSL-1.0"
] | null | null | null |
src/sage/schemes/toric/divisor.py
|
bopopescu/sagesmc
|
e8d1d31f6f598dba2d763baa2d2e804338f9e89e
|
[
"BSL-1.0"
] | 10
|
2016-09-28T13:12:40.000Z
|
2022-02-12T09:28:34.000Z
|
r"""
Toric divisors and divisor classes
Let `X` be a :class:`toric variety
<sage.schemes.toric.variety.ToricVariety_field>` corresponding to a
:class:`rational polyhedral fan <sage.geometry.fan.RationalPolyhedralFan>`
`\Sigma`. A :class:`toric divisor <ToricDivisor_generic>` `D` is a T-Weil
divisor over a given coefficient ring (usually `\ZZ` or `\QQ`), i.e. a formal
linear combination of torus-invariant subvarieties of `X` of codimension one.
In homogeneous coordinates `[z_0:\cdots:z_k]`, these are the subvarieties
`\{z_i=0\}`. Note that there is a finite number of such subvarieties, one for
each ray of `\Sigma`. We generally identify
* Toric divisor `D`,
* Sheaf `\mathcal{O}(D)` (if `D` is Cartier, it is a line bundle),
* Support function `\phi_D` (if `D` is `\QQ`-Cartier, it is a function
linear on each cone of `\Sigma`).
EXAMPLES:
We start with an illustration of basic divisor arithmetic::
sage: dP6 = toric_varieties.dP6()
sage: Dx,Du,Dy,Dv,Dz,Dw = dP6.toric_divisor_group().gens()
sage: Dx
V(x)
sage: -Dx
-V(x)
sage: 2*Dx
2*V(x)
sage: Dx*2
2*V(x)
sage: (1/2)*Dx + Dy/3 - Dz
1/2*V(x) + 1/3*V(y) - V(z)
sage: Dx.parent()
Group of toric ZZ-Weil divisors
on 2-d CPR-Fano toric variety covered by 6 affine patches
sage: (Dx/2).parent()
Group of toric QQ-Weil divisors
on 2-d CPR-Fano toric variety covered by 6 affine patches
Now we create a more complicated variety to demonstrate divisors of different
types::
sage: F = Fan(cones=[(0,1,2,3), (0,1,4)],
... rays=[(1,1,1), (1,-1,1), (1,-1,-1), (1,1,-1), (0,0,1)])
sage: X = ToricVariety(F)
sage: QQ_Cartier = X.divisor([2,2,1,1,1])
sage: Cartier = 2 * QQ_Cartier
sage: Weil = X.divisor([1,1,1,0,0])
sage: QQ_Weil = 1/2 * Weil
sage: [QQ_Weil.is_QQ_Weil(),
... QQ_Weil.is_Weil(),
... QQ_Weil.is_QQ_Cartier(),
... QQ_Weil.is_Cartier()]
[True, False, False, False]
sage: [Weil.is_QQ_Weil(),
... Weil.is_Weil(),
... Weil.is_QQ_Cartier(),
... Weil.is_Cartier()]
[True, True, False, False]
sage: [QQ_Cartier.is_QQ_Weil(),
... QQ_Cartier.is_Weil(),
... QQ_Cartier.is_QQ_Cartier(),
... QQ_Cartier.is_Cartier()]
[True, True, True, False]
sage: [Cartier.is_QQ_Weil(),
... Cartier.is_Weil(),
... Cartier.is_QQ_Cartier(),
... Cartier.is_Cartier()]
[True, True, True, True]
The toric (`\QQ`-Weil) divisors on a toric variety `X` modulo linear
equivalence generate the divisor **class group** `\mathrm{Cl}(X)`, implemented
by :class:`ToricRationalDivisorClassGroup`. If `X` is smooth, this equals the
**Picard group** `\mathop{\mathrm{Pic}}(X)`. We continue using del Pezzo
surface of degree 6 introduced above::
sage: Cl = dP6.rational_class_group(); Cl
The toric rational divisor class group
of a 2-d CPR-Fano toric variety covered by 6 affine patches
sage: Cl.ngens()
4
sage: c0,c1,c2,c3 = Cl.gens()
sage: c = c0 + 2*c1 - c3; c
Divisor class [1, 2, 0, -1]
Divisors are mapped to their classes and lifted via::
sage: Dx.divisor_class()
Divisor class [1, 0, 0, 0]
sage: Dx.divisor_class() in Cl
True
sage: (-Dw+Dv+Dy).divisor_class()
Divisor class [1, 0, 0, 0]
sage: c0
Divisor class [1, 0, 0, 0]
sage: c0.lift()
V(x)
The (rational) divisor class group is where the Kaehler cone lives::
sage: Kc = dP6.Kaehler_cone(); Kc
4-d cone in 4-d lattice
sage: Kc.rays()
Divisor class [0, 1, 1, 0],
Divisor class [0, 0, 1, 1],
Divisor class [1, 1, 0, 0],
Divisor class [1, 1, 1, 0],
Divisor class [0, 1, 1, 1]
in Basis lattice of The toric rational divisor class group
of a 2-d CPR-Fano toric variety covered by 6 affine patches
sage: Kc.ray(1).lift()
V(y) + V(v)
Given a divisor `D`, we have an associated line bundle (or a reflexive
sheaf, if `D` is not Cartier) `\mathcal{O}(D)`. Its sections are::
sage: P2 = toric_varieties.P2()
sage: H = P2.divisor(0); H
V(x)
sage: H.sections()
(M(-1, 0), M(-1, 1), M(0, 0))
sage: H.sections_monomials()
(z, y, x)
Note that the space of sections is always spanned by
monomials. Therefore, we can grade the sections (as homogeneous
monomials) by their weight under rescaling individual
coordinates. This weight data amounts to a point of the dual lattice.
In the same way, we can grade cohomology groups by their cohomological
degree and a weight::
sage: M = P2.fan().lattice().dual()
sage: H.cohomology(deg=0, weight=M(-1,0))
Vector space of dimension 1 over Rational Field
sage: _.dimension()
1
Here is a more complicated example with `h^1(dP_6, \mathcal{O}(D))=4` ::
sage: D = dP6.divisor([0, 0, -1, 0, 2, -1])
sage: D.cohomology()
{0: Vector space of dimension 0 over Rational Field,
1: Vector space of dimension 4 over Rational Field,
2: Vector space of dimension 0 over Rational Field}
sage: D.cohomology(dim=True)
(0, 4, 0)
AUTHORS:
- Volker Braun, Andrey Novoseltsev (2010-09-07): initial version.
"""
#*****************************************************************************
# Copyright (C) 2012 Volker Braun <vbraun.name@gmail.com>
# Copyright (C) 2012 Andrey Novoseltsev <novoselt@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.combinat.combination import Combinations
from sage.geometry.cone import is_Cone
from sage.geometry.polyhedron.constructor import Polyhedron
from sage.geometry.toric_lattice_element import is_ToricLatticeElement
from sage.homology.simplicial_complex import SimplicialComplex
from sage.matrix.constructor import matrix
from sage.misc.all import latex, flatten, prod
from sage.modules.all import vector
from sage.modules.free_module import (FreeModule_ambient_field,
FreeModule_ambient_pid)
from sage.rings.all import QQ, ZZ
from sage.schemes.generic.divisor import Divisor_generic
from sage.schemes.generic.divisor_group import DivisorGroup_generic
from sage.schemes.toric.divisor_class import ToricRationalDivisorClass
from sage.schemes.toric.variety import CohomologyRing, is_ToricVariety
from sage.structure.unique_representation import UniqueRepresentation
from sage.structure.element import is_Vector
#********************************************************
class ToricDivisorGroup(DivisorGroup_generic):
    r"""
    The group of (`\QQ`-T-Weil) divisors on a toric variety.

    EXAMPLES::

        sage: P2 = toric_varieties.P2()
        sage: P2.toric_divisor_group()
        Group of toric ZZ-Weil divisors
        on 2-d CPR-Fano toric variety covered by 3 affine patches
    """
    def __init__(self, toric_variety, base_ring):
        r"""
        Construct an instance of :class:`ToricDivisorGroup`.

        INPUT:

        - ``toric_variety`` -- a :class:`toric variety
          <sage.schemes.toric.variety.ToricVariety_field>`;

        - ``base_ring`` -- the coefficient ring of this divisor group,
          usually `\ZZ` (default) or `\QQ`.

        Implementation note: :meth:`__classcall__` sets the default
        value for ``base_ring``.

        OUTPUT:

        Divisor group of the toric variety.

        EXAMPLES::

            sage: P2 = toric_varieties.P2()
            sage: from sage.schemes.toric.divisor import ToricDivisorGroup
            sage: ToricDivisorGroup(P2, base_ring=ZZ)
            Group of toric ZZ-Weil divisors
            on 2-d CPR-Fano toric variety covered by 3 affine patches

        Note that :class:`UniqueRepresentation` correctly distinguishes the
        parent classes even if the schemes are the same::

            sage: from sage.schemes.generic.divisor_group import DivisorGroup
            sage: DivisorGroup(P2,ZZ) is ToricDivisorGroup(P2,ZZ)
            False
        """
        # Toric divisor groups are only defined for toric varieties.
        assert is_ToricVariety(toric_variety), str(toric_variety)+' is not a toric variety!'
        super(ToricDivisorGroup, self).__init__(toric_variety, base_ring)
    def _latex_(self):
        r"""
        Return a LaTeX representation of ``self``.

        OUTPUT:

        - string.

        TESTS::

            sage: toric_varieties.P2().toric_divisor_group()._latex_()
            '\\mathrm{Div_T}\\left(\\mathbb{P}_{\\Delta^{2}}, \\Bold{Z}\\right)'
        """
        return (r"\mathrm{Div_T}\left(%s, %s\right)"
                % (latex(self.scheme()), latex(self.base_ring())))
    def _repr_(self):
        """
        Return a string representation of the toric divisor group.

        OUTPUT:

        A string.

        EXAMPLES::

            sage: toric_varieties.P2().toric_divisor_group()._repr_()
            'Group of toric ZZ-Weil divisors
            on 2-d CPR-Fano toric variety covered by 3 affine patches'
        """
        # Abbreviate the two standard coefficient rings; anything else is
        # shown via its own repr in parentheses.
        ring = self.base_ring()
        if ring == ZZ:
            base_ring_str = 'ZZ'
        elif ring == QQ:
            base_ring_str = 'QQ'
        else:
            base_ring_str = '('+str(ring)+')'
        return 'Group of toric '+base_ring_str+'-Weil divisors on '+str(self.scheme())
    def ngens(self):
        r"""
        Return the number of generators.

        OUTPUT:

        The number of generators of ``self``, which equals the number of
        rays in the fan of the toric variety.

        EXAMPLES::

            sage: P2 = toric_varieties.P2()
            sage: TDiv = P2.toric_divisor_group()
            sage: TDiv.ngens()
            3
        """
        return self.scheme().fan().nrays()
    def gens(self):
        r"""
        Return the generators of the divisor group.

        The generators are the prime divisors `V(z_i)` associated to the
        homogeneous coordinates `z_i` (equivalently, to the rays of the fan).

        EXAMPLES::

            sage: P2 = toric_varieties.P2()
            sage: TDiv = P2.toric_divisor_group()
            sage: TDiv.gens()
            (V(x), V(y), V(z))
        """
        # Note: self._gens is originally incorrectly set by the parent class
        if self._gens is None:
            one = self.base_ring().one()
            self._gens = tuple(ToricDivisor_generic([(one, c)], self)
                               for c in self.scheme().gens())
        return self._gens
    def gen(self,i):
        r"""
        Return the ``i``-th generator of the divisor group.

        INPUT:

        - ``i`` -- integer.

        OUTPUT:

        The divisor `z_i=0`, where `z_i` is the `i`-th homogeneous
        coordinate.

        EXAMPLES::

            sage: P2 = toric_varieties.P2()
            sage: TDiv = P2.toric_divisor_group()
            sage: TDiv.gen(2)
            V(z)
        """
        return self.gens()[i]
    def _element_constructor_(self, x, check=True, reduce=True):
        r"""
        Construct a :class:`ToricDivisor_generic`

        INPUT:

        - ``x`` -- something defining a toric divisor, see
          :func:`ToricDivisor`.

        - ``check``, ``reduce`` -- boolean. See
          :meth:`ToricDivisor_generic.__init__`.

        EXAMPLES::

            sage: P2 = toric_varieties.P2()
            sage: TDiv = P2.toric_divisor_group()
            sage: TDiv._element_constructor_([ (1,P2.gen(2)) ])
            V(z)
            sage: TDiv( P2.fan(1)[0] )
            V(x)

        TESTS::

            sage: TDiv(0)  # Trac #12812
            0
            sage: TDiv(1)  # Trac #12812
            Traceback (most recent call last):
            ...
            TypeError: 'sage.rings.integer.Integer' object is not iterable
        """
        if is_ToricDivisor(x):
            # A divisor with the correct parent can be returned as-is;
            # otherwise rebuild it from its raw term data.
            if x.parent() is self:
                return x
            else:
                x = x._data
        return ToricDivisor(self.scheme(), x, self.base_ring(), check, reduce)
    def base_extend(self, R):
        """
        Extend the scalars of ``self`` to ``R``.

        INPUT:

        - ``R`` -- ring.

        OUTPUT:

        - toric divisor group.

        EXAMPLES::

            sage: P2 = toric_varieties.P2()
            sage: DivZZ = P2.toric_divisor_group()
            sage: DivQQ = P2.toric_divisor_group(base_ring=QQ)
            sage: DivZZ.base_extend(QQ) is DivQQ
            True
        """
        # This check prevents extension to cohomology rings via coercion
        # NOTE(review): old-style raise statement below is Python 2 syntax.
        if isinstance(R,CohomologyRing):
            raise TypeError, 'Coefficient ring cannot be a cohomology ring.'
        if self.base_ring().has_coerce_map_from(R):
            return self
        elif R.has_coerce_map_from(self.base_ring()):
            return ToricDivisorGroup(self.scheme(), base_ring=R)
        else:
            raise ValueError("the base of %s cannot be extended to %s!"
                             % ( self, R))
#********************************************************
def is_ToricDivisor(x):
    r"""
    Check whether ``x`` is a toric divisor.

    INPUT:

    - ``x`` -- anything.

    OUTPUT:

    ``True`` if ``x`` is an instance of :class:`ToricDivisor_generic`,
    ``False`` otherwise.

    EXAMPLES::

        sage: from sage.schemes.toric.divisor import is_ToricDivisor
        sage: is_ToricDivisor(1)
        False
        sage: P2 = toric_varieties.P2()
        sage: D = P2.divisor(0); D
        V(x)
        sage: is_ToricDivisor(D)
        True
    """
    # Every toric divisor derives from the common base ToricDivisor_generic.
    return isinstance(x, ToricDivisor_generic)
#********************************************************
def ToricDivisor(toric_variety, arg=None, ring=None, check=True, reduce=True):
    r"""
    Construct a divisor of ``toric_variety``.

    INPUT:

    - ``toric_variety`` -- a :class:`toric variety
      <sage.schemes.toric.variety.ToricVariety_field>`;

    - ``arg`` -- one of the following description of the toric divisor to be
      constructed:

      * ``None`` or 0 (the trivial divisor);

      * monomial in the homogeneous coordinates;

      * one-dimensional cone of the fan of ``toric_variety`` or a lattice
        point generating such a cone;

      * sequence of rational numbers, specifying multiplicities for each of
        the toric divisors.

    - ``ring`` -- usually either `\ZZ` or `\QQ`. The base ring of the
      divisor group. If ``ring`` is not specified, a coefficient ring
      suitable for ``arg`` is derived.

    - ``check`` -- bool (default: True). Whether to coerce
      coefficients into base ring. Setting it to ``False`` can speed
      up construction.

    - ``reduce`` -- reduce (default: True). Whether to combine common
      terms. Setting it to ``False`` can speed up construction.

    .. WARNING::

        The coefficients of the divisor must be in the base ring and
        the terms must be reduced. If you set ``check=False`` and/or
        ``reduce=False`` it is your responsibility to pass valid input
        data ``arg``.

    OUTPUT:

    - A :class:`sage.schemes.toric.divisor.ToricDivisor_generic`

    EXAMPLES::

        sage: from sage.schemes.toric.divisor import ToricDivisor
        sage: dP6 = toric_varieties.dP6()
        sage: ToricDivisor(dP6, [(1,dP6.gen(2)), (1,dP6.gen(1))])
        V(u) + V(y)
        sage: ToricDivisor(dP6, (0,1,1,0,0,0), ring=QQ)
        V(u) + V(y)
        sage: dP6.inject_variables()
        Defining x, u, y, v, z, w
        sage: ToricDivisor(dP6, u+y)
        Traceback (most recent call last):
        ...
        ValueError: u + y is not a monomial!
        sage: ToricDivisor(dP6, u*y)
        V(u) + V(y)
        sage: ToricDivisor(dP6, dP6.fan(dim=1)[2] )
        V(y)
        sage: cone = Cone(dP6.fan(dim=1)[2])
        sage: ToricDivisor(dP6, cone)
        V(y)
        sage: N = dP6.fan().lattice()
        sage: ToricDivisor(dP6, N(1,1) )
        V(w)

    We attempt to guess the correct base ring::

        sage: ToricDivisor(dP6, [(1/2,u)])
        1/2*V(u)
        sage: _.parent()
        Group of toric QQ-Weil divisors on
        2-d CPR-Fano toric variety covered by 6 affine patches
        sage: ToricDivisor(dP6, [(1/2,u), (1/2,u)])
        V(u)
        sage: _.parent()
        Group of toric ZZ-Weil divisors on
        2-d CPR-Fano toric variety covered by 6 affine patches
        sage: ToricDivisor(dP6, [(u,u)])
        Traceback (most recent call last):
        ...
        TypeError: Cannot deduce coefficient ring for [(u, u)]!
    """
    assert is_ToricVariety(toric_variety)
    ##### First convert special arguments into lists
    ##### of multiplicities or (multiplicity,coordinate)
    # Zero divisor.  Note the precedence: this parses as
    # ``arg is None or (arg in ZZ and arg == 0)``.
    if arg is None or arg in ZZ and arg == 0:
        arg = []
        check = False
        reduce = False
    # Divisor by lattice point (corresponding to a ray)
    if is_ToricLatticeElement(arg):
        if arg not in toric_variety.fan().lattice():
            raise ValueError("%s is not in the ambient lattice of %s!"
                             % (arg, toric_variety.fan()))
        # Fall through to the cone case below.
        arg = toric_variety.fan().cone_containing(arg)
    # Divisor by a one-cone
    if is_Cone(arg):
        fan = toric_variety.fan()
        cone = fan.embed(arg)
        if cone.dim() != 1:
            raise ValueError("Only 1-dimensional cones of the toric variety "
                             "define divisors.")
        arg = [(1, toric_variety.gen(cone.ambient_ray_indices()[0]))]
        check = True # ensure that the 1 will be coerced into the coefficient ring
        reduce = False
    # Divisor by monomial
    if arg in toric_variety.coordinate_ring():
        if len(list(arg)) != 1:
            raise ValueError("%s is not a monomial!" % arg)
        # The exponent vector of the monomial gives the multiplicities.
        arg = arg.exponents()[0]
    # By now either we have converted arg to a list, or it is something else
    # which should be convertible to a list
    if not isinstance(arg, list):
        try:
            arg = list(arg)
        except TypeError:
            raise TypeError("%s does not define a divisor!" % arg)
    ##### Now convert a list of multiplicities into pairs multiplicity-coordinate
    try:
        assert all(len(item)==2 for item in arg)
    except (AssertionError, TypeError):
        # ``arg`` is a plain list of multiplicities, one per ray.
        n_rays = toric_variety.fan().nrays()
        assert len(arg)==n_rays, \
            'Argument list {0} is not of the required length {1}!' \
            .format(arg, n_rays)
        arg = zip(arg, toric_variety.gens())
        reduce = False
    ##### Now we must have a list of multiplicity-coordinate pairs
    assert all(len(item)==2 for item in arg)
    if ring is None:
        # if the coefficient ring was not given, try to use the most common ones.
        try:
            TDiv = ToricDivisorGroup(toric_variety, base_ring=ZZ)
            return ToricDivisor_generic(arg, TDiv,
                                        check=True, reduce=reduce)
        except TypeError:
            pass
        try:
            TDiv = ToricDivisorGroup(toric_variety, base_ring=QQ)
            return ToricDivisor_generic(arg, TDiv,
                                        check=True, reduce=reduce)
        except TypeError:
            raise TypeError("Cannot deduce coefficient ring for %s!" % arg)
    TDiv = ToricDivisorGroup(toric_variety, ring)
    return ToricDivisor_generic(arg, TDiv, check, reduce)
#********************************************************
class ToricDivisor_generic(Divisor_generic):
"""
Construct a :class:`(toric Weil) divisor <ToricDivisor_generic>` on the
given toric variety.
INPUT:
- ``v`` -- a list of tuples (multiplicity, coordinate).
- ``parent`` -- :class:`ToricDivisorGroup`. The parent divisor group.
- ``check`` -- boolean. Type-check the entries of ``v``, see
:meth:`sage.schemes.generic.divisor_group.DivisorGroup_generic.__init__`.
- ``reduce`` -- boolean. Combine coefficients in ``v``, see
:meth:`sage.schemes.generic.divisor_group.DivisorGroup_generic.__init__`.
.. WARNING::
Do not construct :class:`ToricDivisor_generic` objects manually.
Instead, use either the function :func:`ToricDivisor` or the method
:meth:`~sage.schemes.toric.variety.ToricVariety_field.divisor`
of toric varieties.
EXAMPLES::
sage: dP6 = toric_varieties.dP6()
sage: ray = dP6.fan().ray(0)
sage: ray
N(0, 1)
sage: D = dP6.divisor(ray); D
V(x)
sage: D.parent()
Group of toric ZZ-Weil divisors
on 2-d CPR-Fano toric variety covered by 6 affine patches
"""
    def __init__(self, v, parent, check=True, reduce=True):
        """
        See :class:`ToricDivisor_generic` for documentation.

        EXAMPLES::

            sage: dP6 = toric_varieties.dP6()
            sage: from sage.schemes.toric.divisor import ToricDivisor_generic
            sage: TDiv = dP6.toric_divisor_group()
            sage: ToricDivisor_generic([], TDiv)
            0
            sage: ToricDivisor_generic([(2,dP6.gen(1))], TDiv)
            2*V(u)
        """
        # All the work (checking and reducing the term list) is done by
        # the generic divisor base class.
        super(ToricDivisor_generic,self).__init__(v, parent, check, reduce)
def _vector_(self, ring=None):
r"""
Return a vector representation.
INPUT:
- ``ring`` -- a ring (usually `\ZZ` or `\QQ`) for the
coefficients to live in). This is an optional argument, by
default a suitable ring is chosen automatically.
OUTPUT:
A vector whose ``self.scheme().fan().nrays()`` components are
the coefficients of the divisor.
EXAMPLES::
sage: dP6 = toric_varieties.dP6()
sage: D = dP6.divisor((0,1,1,0,0,0)); D
V(u) + V(y)
sage: D._vector_()
(0, 1, 1, 0, 0, 0)
sage: vector(D) # syntactic sugar
(0, 1, 1, 0, 0, 0)
sage: type( vector(D) )
<type 'sage.modules.vector_integer_dense.Vector_integer_dense'>
sage: D_QQ = dP6.divisor((0,1,1,0,0,0), base_ring=QQ);
sage: vector(D_QQ)
(0, 1, 1, 0, 0, 0)
sage: type( vector(D_QQ) )
<type 'sage.modules.vector_rational_dense.Vector_rational_dense'>
The vector representation is a suitable input for :func:`ToricDivisor` ::
sage: dP6.divisor(vector(D)) == D
True
"""
if ring is None:
ring = self.base_ring()
X = self.parent().scheme()
v = vector(ring, [0]*X.ngens())
for coeff, variable in self:
v[ X.gens().index(variable) ] += coeff
return v
def coefficient(self, x):
r"""
Return the coefficient of ``x``.
INPUT:
- ``x`` -- one of the homogeneous coordinates, either given by
the variable or its index.
OUTPUT:
The coefficient of ``x``.
EXAMPLES::
sage: P2 = toric_varieties.P2()
sage: D = P2.divisor((11,12,13)); D
11*V(x) + 12*V(y) + 13*V(z)
sage: D.coefficient(1)
12
sage: P2.inject_variables()
Defining x, y, z
sage: D.coefficient(y)
12
"""
try:
index = ZZ(x)
variable = self.parent().scheme().gen(index)
except TypeError:
variable = x
for coeff, var in self:
if var == variable:
return coeff
return self.base_ring().zero()
    def function_value(self, point):
        r"""
        Return the value of the support function at ``point``.

        Let `X` be the ambient toric variety of ``self``, `\Sigma` the fan
        associated to `X`, and `N` the ambient lattice of `\Sigma`.

        INPUT:

        - ``point`` -- either an integer, interpreted as the index of a ray of
          `\Sigma`, or a point of the lattice `N`.

        OUTPUT:

        - an integer or a rational number.

        EXAMPLES::

            sage: P2 = toric_varieties.P2()
            sage: D = P2.divisor([11,22,44]) # total degree 77
            sage: D.function_value(0)
            11
            sage: N = P2.fan().lattice()
            sage: D.function_value( N(1,1) )
            33
            sage: D.function_value( P2.fan().ray(0) )
            11
        """
        # Support functions exist only for QQ-Cartier divisors.
        if not self.is_QQ_Cartier():
            raise ValueError("support functions are associated to QQ-Cartier "
                             "divisors only, %s is not QQ-Cartier!" % self)
        try:
            # Integer input: the value on the i-th ray generator is just
            # the i-th coefficient of the divisor.
            index = ZZ(point)
            return self.coefficient(index)
        except TypeError:
            pass
        fan = self.parent().scheme().fan()
        assert point in fan.lattice(), 'The point '+str(point)+' is not in the N-lattice.'
        # On the cone containing the point the support function is linear
        # and given by pairing with the dual vector m(cone).
        cone = fan.cone_containing(point)
        return point * self.m(cone)
    def m(self, cone):
        r"""
        Return `m_\sigma` representing `\phi_D` on ``cone``.

        Let `X` be the ambient toric variety of this divisor `D` associated to
        the fan `\Sigma` in lattice `N`. Let `M` be the lattice dual to `N`.
        Given the cone `\sigma =\langle v_1, \dots, v_k \rangle` in `\Sigma`,
        this method searches for a vector `m_\sigma \in M_\QQ` such that
        `\phi_D(v_i) = \langle m_\sigma, v_i \rangle` for all `i=1, \dots, k`,
        where `\phi_D` is the support function of `D`.

        INPUT:

        - ``cone`` -- A cone in the fan of the toric variety.

        OUTPUT:

        - If possible, a point of lattice `M`.

        - If the dual vector cannot be chosen integral, a rational vector is
          returned.

        - If there is no such vector (i.e. ``self`` is not even a
          `\QQ`-Cartier divisor), a ``ValueError`` is raised.

        EXAMPLES::

            sage: F = Fan(cones=[(0,1,2,3), (0,1,4)],
            ...       rays=[(1,1,1), (1,-1,1), (1,-1,-1), (1,1,-1), (0,0,1)])
            sage: X = ToricVariety(F)
            sage: square_cone = X.fan().cone_containing(0,1,2,3)
            sage: triangle_cone = X.fan().cone_containing(0,1,4)
            sage: ray = X.fan().cone_containing(0)
            sage: QQ_Cartier = X.divisor([2,2,1,1,1])
            sage: QQ_Cartier.m(ray)
            M(0, 2, 0)
            sage: QQ_Cartier.m(square_cone)
            (3/2, 0, 1/2)
            sage: QQ_Cartier.m(triangle_cone)
            M(1, 0, 1)
            sage: QQ_Cartier.m(Cone(triangle_cone))
            M(1, 0, 1)
            sage: Weil = X.divisor([1,1,1,0,0])
            sage: Weil.m(square_cone)
            Traceback (most recent call last):
            ...
            ValueError: V(z0) + V(z1) + V(z2) is not QQ-Cartier,
            cannot choose a dual vector on 3-d cone
            of Rational polyhedral fan in 3-d lattice N!
            sage: Weil.m(triangle_cone)
            M(1, 0, 0)
        """
        # Results are cached per cone in self._m (created lazily).
        try:
            return self._m[cone]
        except AttributeError:
            self._m = {}
        except KeyError:
            pass
        X = self.parent().scheme()
        M = X.fan().dual_lattice()
        fan = X.fan()
        cone = fan.embed(cone)
        if cone.is_trivial():
            # On the trivial cone every divisor is represented by m = 0.
            m = M(0)
            self._m[cone] = m
            return m
        assert cone.ambient() is fan
        # Solve <m, v_i> = phi_D(v_i) = coefficient_i for the cone's rays.
        b = vector(self.coefficient(i) for i in cone.ambient_ray_indices())
        A = cone.rays().column_matrix()
        try:
            if cone.dim() == X.dimension():
                # either unique solution or ValueError (if not QQ-Cartier)
                m = A.solve_left(b)  # A m = b
            else:
                # under-determined system; try to find integral solution
                D,U,V = A.smith_form()   # D = U*A*V
                bV = b*V
                m = D.solve_left(bV) * U
        except ValueError:
            raise ValueError("%s is not QQ-Cartier, cannot choose a dual "
                             "vector on %s!" % (self, cone))
        try:
            m = M(m)
        except TypeError:  # not integral
            pass
        self._m[cone] = m
        return m
def is_Weil(self):
"""
Return whether the divisor is a Weil-divisor.
EXAMPLES::
sage: P2 = toric_varieties.P2()
sage: D = P2.divisor([1,2,3])
sage: D.is_Weil()
True
sage: (D/2).is_Weil()
False
"""
if self.base_ring() == ZZ:
return True
try:
vector(ZZ, vector(self))
return True
except TypeError:
return False
def is_QQ_Weil(self):
r"""
Return whether the divisor is a `\QQ`-Weil-divisor.
.. NOTE::
This function returns always ``True`` since
:class:`ToricDivisor <ToricDivisor_generic>` can only
describe `\QQ`-Weil divisors.
EXAMPLES::
sage: P2 = toric_varieties.P2()
sage: D = P2.divisor([1,2,3])
sage: D.is_QQ_Weil()
True
sage: (D/2).is_QQ_Weil()
True
"""
return True
def is_Cartier(self):
r"""
Return whether the divisor is a Cartier-divisor.
.. NOTE::
The sheaf `\mathcal{O}(D)` associated to the given divisor
`D` is a line bundle if and only if the divisor is
Cartier.
EXAMPLES::
sage: X = toric_varieties.P4_11169()
sage: D = X.divisor(3)
sage: D.is_Cartier()
False
sage: D.is_QQ_Cartier()
True
"""
try:
return self._is_Cartier
except AttributeError:
pass
self._is_Cartier = self.is_QQ_Cartier()
if self._is_Cartier:
fan = self.parent().scheme().fan()
M = fan.dual_lattice()
self._is_Cartier = all(self.m(c) in M for c in fan)
return self._is_Cartier
    def is_QQ_Cartier(self):
        r"""
        Return whether the divisor is a `\QQ`-Cartier divisor.

        A `\QQ`-Cartier divisor is a divisor such that some multiple
        of it is Cartier.

        EXAMPLES::

            sage: X = toric_varieties.P4_11169()
            sage: D = X.divisor(3)
            sage: D.is_QQ_Cartier()
            True
            sage: X = toric_varieties.Cube_face_fan()
            sage: D = X.divisor(3)
            sage: D.is_QQ_Cartier()
            False
        """
        # Result is cached on the instance.
        try:
            return self._is_QQ_Cartier
        except AttributeError:
            pass
        # QQ-Cartier iff a dual vector m(c) exists on every cone; m()
        # raises ValueError exactly when no such vector exists.
        try:
            [self.m(c) for c in self.parent().scheme().fan()]
            self._is_QQ_Cartier = True
        except ValueError:
            self._is_QQ_Cartier = False
        return self._is_QQ_Cartier
def is_integral(self):
r"""
Return whether the coefficients of the divisor are all integral.
EXAMPLES::
sage: P2 = toric_varieties.P2()
sage: DZZ = P2.toric_divisor_group(base_ring=ZZ).gen(0); DZZ
V(x)
sage: DQQ = P2.toric_divisor_group(base_ring=QQ).gen(0); DQQ
V(x)
sage: DZZ.is_integral()
True
sage: DQQ.is_integral()
True
"""
return all( coeff in ZZ for coeff, variable in self )
    def move_away_from(self, cone):
        """
        Move the divisor away from the orbit closure of ``cone``.

        INPUT:

        - A ``cone`` of the fan of the toric variety.

        OUTPUT:

        A (rationally equivalent) divisor that is moved off the
        orbit closure of the given cone.

        .. NOTE::

            A divisor that is Weil but not Cartier might be impossible
            to move away. In this case, a ``ValueError`` is raised.

        EXAMPLES::

            sage: F = Fan(cones=[(0,1,2,3), (0,1,4)],
            ...       rays=[(1,1,1), (1,-1,1), (1,-1,-1), (1,1,-1), (0,0,1)])
            sage: X = ToricVariety(F)
            sage: square_cone = X.fan().cone_containing(0,1,2,3)
            sage: triangle_cone = X.fan().cone_containing(0,1,4)
            sage: line_cone = square_cone.intersection(triangle_cone)
            sage: Cartier = X.divisor([2,2,1,1,1])
            sage: Cartier
            2*V(z0) + 2*V(z1) + V(z2) + V(z3) + V(z4)
            sage: Cartier.move_away_from(line_cone)
            -V(z2) - V(z3) + V(z4)
            sage: QQ_Weil = X.divisor([1,0,1,1,0])
            sage: QQ_Weil.move_away_from(line_cone)
            V(z2)
        """
        # Subtracting the linear function m yields a rationally equivalent
        # divisor whose coefficients vanish on the rays of ``cone``.
        m = self.m(cone)
        X = self.parent().scheme()
        fan = X.fan()
        if m in fan.lattice():
            # Integral m: the result lives over the same base ring.
            ring = self._ring
        else:
            # Rational m: the new coefficients may be rational.
            ring = m.base_ring()
        divisor = list(vector(self))
        values = [mult - m * ray for mult, ray in zip(divisor, fan.rays())]
        return ToricDivisor(X, values, ring=ring)
def cohomology_class(self):
r"""
Return the degree-2 cohomology class associated to the divisor.
OUTPUT:
Returns the corresponding cohomology class as an instance of
:class:`~sage.schemes.toric.variety.CohomologyClass`.
The cohomology class is the first Chern class of the
associated line bundle `\mathcal{O}(D)`.
EXAMPLES::
sage: dP6 = toric_varieties.dP6()
sage: D = dP6.divisor(dP6.fan().ray(0) )
sage: D.cohomology_class()
[y + v - w]
"""
divisor = vector(self)
variety = self.parent().scheme()
HH = variety.cohomology_ring()
return sum([ divisor[i] * HH.gen(i) for i in range(0,HH.ngens()) ])
def Chern_character(self):
r"""
Return the Chern character of the sheaf `\mathcal{O}(D)`
defined by the divisor `D`.
You can also use a shortcut :meth:`ch`.
EXAMPLES::
sage: dP6 = toric_varieties.dP6()
sage: N = dP6.fan().lattice()
sage: D3 = dP6.divisor(dP6.fan().cone_containing( N(0,1) ))
sage: D5 = dP6.divisor(dP6.fan().cone_containing( N(-1,-1) ))
sage: D6 = dP6.divisor(dP6.fan().cone_containing( N(0,-1) ))
sage: D = -D3 + 2*D5 - D6
sage: D.Chern_character()
[5*w^2 + y - 2*v + w + 1]
sage: dP6.integrate( D.ch() * dP6.Td() )
-4
"""
return self.cohomology_class().exp()
ch = Chern_character
def divisor_class(self):
r"""
Return the linear equivalence class of the divisor.
OUTPUT:
Returns the class of the divisor in `\mathop{Cl}(X)
\otimes_\ZZ \QQ` as an instance of
:class:`ToricRationalDivisorClassGroup`.
EXAMPLES::
sage: dP6 = toric_varieties.dP6()
sage: D = dP6.divisor(0)
sage: D.divisor_class()
Divisor class [1, 0, 0, 0]
"""
if '_divisor_class' not in self.__dict__:
self._divisor_class = self.parent().scheme().rational_class_group()(self)
return self._divisor_class
def Chow_cycle(self, ring=ZZ):
r"""
Returns the Chow homology class of the divisor.
INPUT:
- ``ring`` -- Either ``ZZ`` (default) or ``QQ``. The base ring
of the Chow group.
OUTPUT:
The :class:`~sage.schemes.toric.chow_group.ChowCycle`
represented by the divisor.
EXAMPLES:
sage: dP6 = toric_varieties.dP6()
sage: cone = dP6.fan(1)[0]
sage: D = dP6.divisor(cone); D
V(x)
sage: D.Chow_cycle()
( 0 | -1, 0, 1, 1 | 0 )
sage: dP6.Chow_group()(cone)
( 0 | -1, 0, 1, 1 | 0 )
"""
toric_variety = self.parent().scheme()
fan = toric_variety.fan()
A = toric_variety.Chow_group(ring)
return sum( self.coefficient(i) * A(cone_1d)
for i, cone_1d in enumerate(fan(dim=1)) )
    def is_ample(self):
        r"""
        Return whether a `\QQ`-Cartier divisor is ample.

        OUTPUT:

        - ``True`` if the divisor is in the ample cone, ``False`` otherwise.

        .. NOTE::

            * For a QQ-Cartier divisor, some positive integral
              multiple is Cartier. We return whether this associated
              divisor is ample, i.e. corresponds to an ample line bundle.

            * In the orbifold case, the ample cone is an open
              and full-dimensional cone in the rational divisor class
              group :class:`ToricRationalDivisorClassGroup`.

            * If the variety has worse than orbifold singularities,
              the ample cone is a full-dimensional cone within the
              (not full-dimensional) subspace spanned by the Cartier
              divisors inside the rational (Weil) divisor class group,
              that is, :class:`ToricRationalDivisorClassGroup`. The
              ample cone is then relative open (open in this
              subspace).

            * See also :meth:`is_nef`.

            * A toric divisor is ample if and only if its support
              function is strictly convex.

        EXAMPLES::

            sage: P2 = toric_varieties.P2()
            sage: K = P2.K()
            sage: (+K).is_ample()
            False
            sage: (0*K).is_ample()
            False
            sage: (-K).is_ample()
            True

        Example 6.1.3, 6.1.11, 6.1.17 of [CLS]_::

            sage: fan = Fan(cones=[(0,1), (1,2), (2,3), (3,0)],
            ...       rays=[(-1,2), (0,1), (1,0), (0,-1)])
            sage: F2 = ToricVariety(fan,'u1, u2, u3, u4')
            sage: def D(a,b): return a*F2.divisor(2) + b*F2.divisor(3)
            ...
            sage: [ (a,b) for a,b in CartesianProduct(range(-3,3),range(-3,3))
            ...     if D(a,b).is_ample() ]
            [(1, 1), (1, 2), (2, 1), (2, 2)]
            sage: [ (a,b) for a,b in CartesianProduct(range(-3,3),range(-3,3))
            ...     if D(a,b).is_nef() ]
            [(0, 0), (0, 1), (0, 2), (1, 0),
             (1, 1), (1, 2), (2, 0), (2, 1), (2, 2)]

        A (worse than orbifold) singular Fano threefold::

            sage: points = [(1,0,0),(0,1,0),(0,0,1),(-2,0,-1),(-2,-1,0),(-3,-1,-1),(1,1,1)]
            sage: facets = [[0,1,3],[0,1,6],[0,2,4],[0,2,6],[0,3,5],[0,4,5],[1,2,3,4,5,6]]
            sage: X = ToricVariety(Fan(cones=facets, rays=points))
            sage: X.rational_class_group().dimension()
            4
            sage: X.Kaehler_cone().rays()
            Divisor class [1, 0, 0, 0]
            in Basis lattice of The toric rational divisor class group
            of a 3-d toric variety covered by 7 affine patches
            sage: antiK = -X.K()
            sage: antiK.divisor_class()
            Divisor class [2, 0, 0, 0]
            sage: antiK.is_ample()
            True
        """
        # Result is cached on the instance.
        try:
            return self._is_ample
        except AttributeError:
            pass
        assert self.is_QQ_Cartier(), 'The divisor must be QQ-Cartier.'
        # Ample iff the divisor class lies in the relative interior of
        # the Kaehler (= ample) cone.
        Kc = self.parent().scheme().Kaehler_cone()
        self._is_ample = Kc.relative_interior_contains(self.divisor_class())
        return self._is_ample
    def is_nef(self):
        r"""
        Return whether a `\QQ`-Cartier divisor is nef.

        OUTPUT:

        - ``True`` if the divisor is in the closure of the ample cone,
          ``False`` otherwise.

        .. NOTE::

            * For a `\QQ`-Cartier divisor, some positive integral multiple is
              Cartier. We return whether this associated divisor is nef.

            * The nef cone is the closure of the ample cone.

            * See also :meth:`is_ample`.

            * A toric divisor is nef if and only if its support
              function is convex (but not necessarily strictly
              convex).

            * A toric Cartier divisor is nef if and only if its linear
              system is basepoint free.

        EXAMPLES::

            sage: P2 = toric_varieties.P2()
            sage: K = P2.K()
            sage: (+K).is_nef()
            False
            sage: (0*K).is_nef()
            True
            sage: (-K).is_nef()
            True

        Example 6.1.3, 6.1.11, 6.1.17 of [CLS]_::

            sage: fan = Fan(cones=[(0,1), (1,2), (2,3), (3,0)],
            ...       rays=[(-1,2), (0,1), (1,0), (0,-1)])
            sage: F2 = ToricVariety(fan,'u1, u2, u3, u4')
            sage: def D(a,b): return a*F2.divisor(2) + b*F2.divisor(3)
            ...
            sage: [ (a,b) for a,b in CartesianProduct(range(-3,3),range(-3,3))
            ...     if D(a,b).is_ample() ]
            [(1, 1), (1, 2), (2, 1), (2, 2)]
            sage: [ (a,b) for a,b in CartesianProduct(range(-3,3),range(-3,3))
            ...     if D(a,b).is_nef() ]
            [(0, 0), (0, 1), (0, 2), (1, 0),
             (1, 1), (1, 2), (2, 0), (2, 1), (2, 2)]
        """
        # Result is cached on the instance.
        try:
            return self._is_nef
        except AttributeError:
            pass
        assert self.is_QQ_Cartier(), 'The divisor must be QQ-Cartier.'
        # Nef iff the divisor class lies in the (closed) Kaehler cone.
        self._is_nef = self.divisor_class() in self.parent().scheme().Kaehler_cone()
        return self._is_nef
    def polyhedron(self):
        r"""
        Return the polyhedron `P_D\subset M` associated to a toric
        divisor `D`.

        OUTPUT:

        `P_D` as an instance of :class:`~sage.geometry.polyhedron.base.Polyhedron_base`.

        EXAMPLES::

            sage: dP7 = toric_varieties.dP7()
            sage: D = dP7.divisor(2)
            sage: P_D = D.polyhedron(); P_D
            A 0-dimensional polyhedron in QQ^2 defined as the convex hull of 1 vertex
            sage: P_D.Vrepresentation()
            (A vertex at (0, 0),)
            sage: D.is_nef()
            False
            sage: dP7.integrate( D.ch() * dP7.Td() )
            1
            sage: P_antiK = (-dP7.K()).polyhedron(); P_antiK
            A 2-dimensional polyhedron in QQ^2 defined as the convex hull of 5 vertices
            sage: P_antiK.Vrepresentation()
            (A vertex at (1, -1), A vertex at (0, 1), A vertex at (1, 0),
             A vertex at (-1, 1), A vertex at (-1, -1))
            sage: P_antiK.integral_points()
            ((-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 0), (0, 1), (1, -1), (1, 0))

        Example 6.1.3, 6.1.11, 6.1.17 of [CLS]_::

            sage: fan = Fan(cones=[(0,1), (1,2), (2,3), (3,0)],
            ...       rays=[(-1,2), (0,1), (1,0), (0,-1)])
            sage: F2 = ToricVariety(fan,'u1, u2, u3, u4')
            sage: D = F2.divisor(3)
            sage: D.polyhedron().Vrepresentation()
            (A vertex at (0, 0), A vertex at (2, 1), A vertex at (0, 1))
            sage: Dprime = F2.divisor(1) + D
            sage: Dprime.polyhedron().Vrepresentation()
            (A vertex at (2, 1), A vertex at (0, 1), A vertex at (0, 0))
            sage: D.is_ample()
            False
            sage: D.is_nef()
            True
            sage: Dprime.is_nef()
            False

        A more complicated example where `P_D` is not a lattice polytope::

            sage: X = toric_varieties.BCdlOG_base()
            sage: antiK = -X.K()
            sage: P_D = antiK.polyhedron()
            sage: P_D
            A 3-dimensional polyhedron in QQ^3 defined as the convex hull of 8 vertices
            sage: P_D.Vrepresentation()
            (A vertex at (1, -1, 0), A vertex at (1, -3, 1),
             A vertex at (1, 1, 1), A vertex at (-5, 1, 1),
             A vertex at (1, 1, -1/2), A vertex at (1, 1/2, -1/2),
             A vertex at (-1, -1, 0), A vertex at (-5, -3, 1))
            sage: P_D.Hrepresentation()
            (An inequality (-1, 0, 0) x + 1 >= 0, An inequality (0, -1, 0) x + 1 >= 0,
             An inequality (0, 0, -1) x + 1 >= 0, An inequality (1, 0, 4) x + 1 >= 0,
             An inequality (0, 1, 3) x + 1 >= 0, An inequality (0, 1, 2) x + 1 >= 0)
            sage: P_D.integral_points()
            ((-1, -1, 0), (0, -1, 0), (1, -1, 0), (-1, 0, 0), (0, 0, 0),
             (1, 0, 0), (-1, 1, 0), (0, 1, 0), (1, 1, 0), (-5, -3, 1),
             (-4, -3, 1), (-3, -3, 1), (-2, -3, 1), (-1, -3, 1), (0, -3, 1),
             (1, -3, 1), (-5, -2, 1), (-4, -2, 1), (-3, -2, 1), (-2, -2, 1),
             (-1, -2, 1), (0, -2, 1), (1, -2, 1), (-5, -1, 1), (-4, -1, 1),
             (-3, -1, 1), (-2, -1, 1), (-1, -1, 1), (0, -1, 1), (1, -1, 1),
             (-5, 0, 1), (-4, 0, 1), (-3, 0, 1), (-2, 0, 1), (-1, 0, 1),
             (0, 0, 1), (1, 0, 1), (-5, 1, 1), (-4, 1, 1), (-3, 1, 1),
             (-2, 1, 1), (-1, 1, 1), (0, 1, 1), (1, 1, 1))
        """
        # Result is cached on the instance.
        try:
            return self._polyhedron
        except AttributeError:
            pass
        fan = self.parent().scheme().fan()
        divisor = vector(self)
        # P_D = { m in M : <m, ray_i> + coefficient_i >= 0 for all rays },
        # encoded as inequalities [constant, *normal] for Polyhedron().
        ieqs = [ [divisor[i]] + list(fan.ray(i)) for i in range(fan.nrays()) ]
        self._polyhedron = Polyhedron(ieqs=ieqs)
        return self._polyhedron
def sections(self):
"""
Return the global sections (as points of the `M`-lattice) of
the line bundle (or reflexive sheaf) associated to the
divisor.
OUTPUT:
- :class:`tuple` of points of lattice `M`.
EXAMPLES::
sage: P2 = toric_varieties.P2()
sage: P2.fan().nrays()
3
sage: P2.divisor(0).sections()
(M(-1, 0), M(-1, 1), M(0, 0))
sage: P2.divisor(1).sections()
(M(0, -1), M(0, 0), M(1, -1))
sage: P2.divisor(2).sections()
(M(0, 0), M(0, 1), M(1, 0))
The divisor can be non-nef yet still have sections::
sage: rays = [(1,0,0),(0,1,0),(0,0,1),(-2,0,-1),(-2,-1,0),(-3,-1,-1),(1,1,1),(-1,0,0)]
sage: cones = [[0,1,3],[0,1,6],[0,2,4],[0,2,6],[0,3,5],[0,4,5],[1,3,7],[1,6,7],[2,4,7],[2,6,7],[3,5,7],[4,5,7]]
sage: X = ToricVariety(Fan(rays=rays,cones=cones))
sage: D = X.divisor(2); D
V(z2)
sage: D.is_nef()
False
sage: D.sections()
(M(0, 0, 0),)
sage: D.cohomology(dim=True)
(1, 0, 0, 0)
"""
try:
return self._sections
except AttributeError:
pass
M = self.parent().scheme().fan().dual_lattice()
self._sections = tuple(M(m)
for m in self.polyhedron().integral_points())
return self._sections
def sections_monomials(self):
"""
Return the global sections of the line bundle associated to the
Cartier divisor.
The sections are described as monomials in the generalized homogeneous
coordinates.
OUTPUT:
- tuple of monomials in the coordinate ring of ``self``.
EXAMPLES::
sage: P2 = toric_varieties.P2()
sage: P2.fan().nrays()
3
sage: P2.divisor(0).sections_monomials()
(z, y, x)
sage: P2.divisor(1).sections_monomials()
(z, y, x)
sage: P2.divisor(2).sections_monomials()
(z, y, x)
From [CoxTutorial]_ page 38::
sage: from sage.geometry.lattice_polytope import LatticePolytope
sage: lp = LatticePolytope(matrix([[1,1,0,-1,0], [0,1,1,0,-1]]))
sage: lp
A lattice polytope: 2-dimensional, 5 vertices.
sage: dP7 = ToricVariety( FaceFan(lp), 'x1, x2, x3, x4, x5')
sage: AK = -dP7.K()
sage: AK.sections()
(M(-1, 0), M(-1, 1), M(0, -1), M(0, 0),
M(0, 1), M(1, -1), M(1, 0), M(1, 1))
sage: AK.sections_monomials()
(x3*x4^2*x5, x2*x3^2*x4^2, x1*x4*x5^2, x1*x2*x3*x4*x5,
x1*x2^2*x3^2*x4, x1^2*x2*x5^2, x1^2*x2^2*x3*x5, x1^2*x2^3*x3^2)
REFERENCES:
.. [CoxTutorial]
David Cox, "What is a Toric Variety",
http://www.cs.amherst.edu/~dac/lectures/tutorial.ps
"""
return tuple(self.monomial(m) for m in self.sections())
def monomial(self, point):
r"""
Return the monomial in the homogeneous coordinate ring
associated to the ``point`` in the dual lattice.
INPUT:
- ``point`` -- a point in ``self.variety().fan().dual_lattice()``.
OUTPUT:
For a fixed divisor ``D``, the sections are generated by
monomials in :meth:`ToricVariety.coordinate_ring
<sage.schemes.toric.variety.ToricVariety_field.coordinate_ring>`.
Alternatively, the monomials can be described as `M`-lattice
points in the polyhedron ``D.polyhedron()``. This method
converts the points `m\in M` into homogeneous polynomials.
EXAMPLES::
sage: P2 = toric_varieties.P2()
sage: O3_P2 = -P2.K()
sage: M = P2.fan().dual_lattice()
sage: O3_P2.monomial( M(0,0) )
x*y*z
"""
X = self.parent().scheme()
fan = X.fan()
assert point in fan.dual_lattice(), \
str(point)+' must be a point in the M-lattice'
R = X.coordinate_ring()
return prod([ R.gen(i) ** (point*fan.ray(i) + self.coefficient(i))
for i in range(fan.nrays()) ])
def Kodaira_map(self, names='z'):
r"""
Return the Kodaira map.
The Kodaira map is the rational map $X_\Sigma \to
\mathbb{P}^{n-1}$, where $n$ equals the number of sections. It
is defined by the monomial sections of the line bundle.
If the divisor is ample and the toric variety smooth or of
dimension 2, then this is an embedding.
INPUT:
- ``names`` -- string (optional; default ``'z'``). The
variable names for the destination projective space.
EXAMPLES::
sage: P1.<u,v> = toric_varieties.P1()
sage: D = -P1.K()
sage: D.Kodaira_map()
Scheme morphism:
From: 1-d CPR-Fano toric variety covered by 2 affine patches
To: Closed subscheme of Projective Space of dimension 2
over Rational Field defined by:
-z1^2 + z0*z2
Defn: Defined on coordinates by sending [u : v] to
(v^2 : u*v : u^2)
sage: dP6 = toric_varieties.dP6()
sage: D = -dP6.K()
sage: D.Kodaira_map(names='x')
Scheme morphism:
From: 2-d CPR-Fano toric variety covered by 6 affine patches
To: Closed subscheme of Projective Space of dimension 6
over Rational Field defined by:
-x1*x5 + x0*x6,
-x2*x3 + x0*x5,
-x1*x3 + x0*x4,
x4*x5 - x3*x6,
-x1*x2 + x0*x3,
x3*x5 - x2*x6,
x3*x4 - x1*x6,
x3^2 - x1*x5,
x2*x4 - x1*x5,
-x1*x5^2 + x2*x3*x6,
-x1*x5^3 + x2^2*x6^2
Defn: Defined on coordinates by sending [x : u : y : v : z : w] to
(x*u^2*y^2*v : x^2*u^2*y*w : u*y^2*v^2*z : x*u*y*v*z*w :
x^2*u*z*w^2 : y*v^2*z^2*w : x*v*z^2*w^2)
"""
sections = self.sections_monomials()
if len(sections) == 0:
raise ValueError('The Kodaira map is not defined for divisors without sections.')
src = self.parent().scheme()
from sage.schemes.projective.projective_space import ProjectiveSpace
ambient = ProjectiveSpace(src.base_ring(), len(sections) - 1, names=names)
A = matrix(ZZ, [list(s.exponents()[0]) for s in sections]).transpose()
from sage.schemes.toric.ideal import ToricIdeal
IA = ToricIdeal(A, names=names)
dst = ambient.subscheme(IA)
homset = src.Hom(dst)
return homset(sections)
def _sheaf_complex(self, m):
r"""
Return a simplicial complex whose cohomology is isomorphic to the
`m\in M`-graded piece of the sheaf cohomology.
Helper for :meth:`cohomology`.
INPUT:
- `m` -- a point in ``self.scheme().fan().dual_lattice()``.
OUTPUT:
- :class:`simplicial complex
<sage.homology.simplicial_complex.SimplicialComplex>`.
EXAMPLES::
sage: dP6 = toric_varieties.dP6()
sage: D0 = dP6.divisor(0)
sage: D2 = dP6.divisor(2)
sage: D3 = dP6.divisor(3)
sage: D = -D0 + 2*D2 - D3
sage: M = dP6.fan().dual_lattice()
sage: D._sheaf_complex( M(1,0) )
Simplicial complex with vertex set (0, 1, 3) and facets {(3,), (0, 1)}
"""
fan = self.parent().scheme().fan()
ray_is_negative = [ m*ray + self.coefficient(i) < 0
for i, ray in enumerate(fan.rays()) ]
def cone_is_negative(cone): # and non-trivial
if cone.is_trivial():
return False
return all(ray_is_negative[i] for i in cone.ambient_ray_indices())
negative_cones = filter(cone_is_negative, flatten(fan.cones()))
return SimplicialComplex([c.ambient_ray_indices() for c in negative_cones])
def _sheaf_cohomology(self, cplx):
"""
Returns the sheaf cohomology as the shifted, reduced cohomology
of the complex.
Helper for :meth:`cohomology`.
INPUT:
- ``cplx`` -- simplicial complex.
OUTPUT:
- integer vector.
EXAMPLES::
sage: dP6 = toric_varieties.dP6()
sage: D = dP6.divisor(1)
sage: D._sheaf_cohomology( SimplicialComplex() )
(1, 0, 0)
sage: D._sheaf_cohomology( SimplicialComplex([[1,2],[2,3],[3,1]]) )
(0, 0, 1)
A more complicated example to test that trac #10731 is fixed::
sage: cell24 = Polyhedron(vertices=[
... (1,0,0,0),(0,1,0,0),(0,0,1,0),(0,0,0,1),(1,-1,-1,1),(0,0,-1,1),
... (0,-1,0,1),(-1,0,0,1),(1,0,0,-1),(0,1,0,-1),(0,0,1,-1),(-1,1,1,-1),
... (1,-1,-1,0),(0,0,-1,0),(0,-1,0,0),(-1,0,0,0),(1,-1,0,0),(1,0,-1,0),
... (0,1,1,-1),(-1,1,1,0),(-1,1,0,0),(-1,0,1,0),(0,-1,-1,1),(0,0,0,-1)])
sage: X = ToricVariety(FaceFan(cell24.lattice_polytope())) # long time
sage: D = -X.divisor(0) # long time
sage: D.cohomology(dim=True) # long time
(0, 0, 0, 0, 0)
"""
d = self.parent().scheme().dimension()
if cplx.dimension()==-1:
return vector(ZZ, [1] + [0]*d)
HH = cplx.homology(base_ring=QQ, cohomology=True)
HH_list = [0]*(d+1)
for h in HH.iteritems():
degree = h[0]+1
cohomology_dim = h[1].dimension()
if degree>d or degree<0:
assert(cohomology_dim==0)
continue
HH_list[ degree ] = cohomology_dim
return vector(ZZ, HH_list)
def _sheaf_cohomology_support(self):
r"""
Return the weights for which the cohomology groups can be non-vanishing.
OUTPUT:
A :class:`~sage.geometry.polyhedron.base.Polyhedron_base`
object that contains all weights `m` for which the sheaf
cohomology is *potentially* non-vanishing.
ALGORITHM:
See :meth:`cohomology` and note that every `d`-tuple
(`d`=dimension of the variety) of rays determines one vertex
in the chamber decomposition if none of the hyperplanes are
parallel.
EXAMPLES::
sage: dP6 = toric_varieties.dP6()
sage: D0 = dP6.divisor(0)
sage: D2 = dP6.divisor(2)
sage: D3 = dP6.divisor(3)
sage: D = -D0 + 2*D2 - D3
sage: supp = D._sheaf_cohomology_support()
sage: supp
A 2-dimensional polyhedron in QQ^2 defined as the convex hull of 4 vertices
sage: supp.Vrepresentation()
(A vertex at (-1, 1), A vertex at (0, -1), A vertex at (3, -1), A vertex at (0, 2))
"""
X = self.parent().scheme()
fan = X.fan()
if not X.is_complete():
raise ValueError("%s is not complete, its cohomology is not "
"finite-dimensional!" % X)
d = X.dimension()
chamber_vertices = []
for pindexlist in Combinations(range(0,fan.nrays()), d):
A = matrix(ZZ, [fan.ray(p) for p in pindexlist])
b = vector([ self.coefficient(p) for p in pindexlist ])
try:
chamber_vertices.append(A.solve_right(-b))
except ValueError:
pass
return Polyhedron(vertices=chamber_vertices)
    def cohomology(self, weight=None, deg=None, dim=False):
        r"""
        Return the cohomology of the line bundle associated to the
        Cartier divisor or reflexive sheaf associated to the Weil
        divisor.

        .. NOTE::

            The cohomology of a toric line bundle/reflexive sheaf is
            graded by the usual degree as well as by the `M`-lattice.

        INPUT:

        - ``weight`` -- (optional) a point of the `M`-lattice.

        - ``deg`` -- (optional) the degree of the cohomology group.

        - ``dim`` -- boolean. If ``False`` (default), the cohomology
          groups are returned as vector spaces. If ``True``, only the
          dimension of the vector space(s) is returned.

        OUTPUT:

        The vector space `H^\text{deg}(X,\mathcal{O}(D))` (if ``deg``
        is specified) or a dictionary ``{degree:cohomology(degree)}``
        of all degrees between 0 and the dimension of the variety.

        If ``weight`` is specified, return only the subspace
        `H^\text{deg}(X,\mathcal{O}(D))_\text{weight}` of the
        cohomology of the given weight.

        If ``dim==True``, the dimension of the cohomology vector space
        is returned instead of actual vector space. Moreover, if
        ``deg`` was not specified, a vector whose entries are the
        dimensions is returned instead of a dictionary.

        ALGORITHM:

        Roughly, Cech cohomology is used to compute the
        cohomology. For toric divisors, the local sections can be
        chosen to be monomials (instead of general homogeneous
        polynomials), this is the reason for the extra grading by
        `m\in M`. General references would be [Fulton]_, [CLS]_. Here
        are some salient features of our implementation:

        * First, a finite set of `M`-lattice points is identified that
          supports the cohomology. The toric divisor determines a
          (polyhedral) chamber decomposition of `M_\RR`, see Section
          9.1 and Figure 4 of [CLS]_. The cohomology vanishes on the
          non-compact chambers. Hence, the convex hull of the vertices
          of the chamber decomposition contains all non-vanishing
          cohomology groups. This is returned by the private method
          :meth:`_sheaf_cohomology_support`.

          It would be more efficient, but more difficult to implement,
          to keep track of all of the individual chambers. We leave
          this for future work.

        * For each point `m\in M`, the weight-`m` part of the
          cohomology can be rewritten as the cohomology of a
          simplicial complex, see Exercise 9.1.10 of [CLS]_,
          [Perling]_. This is returned by the private method
          :meth:`_sheaf_complex`.

          The simplicial complex is the same for all points in a
          chamber, but we currently do not make use of this and
          compute each point `m\in M` separately.

        * Finally, the cohomology (over `\QQ`) of this simplicial
          complex is computed in the private method
          :meth:`_sheaf_cohomology`. Summing over the supporting
          points `m\in M` yields the cohomology of the sheaf.

        REFERENCES:

        ..  [Perling]
            Markus Perling: Divisorial Cohomology Vanishing on Toric Varieties,
            :arxiv:`0711.4836v2`

        EXAMPLES:

        Example 9.1.7 of Cox, Little, Schenck: "Toric Varieties" [CLS]_::

            sage: F = Fan(cones=[(0,1), (1,2), (2,3), (3,4), (4,5), (5,0)],
            ...           rays=[(1,0), (1,1), (0,1), (-1,0), (-1,-1), (0,-1)])
            sage: dP6 = ToricVariety(F)
            sage: D3 = dP6.divisor(2)
            sage: D5 = dP6.divisor(4)
            sage: D6 = dP6.divisor(5)
            sage: D = -D3 + 2*D5 - D6
            sage: D.cohomology()
            {0: Vector space of dimension 0 over Rational Field,
             1: Vector space of dimension 4 over Rational Field,
             2: Vector space of dimension 0 over Rational Field}
            sage: D.cohomology(deg=1)
            Vector space of dimension 4 over Rational Field
            sage: M = F.dual_lattice()
            sage: D.cohomology( M(0,0) )
            {0: Vector space of dimension 0 over Rational Field,
             1: Vector space of dimension 1 over Rational Field,
             2: Vector space of dimension 0 over Rational Field}
            sage: D.cohomology( weight=M(0,0), deg=1 )
            Vector space of dimension 1 over Rational Field
            sage: dP6.integrate( D.ch() * dP6.Td() )
            -4

        Note the different output options::

            sage: D.cohomology()
            {0: Vector space of dimension 0 over Rational Field,
             1: Vector space of dimension 4 over Rational Field,
             2: Vector space of dimension 0 over Rational Field}
            sage: D.cohomology(dim=True)
            (0, 4, 0)
            sage: D.cohomology(weight=M(0,0))
            {0: Vector space of dimension 0 over Rational Field,
             1: Vector space of dimension 1 over Rational Field,
             2: Vector space of dimension 0 over Rational Field}
            sage: D.cohomology(weight=M(0,0), dim=True)
            (0, 1, 0)
            sage: D.cohomology(deg=1)
            Vector space of dimension 4 over Rational Field
            sage: D.cohomology(deg=1, dim=True)
            4
            sage: D.cohomology(weight=M(0,0), deg=1)
            Vector space of dimension 1 over Rational Field
            sage: D.cohomology(weight=M(0,0), deg=1, dim=True)
            1

        Here is a Weil (non-Cartier) divisor example::

            sage: K = toric_varieties.Cube_nonpolyhedral().K()
            sage: K.is_Weil()
            True
            sage: K.is_QQ_Cartier()
            False
            sage: K.cohomology(dim=True)
            (0, 0, 0, 1)
        """
        if '_cohomology_vector' in self.__dict__ and weight is None:
            # cache the cohomology but not the individual weight pieces
            HH = self._cohomology_vector
        else:
            X = self.parent().scheme()
            M = X.fan().dual_lattice()
            support = self._sheaf_cohomology_support()
            if weight is None:
                # All potentially contributing weights.
                m_list = [ M(p) for p in support.integral_points() ]
            else:
                # Restrict to the single requested weight.
                m_list = [ M(weight) ]
            # Accumulate the dimension vector (one entry per degree 0..dim X).
            HH = vector(ZZ, [0]*(X.dimension()+1))
            for m_point in m_list:
                cplx = self._sheaf_complex(m_point)
                HH += self._sheaf_cohomology(cplx)
            if weight is None:
                # Only the full (weight-summed) result is cached.
                self._cohomology_vector = HH
        if dim:
            if deg is None:
                return HH
            else:
                return HH[deg]
        else:
            from sage.modules.free_module import VectorSpace
            # NOTE(review): uses self.scheme() while the rest of the class
            # uses self.parent().scheme() -- confirm both resolve to the
            # same toric variety.
            vectorspaces = dict( [k,VectorSpace(self.scheme().base_ring(),HH[k])]
                                 for k in range(0,len(HH)) )
            if deg is None:
                return vectorspaces
            else:
                return vectorspaces[deg]
def cohomology_support(self):
r"""
Return the weights for which the cohomology groups do not vanish.
OUTPUT:
A tuple of dual lattice points. ``self.cohomology(weight=m)``
does not vanish if and only if ``m`` is in the output.
.. NOTE::
This method is provided for educational purposes and it is
not an efficient way of computing the cohomology groups.
EXAMPLES::
sage: F = Fan(cones=[(0,1), (1,2), (2,3), (3,4), (4,5), (5,0)],
... rays=[(1,0), (1,1), (0,1), (-1,0), (-1,-1), (0,-1)])
sage: dP6 = ToricVariety(F)
sage: D3 = dP6.divisor(2)
sage: D5 = dP6.divisor(4)
sage: D6 = dP6.divisor(5)
sage: D = -D3 + 2*D5 - D6
sage: D.cohomology_support()
(M(0, 0), M(1, 0), M(2, 0), M(1, 1))
"""
X = self.parent().scheme()
M = X.fan().dual_lattice()
support_hull = self._sheaf_cohomology_support()
support_hull = [ M(p) for p in support_hull.integral_points() ]
support = []
for m in support_hull:
cplx = self._sheaf_complex(m)
HH = self._sheaf_cohomology(cplx)
if sum(HH)>0:
support.append(m)
return tuple(support)
#********************************************************
class ToricRationalDivisorClassGroup(FreeModule_ambient_field, UniqueRepresentation):
    r"""
    The rational divisor class group of a toric variety.

    The **T-Weil divisor class group** `\mathop{Cl}(X)` of a toric
    variety `X` is a finitely generated abelian group and can contain
    torsion. Its rank equals the number of rays in the fan of `X`
    minus the dimension of `X`.

    The **rational divisor class group** is `\mathop{Cl}(X)
    \otimes_\ZZ \QQ` and never includes torsion. If `X` is *smooth*,
    this equals the **Picard group** `\mathop{\mathrm{Pic}}(X)`, whose
    elements are the isomorphism classes of line bundles on `X`. The
    group law (which we write as addition) is the tensor product of
    the line bundles. The Picard group of a toric variety is always
    torsion-free.

    .. WARNING::

        Do not instantiate this class yourself. Use
        :meth:`~sage.schemes.toric.variety.ToricVariety_field.rational_class_group`
        method of :class:`toric varieties
        <sage.schemes.toric.variety.ToricVariety_field>` if you need
        the divisor class group. Or you can obtain it as the parent of any
        divisor class constructed, for example, via
        :meth:`ToricDivisor_generic.divisor_class`.

    INPUT:

    - ``toric_variety`` -- :class:`toric variety
      <sage.schemes.toric.variety.ToricVariety_field>`.

    OUTPUT:

    - rational divisor class group of a toric variety.

    EXAMPLES::

        sage: P2 = toric_varieties.P2()
        sage: P2.rational_class_group()
        The toric rational divisor class group of a 2-d CPR-Fano
        toric variety covered by 3 affine patches
        sage: D = P2.divisor(0); D
        V(x)
        sage: Dclass = D.divisor_class(); Dclass
        Divisor class [1]
        sage: Dclass.lift()
        V(y)
        sage: Dclass.parent()
        The toric rational divisor class group of a 2-d CPR-Fano
        toric variety covered by 3 affine patches
    """
    def __init__(self, toric_variety):
        r"""
        Construct the toric rational divisor class group.

        EXAMPLES::

            sage: P2 = toric_varieties.P2()
            sage: from sage.schemes.toric.divisor import ToricRationalDivisorClassGroup
            sage: ToricRationalDivisorClassGroup(P2)
            The toric rational divisor class group of a 2-d CPR-Fano
            toric variety covered by 3 affine patches

        TESTS:

        Make sure we lift integral classes to integral divisors::

            sage: rays = [(1, 0, 0), (-1, 0, 0), (0, 1, 0), (0, 0, 1), (2, -1, -1)]
            sage: cones = [(0, 2, 3), (0, 2, 4), (0, 3, 4), (1, 2, 3), (1, 2, 4), (1, 3, 4)]
            sage: X = ToricVariety(Fan(cones=cones, rays=rays))
            sage: Cl = X.rational_class_group()
            sage: Cl._projection_matrix
            [1 1 0 0 0]
            [0 2 1 1 1]
            sage: Cl._lift_matrix
            [ 1  0]
            [ 0  0]
            [ 0  0]
            [ 0  1]
            [ 0  0]
            sage: Cl._lift_matrix.base_ring()
            Integer Ring
        """
        self._variety = toric_variety
        fan = toric_variety.fan()
        nrays = fan.nrays()
        # Rank of the class group = #rays - dim of the lattice.
        rk = nrays - fan.lattice_dim()
        super(ToricRationalDivisorClassGroup,self).__init__(base_field=QQ,
                                        dimension=rk, sparse=False)
        # The Gale transform projects ray coefficients to divisor classes.
        gale = fan.Gale_transform()
        self._projection_matrix = gale.matrix_from_columns(range(nrays))
        # Smith normal form yields an integral section of the projection,
        # so integral classes lift to integral divisors.
        D, U, V = self._projection_matrix.transpose().smith_form()
        assert all( D[i,i]==1 for i in range(0,D.ncols()) ), \
            'This is a property of the Gale transform.'
        self._lift_matrix = (V*D.transpose()*U).transpose()
    def _repr_(self):
        r"""
        Return a string representation of ``self``.

        OUTPUT:

        - string.

        EXAMPLES::

            sage: P2 = toric_varieties.P2()
            sage: from sage.schemes.toric.divisor import ToricRationalDivisorClassGroup
            sage: ToricRationalDivisorClassGroup(P2)._repr_()
            'The toric rational divisor class group of a 2-d CPR-Fano toric variety covered by 3 affine patches'
        """
        return 'The toric rational divisor class group of a %s' % self._variety
    def _latex_(self):
        r"""
        Return a LaTeX representation of ``self``.

        OUTPUT:

        - string.

        EXAMPLES::

            sage: P2 = toric_varieties.P2()
            sage: from sage.schemes.toric.divisor import ToricRationalDivisorClassGroup
            sage: ToricRationalDivisorClassGroup(P2)._latex_()
            '\\mathop{Cl}_{\\QQ}\\left(\\mathbb{P}_{\\Delta^{2}}\\right)'
        """
        return '\\mathop{Cl}_{\\QQ}\\left('+self._variety._latex_()+'\\right)'
    def _element_constructor_(self, x):
        r"""
        Construct a :class:`ToricRationalDivisorClass`.

        INPUT:

        - ``x`` -- one of the following:
            * toric divisor;
            * vector;
            * list.

        OUTPUT:

        - :class:`ToricRationalDivisorClass`.

        EXAMPLES::

            sage: dP6 = toric_varieties.dP6()
            sage: Cl = dP6.rational_class_group()
            sage: D = dP6.divisor(2)
            sage: Cl._element_constructor_(D)
            Divisor class [0, 0, 1, 0]
            sage: Cl(D)
            Divisor class [0, 0, 1, 0]
        """
        # A divisor is first projected to its class vector.
        if is_ToricDivisor(x):
            x = self._projection_matrix * vector(x)
        if is_Vector(x):
            x = list(x)
        return ToricRationalDivisorClass(self, x)
    # parent does not conform to the new-style coercion model
    __call__ = _element_constructor_
class ToricRationalDivisorClassGroup_basis_lattice(FreeModule_ambient_pid):
    r"""
    Construct the basis lattice of the ``group``.

    INPUT:

    - ``group`` -- :class:`toric rational divisor class group
      <ToricRationalDivisorClassGroup>`.

    OUTPUT:

    - the basis lattice of ``group``.

    EXAMPLES::

        sage: P1xP1 = toric_varieties.P1xP1()
        sage: L = P1xP1.Kaehler_cone().lattice()
        sage: L
        Basis lattice of The toric rational divisor class group of a
        2-d CPR-Fano toric variety covered by 4 affine patches
        sage: L.basis()
        [
        Divisor class [1, 0],
        Divisor class [0, 1]
        ]
    """
    def __init__(self, group):
        r"""
        See :class:`ToricRationalDivisorClassGroup_basis_lattice` for
        documentation.

        TESTS::

            sage: P1xP1 = toric_varieties.P1xP1()
            sage: L = P1xP1.Kaehler_cone().lattice()
            sage: TestSuite(L).run()
        """
        assert isinstance(group, ToricRationalDivisorClassGroup)
        self._group = group
        # Mirror the group's variety and lift data for element construction.
        self._variety = group._variety
        self._lift_matrix = group._lift_matrix
        # A free ZZ-module of the same rank as the rational class group.
        super(ToricRationalDivisorClassGroup_basis_lattice, self).__init__(
            ZZ, group.dimension())
    def _repr_(self):
        r"""
        Return a string representation of ``self``.

        OUTPUT:

        - string.

        TESTS::

            sage: P1xP1 = toric_varieties.P1xP1()
            sage: L = P1xP1.Kaehler_cone().lattice()
            sage: print L._repr_()
            Basis lattice of The toric rational divisor class group of a
            2-d CPR-Fano toric variety covered by 4 affine patches
        """
        return "Basis lattice of {}".format(self._group)
    def _latex_(self):
        r"""
        Return a LaTeX representation of ``self``.

        OUTPUT:

        - string.

        TESTS::

            sage: P1xP1 = toric_varieties.P1xP1()
            sage: L = P1xP1.Kaehler_cone().lattice()
            sage: print L._latex_()
            \text{Basis lattice of }
            \mathop{Cl}_{\QQ}\left(\mathbb{P}_{\Delta^{2}}\right)
        """
        return r"\text{{Basis lattice of }} {}".format(latex(self._group))
    # Elements of this lattice are divisor classes.
    _element_class = ToricRationalDivisorClass
| 34.192991
| 123
| 0.543807
|
4a028c6b34adf03688041f1814d251ac44f3a1f2
| 1,361
|
py
|
Python
|
main/forms.py
|
ggetzie/timekeeper
|
8db8295a72c5b54558207dd5082a5463803ce74d
|
[
"MIT"
] | null | null | null |
main/forms.py
|
ggetzie/timekeeper
|
8db8295a72c5b54558207dd5082a5463803ce74d
|
[
"MIT"
] | null | null | null |
main/forms.py
|
ggetzie/timekeeper
|
8db8295a72c5b54558207dd5082a5463803ce74d
|
[
"MIT"
] | null | null | null |
from django import forms
from django.urls import reverse
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit, Field, Div
from main.models import Hours, Project
class HoursForm(forms.ModelForm):
    """Inline crispy form for recording hours worked on a project."""

    # NOTE(review): status codes "D", "M", "P" presumably select only
    # active projects -- confirm against Project's status choices.
    project = forms.ModelChoiceField(
        queryset=Project.objects.filter(status__in=["D", "M", "P"]))

    class Meta:
        model = Hours
        fields = ["date", "project", "quantity", "notes", "user"]
        widgets = {"user": forms.HiddenInput(),
                   "date": forms.DateInput(attrs={"type": "date"})}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        helper = FormHelper(self)
        helper.form_method = "POST"
        helper.form_action = reverse("main:hours_create")
        helper.form_class = "form-inline"
        helper.form_show_labels = False
        # Render every field (plus the submit button) on a single row.
        helper.layout = Layout(
            Div(Field("date"),
                Field("project", wrapper_class="ml-2"),
                Field("quantity", wrapper_class="ml-2"),
                Field("notes", wrapper_class="ml-2 mr-2"),
                Field("user"),
                Submit("submit", "Submit"),
                css_class="form-row"))
        self.helper = helper
| 35.815789
| 84
| 0.542983
|
4a028d21e56bb3d96d4d7a60559ddf9793801145
| 10,668
|
py
|
Python
|
allennlp/models/semantic_parsing/nlvr/nlvr_direct_semantic_parser.py
|
unendin/allennlp
|
0dcbaea6dbc6cc43e24a3564d6d37f8a1421484c
|
[
"Apache-2.0"
] | null | null | null |
allennlp/models/semantic_parsing/nlvr/nlvr_direct_semantic_parser.py
|
unendin/allennlp
|
0dcbaea6dbc6cc43e24a3564d6d37f8a1421484c
|
[
"Apache-2.0"
] | 1
|
2019-07-31T20:12:41.000Z
|
2019-07-31T20:12:41.000Z
|
allennlp/models/semantic_parsing/nlvr/nlvr_direct_semantic_parser.py
|
unendin/allennlp
|
0dcbaea6dbc6cc43e24a3564d6d37f8a1421484c
|
[
"Apache-2.0"
] | null | null | null |
import logging
from typing import List, Dict
from overrides import overrides
import torch
from allennlp.common import Params
from allennlp.data.fields.production_rule_field import ProductionRuleArray
from allennlp.data.vocabulary import Vocabulary
from allennlp.modules import TextFieldEmbedder, Seq2SeqEncoder
from allennlp.modules.similarity_functions import SimilarityFunction
from allennlp.nn.decoding import BeamSearch
from allennlp.nn.decoding.decoder_trainers import MaximumMarginalLikelihood
from allennlp.nn import util
from allennlp.models.model import Model
from allennlp.models.semantic_parsing.nlvr.nlvr_decoder_state import NlvrDecoderState
from allennlp.models.semantic_parsing.nlvr.nlvr_decoder_step import NlvrDecoderStep
from allennlp.models.semantic_parsing.nlvr.nlvr_semantic_parser import NlvrSemanticParser
from allennlp.semparse.worlds import NlvrWorld
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@Model.register("nlvr_direct_parser")
class NlvrDirectSemanticParser(NlvrSemanticParser):
    """
    ``NlvrDirectSemanticParser`` is an ``NlvrSemanticParser`` that gets around the problem of lack
    of logical form annotations by maximizing the marginal likelihood of an approximate set of target
    sequences that yield the correct denotation. The main difference between this parser and
    ``NlvrCoverageSemanticParser`` is that while this parser takes the output of an offline search
    process as the set of target sequences for training, the latter performs search during training.

    Parameters
    ----------
    vocab : ``Vocabulary``
        Passed to super-class.
    sentence_embedder : ``TextFieldEmbedder``
        Passed to super-class.
    action_embedding_dim : ``int``
        Passed to super-class.
    encoder : ``Seq2SeqEncoder``
        Passed to super-class.
    attention_function : ``SimilarityFunction``
        We compute an attention over the input question at each step of the decoder, using the
        decoder hidden state as the query.  This is the similarity function we use for that
        attention.
    decoder_beam_search : ``BeamSearch``
        Beam search used to retrieve best sequences after training.
    max_decoding_steps : ``int``
        Maximum number of steps for beam search after training.
    dropout : ``float``, optional (default=0.0)
        Probability of dropout to apply on encoder outputs, decoder outputs and predicted actions.
    """
    def __init__(self,
                 vocab: Vocabulary,
                 sentence_embedder: TextFieldEmbedder,
                 action_embedding_dim: int,
                 encoder: Seq2SeqEncoder,
                 attention_function: SimilarityFunction,
                 decoder_beam_search: BeamSearch,
                 max_decoding_steps: int,
                 dropout: float = 0.0) -> None:
        super(NlvrDirectSemanticParser, self).__init__(vocab=vocab,
                                                       sentence_embedder=sentence_embedder,
                                                       action_embedding_dim=action_embedding_dim,
                                                       encoder=encoder,
                                                       dropout=dropout)
        # Training objective: maximum marginal likelihood over the
        # (offline-searched) approximate target action sequences.
        self._decoder_trainer = MaximumMarginalLikelihood()
        self._decoder_step = NlvrDecoderStep(encoder_output_dim=self._encoder.get_output_dim(),
                                             action_embedding_dim=action_embedding_dim,
                                             attention_function=attention_function,
                                             dropout=dropout)
        self._decoder_beam_search = decoder_beam_search
        self._max_decoding_steps = max_decoding_steps
        # Padding value used in target action sequence tensors.
        self._action_padding_index = -1
    @overrides
    def forward(self,  # type: ignore
                sentence: Dict[str, torch.LongTensor],
                worlds: List[List[NlvrWorld]],
                actions: List[List[ProductionRuleArray]],
                target_action_sequences: torch.LongTensor = None,
                labels: torch.LongTensor = None) -> Dict[str, torch.Tensor]:
        # pylint: disable=arguments-differ
        """
        Decoder logic for producing type constrained target sequences, trained to maximize marginal
        likelihood over a set of approximate logical forms.

        When ``target_action_sequences`` is given, the MML loss is computed;
        otherwise only beam-search predictions and denotations are returned.
        """
        batch_size = len(worlds)
        action_embeddings, action_indices = self._embed_actions(actions)
        initial_rnn_state = self._get_initial_rnn_state(sentence)
        # Start every batch element with a score of 0.0 on the same device
        # as the sentence tensors.
        initial_score_list = [util.new_variable_with_data(list(sentence.values())[0],
                                                          torch.Tensor([0.0]))
                              for i in range(batch_size)]
        label_strings = self._get_label_strings(labels) if labels is not None else None
        # TODO (pradeep): Assuming all worlds give the same set of valid actions.
        initial_grammar_state = [self._create_grammar_state(worlds[i][0], actions[i]) for i in
                                 range(batch_size)]
        worlds_list = [worlds[i] for i in range(batch_size)]
        initial_state = NlvrDecoderState(batch_indices=list(range(batch_size)),
                                         action_history=[[] for _ in range(batch_size)],
                                         score=initial_score_list,
                                         rnn_state=initial_rnn_state,
                                         grammar_state=initial_grammar_state,
                                         action_embeddings=action_embeddings,
                                         action_indices=action_indices,
                                         possible_actions=actions,
                                         worlds=worlds_list,
                                         label_strings=label_strings)
        if target_action_sequences is not None:
            # Remove the trailing dimension (from ListField[ListField[IndexField]]).
            target_action_sequences = target_action_sequences.squeeze(-1)
            target_mask = target_action_sequences != self._action_padding_index
        else:
            target_mask = None
        outputs: Dict[str, torch.Tensor] = {}
        if target_action_sequences is not None:
            # Training path: MML loss over the provided target sequences.
            outputs = self._decoder_trainer.decode(initial_state,
                                                   self._decoder_step,
                                                   (target_action_sequences, target_mask))
        best_final_states = self._decoder_beam_search.search(self._max_decoding_steps,
                                                             initial_state,
                                                             self._decoder_step,
                                                             keep_final_unfinished_states=False)
        best_action_sequences: Dict[int, List[List[int]]] = {}
        for i in range(batch_size):
            # Decoding may not have terminated with any completed logical forms, if `num_steps`
            # isn't long enough (or if the model is not trained enough and gets into an
            # infinite action loop).
            if i in best_final_states:
                best_action_indices = [best_final_states[i][0].action_history[0]]
                best_action_sequences[i] = best_action_indices
        batch_action_strings = self._get_action_strings(actions, best_action_sequences)
        batch_denotations = self._get_denotations(batch_action_strings, worlds)
        if target_action_sequences is not None:
            self._update_metrics(action_strings=batch_action_strings,
                                 worlds=worlds,
                                 label_strings=label_strings)
        else:
            # Inference path: expose the decoded programs and their denotations.
            outputs["best_action_strings"] = batch_action_strings
            outputs["denotations"] = batch_denotations
        return outputs
    def _update_metrics(self,
                        action_strings: List[List[List[str]]],
                        worlds: List[List[NlvrWorld]],
                        label_strings: List[List[str]]) -> None:
        # Update denotation accuracy (per world) and consistency (all worlds
        # of an instance correct) from the best decoded sequence.
        # TODO(pradeep): Move this to the base class.
        # TODO(pradeep): Using only the best decoded sequence. Define metrics for top-k sequences?
        batch_size = len(worlds)
        for i in range(batch_size):
            instance_action_strings = action_strings[i]
            sequence_is_correct = [False]
            if instance_action_strings:
                instance_label_strings = label_strings[i]
                instance_worlds = worlds[i]
                # Taking only the best sequence.
                sequence_is_correct = self._check_denotation(instance_action_strings[0],
                                                             instance_label_strings,
                                                             instance_worlds)
            for correct_in_world in sequence_is_correct:
                self._denotation_accuracy(1 if correct_in_world else 0)
            self._consistency(1 if all(sequence_is_correct) else 0)
    @overrides
    def get_metrics(self, reset: bool = False) -> Dict[str, float]:
        return {
                'denotation_accuracy': self._denotation_accuracy.get_metric(reset),
                'consistency': self._consistency.get_metric(reset)
        }
    @classmethod
    def from_params(cls, vocab, params: Params) -> 'NlvrDirectSemanticParser':
        # Build all sub-components from the configuration, then verify no
        # unused keys remain.
        sentence_embedder_params = params.pop("sentence_embedder")
        sentence_embedder = TextFieldEmbedder.from_params(vocab, sentence_embedder_params)
        action_embedding_dim = params.pop_int('action_embedding_dim')
        encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
        dropout = params.pop_float('dropout', 0.0)
        attention_function_type = params.pop("attention_function", None)
        if attention_function_type is not None:
            attention_function = SimilarityFunction.from_params(attention_function_type)
        else:
            attention_function = None
        decoder_beam_search = BeamSearch.from_params(params.pop("decoder_beam_search"))
        max_decoding_steps = params.pop_int("max_decoding_steps")
        params.assert_empty(cls.__name__)
        return cls(vocab,
                   sentence_embedder=sentence_embedder,
                   action_embedding_dim=action_embedding_dim,
                   encoder=encoder,
                   attention_function=attention_function,
                   decoder_beam_search=decoder_beam_search,
                   max_decoding_steps=max_decoding_steps,
                   dropout=dropout)
| 53.34
| 101
| 0.61886
|
4a028dc7b4415340944f44d577fedf42895f0042
| 395
|
py
|
Python
|
pages/migrations/0007_coachingcontact_city.py
|
yogeshprasad/spa-development
|
1bee9ca64da5815e1c9a2f7af43b44b59ee2ca7b
|
[
"Apache-2.0"
] | null | null | null |
pages/migrations/0007_coachingcontact_city.py
|
yogeshprasad/spa-development
|
1bee9ca64da5815e1c9a2f7af43b44b59ee2ca7b
|
[
"Apache-2.0"
] | 7
|
2020-06-05T19:11:22.000Z
|
2022-03-11T23:30:57.000Z
|
pages/migrations/0007_coachingcontact_city.py
|
yogeshprasad/spa-development
|
1bee9ca64da5815e1c9a2f7af43b44b59ee2ca7b
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.0.6 on 2018-11-05 15:54
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds a "city" CharField to CoachingContact; existing rows are
    # backfilled with the empty string via default=''.
    dependencies = [
        ('pages', '0006_courseprice'),
    ]
    operations = [
        migrations.AddField(
            model_name='coachingcontact',
            name='city',
            field=models.CharField(default='', max_length=500),
        ),
    ]
| 20.789474
| 63
| 0.594937
|
4a028e711dcd528c6f12fbe9db6a2d0cc34358dc
| 3,826
|
py
|
Python
|
caspy/query.py
|
altaurog/django-caspy
|
c4ec0d39ed94b74a68784878018d6a3ff5e2dba3
|
[
"BSD-3-Clause"
] | 1
|
2017-01-30T23:37:27.000Z
|
2017-01-30T23:37:27.000Z
|
caspy/query.py
|
altaurog/django-caspy
|
c4ec0d39ed94b74a68784878018d6a3ff5e2dba3
|
[
"BSD-3-Clause"
] | null | null | null |
caspy/query.py
|
altaurog/django-caspy
|
c4ec0d39ed94b74a68784878018d6a3ff5e2dba3
|
[
"BSD-3-Clause"
] | null | null | null |
from django.db import utils
from . import models, django_orm
class IntegrityError(Exception):
    """Raised when a save violates a database integrity constraint."""
class BaseQuery(object):
    """Generic CRUD helper translating between ORM rows and domain objects."""

    def __init__(self, model):
        self.model = model
        self.qset = model.objects
        self.to_domain = django_orm.orm_to_domain
        self.to_orm = django_orm.domain_to_orm

    def all(self):
        """Iterate over every row as a domain object."""
        return map(self.to_domain, self.qset.all())

    def by_pk(self, pk):
        """Return the ORM instance with primary key ``pk``, or ``None``."""
        try:
            return self.qset.get(pk=pk)
        except self.qset.model.DoesNotExist:
            return None

    def get(self, pk):
        """Return the domain object with primary key ``pk``, or ``None``."""
        instance = self.by_pk(pk)
        return self.to_domain(instance) if instance is not None else None

    def save(self, obj):
        """Persist a domain object and return the saved ORM instance."""
        instance = self.to_orm(obj)
        instance.save()
        return instance

    def delete(self, pk):
        """Delete the row with primary key ``pk`` if present; return True."""
        instance = self.by_pk(pk)
        if instance is not None:
            instance.delete()
        return True
# Module-level query singletons for the simple (flat) models.
currency = BaseQuery(models.Currency)
book = BaseQuery(models.Book)
accounttype = BaseQuery(models.AccountType)
class AccountQuery(BaseQuery):
    """Queries for accounts, which live in a per-book tree structure."""

    def all(self, book_id):
        """Iterate over all accounts of ``book_id`` as domain objects."""
        return map(self.to_domain, self.model.tree.load_book(book_id))

    def by_pk(self, book_id, account_id):
        """Return the ORM account for ``(book_id, account_id)``, or ``None``."""
        return self.model.tree.load_one(book_id, account_id)

    def get(self, book_id, account_id):
        """Return the domain account for ``(book_id, account_id)``, or ``None``."""
        instance = self.by_pk(book_id, account_id)
        if instance is not None:
            return self.to_domain(instance)

    def save(self, obj):
        """Save the account and (re)attach it under ``obj.parent_id``.

        Returns the saved ORM instance, for consistency with
        :meth:`BaseQuery.save` (previously returned ``None``).
        Raises :class:`IntegrityError` on a database integrity violation.
        """
        try:
            instance = super(AccountQuery, self).save(obj)
            self.model.tree.detach(instance)
            self.model.tree.attach(instance, obj.parent_id)
            return instance
        except utils.IntegrityError as e:
            raise IntegrityError(str(e))

    def delete(self, book_id, account_id):
        """Delete the account if present; return True."""
        instance = self.by_pk(book_id, account_id)
        if instance is not None:
            instance.delete()
        return True
account = AccountQuery(models.Account)
class TransactionQuery(BaseQuery):
def all(self, book_id):
qset = self._split_qset(book_id)
return list(self._load(qset))
def get(self, book_id, transaction_id):
qset = self._split_qset(book_id, transaction_id)
result = list(self._load(qset))
if result:
return result[0]
def save(self, obj):
if obj.transaction_id:
models.Split.objects.filter(
transaction=obj.transaction_id
).delete()
instance = super(TransactionQuery, self).save(obj)
splits = list(self.splits(obj, instance))
models.Split.objects.bulk_create(splits)
return instance
def delete(self, book_id, transaction_id):
qset = self._split_qset(book_id, transaction_id)
try:
split = qset.select_related('transaction')[0]
except IndexError:
return False
split.transaction.delete()
return True
def splits(self, obj, instance):
for s in obj.splits:
si = self.to_orm(s)
si.transaction = instance
yield si
def _split_qset(self, book_id, transaction_id=None):
qargs = {'account__book_id': book_id}
if transaction_id is not None:
qargs['transaction'] = transaction_id
return models.Split.objects.filter(**qargs)
def _load(self, split_qset):
transactions = {}
for split in split_qset.select_related('transaction'):
try:
xact = transactions[split.transaction_id]
except KeyError:
xact = self.to_domain(split.transaction)
transactions[split.transaction_id] = xact
xact.splits.append(self.to_domain(split))
return transactions.values()
transaction = TransactionQuery(models.Transaction)
| 29.206107
| 70
| 0.611605
|
4a028ea8f78591a986f6b78ba2ede2017b23d8c7
| 4,444
|
py
|
Python
|
winguhub/share/models.py
|
movicha/winguhub
|
8a82615952db47bc332d5691b1ef6dea521b422c
|
[
"Apache-2.0"
] | null | null | null |
winguhub/share/models.py
|
movicha/winguhub
|
8a82615952db47bc332d5691b1ef6dea521b422c
|
[
"Apache-2.0"
] | null | null | null |
winguhub/share/models.py
|
movicha/winguhub
|
8a82615952db47bc332d5691b1ef6dea521b422c
|
[
"Apache-2.0"
] | null | null | null |
import datetime
from django.db import models
from winguhub.utils import normalize_file_path, normalize_dir_path, gen_token
class AnonymousShare(models.Model):
"""
Model used for sharing repo to unregistered email.
"""
repo_owner = models.EmailField(max_length=255)
repo_id = models.CharField(max_length=36)
anonymous_email = models.EmailField(max_length=255)
token = models.CharField(max_length=25, unique=True)
class FileShare(models.Model):
"""
Model used for file or dir shared link.
"""
username = models.EmailField(max_length=255, db_index=True)
repo_id = models.CharField(max_length=36, db_index=True)
path = models.TextField()
token = models.CharField(max_length=10, unique=True)
ctime = models.DateTimeField(default=datetime.datetime.now)
view_cnt = models.IntegerField(default=0)
s_type = models.CharField(max_length=2, db_index=True, default='f') # `f` or `d`
class PrivateFileDirShareManager(models.Manager):
def add_private_file_share(self, from_user, to_user, repo_id, path, perm):
"""
"""
path = normalize_file_path(path)
token = gen_token(max_length=10)
pfs = self.model(from_user=from_user, to_user=to_user, repo_id=repo_id,
path=path, s_type='f', token=token, permission=perm)
pfs.save(using=self._db)
return pfs
def add_read_only_priv_file_share(self, from_user, to_user, repo_id, path):
"""
"""
return self.add_private_file_share(from_user, to_user, repo_id,
path, 'r')
def get_private_share_in_file(self, username, repo_id, path):
"""Get a file that private shared to ``username``.
"""
path = normalize_file_path(path)
ret = super(PrivateFileDirShareManager, self).filter(
to_user=username, repo_id=repo_id, path=path, s_type='f')
return ret[0] if len(ret) > 0 else None
def add_private_dir_share(self, from_user, to_user, repo_id, path, perm):
"""
"""
path = normalize_dir_path(path)
token = gen_token(max_length=10)
pfs = self.model(from_user=from_user, to_user=to_user, repo_id=repo_id,
path=path, s_type='d', token=token, permission=perm)
pfs.save(using=self._db)
return pfs
def get_private_share_in_dir(self, username, repo_id, path):
"""Get a directory that private shared to ``username``.
"""
path = normalize_dir_path(path)
ret = super(PrivateFileDirShareManager, self).filter(
to_user=username, repo_id=repo_id, path=path, s_type='d')
return ret[0] if len(ret) > 0 else None
def get_priv_file_dir_share_by_token(self, token):
return super(PrivateFileDirShareManager, self).get(token=token)
def delete_private_file_dir_share(self, from_user, to_user, repo_id, path):
"""
"""
super(PrivateFileDirShareManager, self).filter(
from_user=from_user, to_user=to_user, repo_id=repo_id,
path=path).delete()
def list_private_share_out_by_user(self, from_user):
"""List files/directories private shared from ``from_user``.
"""
return super(PrivateFileDirShareManager, self).filter(
from_user=from_user)
def list_private_share_in_by_user(self, to_user):
"""List files/directories private shared to ``to_user``.
"""
return super(PrivateFileDirShareManager, self).filter(
to_user=to_user)
def list_private_share_in_dirs_by_user_and_repo(self, to_user, repo_id):
"""List directories private shared to ``to_user`` base on ``repo_id``.
"""
return super(PrivateFileDirShareManager, self).filter(
to_user=to_user, repo_id=repo_id, s_type='d')
class PrivateFileDirShare(models.Model):
from_user = models.CharField(max_length=255, db_index=True)
to_user = models.CharField(max_length=255, db_index=True)
repo_id = models.CharField(max_length=36, db_index=True)
path = models.TextField()
token = models.CharField(max_length=10, unique=True)
permission = models.CharField(max_length=5) # `r` or `rw`
s_type = models.CharField(max_length=5, default='f') # `f` or `d`
objects = PrivateFileDirShareManager()
| 38.310345
| 84
| 0.65414
|
4a028ec3928449072264f9623b3b1db6b36005c7
| 5,474
|
py
|
Python
|
scripts/movie_search.py
|
pnavais/python-lab
|
a6c4f47674be79762ae4ab9aed5bee04e8d73f47
|
[
"Apache-2.0"
] | null | null | null |
scripts/movie_search.py
|
pnavais/python-lab
|
a6c4f47674be79762ae4ab9aed5bee04e8d73f47
|
[
"Apache-2.0"
] | null | null | null |
scripts/movie_search.py
|
pnavais/python-lab
|
a6c4f47674be79762ae4ab9aed5bee04e8d73f47
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2019 Pablo Navais
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys, getopt
import datetime
import time
import threading
import requests
import urllib.parse
#
# A little utility script to search movies using TheMovieDB REST API
# Usage : movie_search.py -a <MOVIE_DB_API_KEY> -n <MOVIE_NAME>
####################################################################
# Constants
# #########
RED = "\u001b[31m"
YELLOW = "\u001b[33m"
BLUE = "\u001b[34m"
WHITE = "\u001b[37m"
GREEN = "\u001b[32m"
RESET = "\u001b[0m"
BOLD = "\u001b[1m"
UNDERLINE = "\u001b[4m"
SAVE_CUR = "\033[s"
RESET_CUR = "\033[u"
CLEAR_RIGHT = "\033[K"
CHECK_SYMBOL = "\u2713"
CROSS_SYMBOL = "\u274C"
MOVIE_DB_SEARCH_URL = "https://api.themoviedb.org/3/search/movie?api_key=<api_key>&query=<query>&page=<page>"
# Globals
# #########
api_key = ''
movie_name = ''
movies_list = []
max_movies = -1
"""
Return an ANSI red colored string
"""
def red(str):
return RED+str+RESET
"""
Return an ANSI yellow colored string
"""
def yellow(str):
return YELLOW+str+RESET
"""
Return an ANSI green colored string
"""
def green(str):
return GREEN+str+RESET
"""
Return an ANSI bold colored string
"""
def bold(str):
return BOLD+str+RESET
"""
Displays help syntax
"""
def showHelp():
print(yellow("Usage : ")+os.path.basename(__file__)+" [-a <api_key>] [-n <movie_name>] [-h]")
print("\nWhere :")
print("\t-a, --api: the MovieDB API key")
print("\t-n, --name: the name of the movie to search")
print("\t-m, --max: the maximum number of movies to show")
print("\t-h, --help: this help")
"""
Parses the command line input
and retrieves the actual parameters.
"""
def parseCmd(argv):
global api_key
global movie_name
global max_movies
try:
opts, args = getopt.getopt(argv,"ha:n:m:",["help","api=","name=","max="])
except getopt.GetoptError as e:
print("\n"+red(str(e))+"\n", file=sys.stderr)
showHelp()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
showHelp()
sys.exit(0)
elif opt in ("-a", "--api"):
api_key = arg
elif opt in ("-n", "--name"):
movie_name = arg
elif opt in ("-m", "--max"):
max_movies = int(arg) if arg.isdigit() else 0
if not (api_key):
print(red("Missing MovieDB API key"))
sys.exit(3)
if not (movie_name):
print(red("Missing Movie Name"))
sys.exit(4)
if (max_movies == 0):
print(red("Invalid maximum number of movies"))
sys.exit(5)
"""
Simply displays a waiting message
until interrupted or nmovies found
"""
def waitLoop():
global movies_list
try:
while not movies_list:
for j in range(0,3):
sys.stdout.write(".")
sys.stdout.flush()
time.sleep(0.3)
sys.stdout.write(RESET_CUR)
sys.stdout.write(CLEAR_RIGHT)
sys.stdout.flush()
time.sleep(0.5)
except:
pass
sys.stdout.write(RESET_CUR)
sys.stdout.write(CLEAR_RIGHT)
symbol=green(CHECK_SYMBOL) if movies_list else red(CROSS_SYMBOL)
sys.stdout.write(symbol)
sys.stdout.flush()
"""
Find the given movie
"""
def findMovie(api_key, movie_name, max_movies):
try:
current_page = 1
more_pages = True
search_url = re.sub("<api_key>", api_key, MOVIE_DB_SEARCH_URL)
base_search_url = re.sub("<query>", urllib.parse.quote(movie_name, safe=''), search_url)
while more_pages:
search_url = re.sub("<page>", str(current_page), base_search_url)
resp = requests.get(search_url)
if resp.status_code == 200:
r_json = resp.json();
total_pages = r_json['total_pages']
movies_result = r_json['results'];
for movie in movies_result:
try:
date_time = datetime.datetime.strptime(movie['release_date'], '%Y-%m-%d')
year = str(date_time.date().year)
except:
year = "????"
pass
movies_list.append(movie['title']+" - ("+year+")")
if ((max_movies>0) and (len(movies_list)>=max_movies)):
break
current_page+=1
more_pages = (current_page<=total_pages)
else:
more_pages = False
if ((max_movies>0) and (len(movies_list)>=max_movies)):
break
except Exception as e:
print("Error processing request : "+str(e))
return movies_list
""" Main function """
def main(argv):
parseCmd(argv)
sys.stdout.write("Searching movie ["+green(movie_name)+"] ")
sys.stdout.flush()
sys.stdout.write(SAVE_CUR)
time.sleep(1)
# Launch the movie search thread
t = threading.Thread(target=waitLoop)
t.start()
movies_list = findMovie(api_key, movie_name, max_movies)
t.join()
if movies_list:
movies_list_size = len(movies_list);
res_name = "result" if movies_list_size == 1 else "results"
print("\n\n"+yellow(str(movies_list_size)+" "+res_name+" found :"))
i=1;
for movie in movies_list:
print("[%d]"%i+" "+movie)
i+=1;
else:
print("\n\nNo results found")
# Main entry point
if __name__ == "__main__":
try:
main(sys.argv[1:])
except KeyboardInterrupt as e:
sys.exit(0)
| 23.493562
| 109
| 0.655462
|
4a028f110965475fb6684d0d12f2935d39142edc
| 1,538
|
py
|
Python
|
lib/kover_amr/models/scm.py
|
aldro61/kb_kover_amr
|
0b5ed1cfd31a486caa31145711a26b32bc81cb72
|
[
"MIT"
] | null | null | null |
lib/kover_amr/models/scm.py
|
aldro61/kb_kover_amr
|
0b5ed1cfd31a486caa31145711a26b32bc81cb72
|
[
"MIT"
] | null | null | null |
lib/kover_amr/models/scm.py
|
aldro61/kb_kover_amr
|
0b5ed1cfd31a486caa31145711a26b32bc81cb72
|
[
"MIT"
] | 2
|
2018-08-23T16:09:36.000Z
|
2020-03-20T08:02:35.000Z
|
"""
Set Covering Machine model
"""
import json
import os
from Bio.Seq import Seq
class SCMModel():
def __init__(self, path):
"""
Initialize the model based on description files
"""
self.path = path
model_info = json.load(open(os.path.join(path, "results.json"), "r"))["model"]
self.rules = model_info["rules"]
self.type = model_info["type"]
del model_info
def predict(self, kmers):
"""
Predict phenotype based on k-mers (dict)
Resistant = 1, Susceptible = 0
"""
if not isinstance(kmers, set):
raise Exception("Expected k-mers the be a dict.")
rules_true = []
rules_false = []
for rule in self.rules:
km = rule.replace("Presence(", "").replace("Absence(", "").replace(")", "")
km_rc = str(Seq(km).reverse_complement())
if "Presence" in rule and (km in kmers or km_rc in kmers):
rules_true.append(rule)
elif "Absence" in rule and not (km in kmers or km_rc in kmers):
rules_true.append(rule)
else:
rules_false.append(rule)
if self.type == "conjunction":
predicted_pheno = 1 if len(rules_true) == len(self.rules) else 0
else:
predicted_pheno = 1 if len(rules_true) > 0 else 0
# Pretty printing
predicted_pheno = "resistant" if predicted_pheno == 1 else "susceptible"
return predicted_pheno, rules_true
| 28.481481
| 87
| 0.563719
|
4a02911fcedb0a3e01fc697f8f6c86bf8cf719ce
| 12,433
|
py
|
Python
|
test/test_numpy.py
|
tomaskrehlik/orjson
|
53d0704db3330295dda6184e262bfaeb60a16418
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
test/test_numpy.py
|
tomaskrehlik/orjson
|
53d0704db3330295dda6184e262bfaeb60a16418
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
test/test_numpy.py
|
tomaskrehlik/orjson
|
53d0704db3330295dda6184e262bfaeb60a16418
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import unittest
import pytest
import orjson
try:
import numpy
except ImportError:
numpy = None
def numpy_default(obj):
return obj.tolist()
@pytest.mark.skipif(numpy is None, reason="numpy is not installed")
class NumpyTests(unittest.TestCase):
def test_numpy_array_d1_uintp(self):
self.assertEqual(
orjson.dumps(
numpy.array([0, 18446744073709551615], numpy.uintp),
option=orjson.OPT_SERIALIZE_NUMPY,
),
b"[0,18446744073709551615]",
)
def test_numpy_array_d1_intp(self):
self.assertEqual(
orjson.dumps(
numpy.array([-9223372036854775807, 9223372036854775807], numpy.intp),
option=orjson.OPT_SERIALIZE_NUMPY,
),
b"[-9223372036854775807,9223372036854775807]",
)
def test_numpy_array_d1_i64(self):
self.assertEqual(
orjson.dumps(
numpy.array([-9223372036854775807, 9223372036854775807], numpy.int64),
option=orjson.OPT_SERIALIZE_NUMPY,
),
b"[-9223372036854775807,9223372036854775807]",
)
def test_numpy_array_d1_u64(self):
self.assertEqual(
orjson.dumps(
numpy.array([0, 18446744073709551615], numpy.uint64),
option=orjson.OPT_SERIALIZE_NUMPY,
),
b"[0,18446744073709551615]",
)
def test_numpy_array_d1_i8(self):
self.assertEqual(
orjson.dumps(
numpy.array([-128, 127], numpy.int8),
option=orjson.OPT_SERIALIZE_NUMPY,
),
b"[-128,127]",
)
def test_numpy_array_d1_u8(self):
self.assertEqual(
orjson.dumps(
numpy.array([0, 255], numpy.uint8),
option=orjson.OPT_SERIALIZE_NUMPY,
),
b"[0,255]",
)
def test_numpy_array_d1_i32(self):
self.assertEqual(
orjson.dumps(
numpy.array([-2147483647, 2147483647], numpy.int32),
option=orjson.OPT_SERIALIZE_NUMPY,
),
b"[-2147483647,2147483647]",
)
def test_numpy_array_d1_u32(self):
self.assertEqual(
orjson.dumps(
numpy.array([0, 4294967295], numpy.uint32),
option=orjson.OPT_SERIALIZE_NUMPY,
),
b"[0,4294967295]",
)
def test_numpy_array_d1_f32(self):
self.assertEqual(
orjson.dumps(
numpy.array([1.0, 3.4028235e38], numpy.float32),
option=orjson.OPT_SERIALIZE_NUMPY,
),
b"[1.0,3.4028235e38]",
)
def test_numpy_array_d1_f64(self):
self.assertEqual(
orjson.dumps(
numpy.array([1.0, 1.7976931348623157e308], numpy.float64),
option=orjson.OPT_SERIALIZE_NUMPY,
),
b"[1.0,1.7976931348623157e308]",
)
def test_numpy_array_d1_bool(self):
self.assertEqual(
orjson.dumps(
numpy.array([True, False, False, True]),
option=orjson.OPT_SERIALIZE_NUMPY,
),
b"[true,false,false,true]",
)
def test_numpy_array_d2_i64(self):
self.assertEqual(
orjson.dumps(
numpy.array([[1, 2, 3], [4, 5, 6]], numpy.int64),
option=orjson.OPT_SERIALIZE_NUMPY,
),
b"[[1,2,3],[4,5,6]]",
)
def test_numpy_array_d2_f64(self):
self.assertEqual(
orjson.dumps(
numpy.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], numpy.float64),
option=orjson.OPT_SERIALIZE_NUMPY,
),
b"[[1.0,2.0,3.0],[4.0,5.0,6.0]]",
)
def test_numpy_array_d3_i8(self):
self.assertEqual(
orjson.dumps(
numpy.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], numpy.int8),
option=orjson.OPT_SERIALIZE_NUMPY,
),
b"[[[1,2],[3,4]],[[5,6],[7,8]]]",
)
def test_numpy_array_d3_u8(self):
self.assertEqual(
orjson.dumps(
numpy.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], numpy.uint8),
option=orjson.OPT_SERIALIZE_NUMPY,
),
b"[[[1,2],[3,4]],[[5,6],[7,8]]]",
)
def test_numpy_array_d3_i32(self):
self.assertEqual(
orjson.dumps(
numpy.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], numpy.int32),
option=orjson.OPT_SERIALIZE_NUMPY,
),
b"[[[1,2],[3,4]],[[5,6],[7,8]]]",
)
def test_numpy_array_d3_i64(self):
self.assertEqual(
orjson.dumps(
numpy.array([[[1, 2], [3, 4], [5, 6], [7, 8]]], numpy.int64),
option=orjson.OPT_SERIALIZE_NUMPY,
),
b"[[[1,2],[3,4],[5,6],[7,8]]]",
)
def test_numpy_array_d3_f64(self):
self.assertEqual(
orjson.dumps(
numpy.array(
[[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]], numpy.float64
),
option=orjson.OPT_SERIALIZE_NUMPY,
),
b"[[[1.0,2.0],[3.0,4.0]],[[5.0,6.0],[7.0,8.0]]]",
)
def test_numpy_array_fotran(self):
array = numpy.array([[1, 2], [3, 4]], order="F")
assert array.flags["F_CONTIGUOUS"] == True
with self.assertRaises(orjson.JSONEncodeError):
orjson.dumps(array, option=orjson.OPT_SERIALIZE_NUMPY)
self.assertEqual(
orjson.dumps(
array, default=numpy_default, option=orjson.OPT_SERIALIZE_NUMPY
),
orjson.dumps(array.tolist()),
)
def test_numpy_array_unsupported_dtype(self):
array = numpy.array([[1, 2], [3, 4]], numpy.float16)
with self.assertRaises(orjson.JSONEncodeError):
orjson.dumps(array, option=orjson.OPT_SERIALIZE_NUMPY)
self.assertEqual(
orjson.dumps(
array, default=numpy_default, option=orjson.OPT_SERIALIZE_NUMPY
),
orjson.dumps(array.tolist()),
)
def test_numpy_array_d1(self):
array = numpy.array([1])
self.assertEqual(
orjson.loads(
orjson.dumps(
array,
option=orjson.OPT_SERIALIZE_NUMPY,
)
),
array.tolist(),
)
def test_numpy_array_d2(self):
array = numpy.array([[1]])
self.assertEqual(
orjson.loads(
orjson.dumps(
array,
option=orjson.OPT_SERIALIZE_NUMPY,
)
),
array.tolist(),
)
def test_numpy_array_d3(self):
array = numpy.array([[[1]]])
self.assertEqual(
orjson.loads(
orjson.dumps(
array,
option=orjson.OPT_SERIALIZE_NUMPY,
)
),
array.tolist(),
)
def test_numpy_array_d4(self):
array = numpy.array([[[[1]]]])
self.assertEqual(
orjson.loads(
orjson.dumps(
array,
option=orjson.OPT_SERIALIZE_NUMPY,
)
),
array.tolist(),
)
def test_numpy_array_4_stride(self):
array = numpy.random.rand(4, 4, 4, 4)
self.assertEqual(
orjson.loads(
orjson.dumps(
array,
option=orjson.OPT_SERIALIZE_NUMPY,
)
),
array.tolist(),
)
def test_numpy_array_dimension_zero(self):
array = numpy.array(0)
assert array.ndim == 0
with self.assertRaises(orjson.JSONEncodeError):
orjson.dumps(array, option=orjson.OPT_SERIALIZE_NUMPY)
array = numpy.empty((0, 4, 2))
self.assertEqual(
orjson.loads(
orjson.dumps(
array,
option=orjson.OPT_SERIALIZE_NUMPY,
)
),
array.tolist(),
)
array = numpy.empty((4, 0, 2))
self.assertEqual(
orjson.loads(
orjson.dumps(
array,
option=orjson.OPT_SERIALIZE_NUMPY,
)
),
array.tolist(),
)
array = numpy.empty((2, 4, 0))
self.assertEqual(
orjson.loads(
orjson.dumps(
array,
option=orjson.OPT_SERIALIZE_NUMPY,
)
),
array.tolist(),
)
def test_numpy_array_dimension_max(self):
array = numpy.random.rand(
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
)
assert array.ndim == 32
self.assertEqual(
orjson.loads(
orjson.dumps(
array,
option=orjson.OPT_SERIALIZE_NUMPY,
)
),
array.tolist(),
)
def test_numpy_scalar_int8(self):
self.assertEqual(
orjson.dumps(numpy.int8(0), option=orjson.OPT_SERIALIZE_NUMPY), b"0"
)
self.assertEqual(
orjson.dumps(numpy.int8(127), option=orjson.OPT_SERIALIZE_NUMPY),
b"127",
)
self.assertEqual(
orjson.dumps(numpy.int8(--128), option=orjson.OPT_SERIALIZE_NUMPY),
b"-128",
)
def test_numpy_scalar_int32(self):
self.assertEqual(
orjson.dumps(numpy.int32(1), option=orjson.OPT_SERIALIZE_NUMPY), b"1"
)
self.assertEqual(
orjson.dumps(numpy.int32(2147483647), option=orjson.OPT_SERIALIZE_NUMPY),
b"2147483647",
)
self.assertEqual(
orjson.dumps(numpy.int32(-2147483648), option=orjson.OPT_SERIALIZE_NUMPY),
b"-2147483648",
)
def test_numpy_scalar_int64(self):
self.assertEqual(
orjson.dumps(
numpy.int64(-9223372036854775808), option=orjson.OPT_SERIALIZE_NUMPY
),
b"-9223372036854775808",
)
self.assertEqual(
orjson.dumps(
numpy.int64(9223372036854775807), option=orjson.OPT_SERIALIZE_NUMPY
),
b"9223372036854775807",
)
def test_numpy_scalar_uint8(self):
self.assertEqual(
orjson.dumps(numpy.uint8(0), option=orjson.OPT_SERIALIZE_NUMPY), b"0"
)
self.assertEqual(
orjson.dumps(numpy.uint8(255), option=orjson.OPT_SERIALIZE_NUMPY),
b"255",
)
def test_numpy_scalar_uint32(self):
self.assertEqual(
orjson.dumps(numpy.uint32(0), option=orjson.OPT_SERIALIZE_NUMPY), b"0"
)
self.assertEqual(
orjson.dumps(numpy.uint32(4294967295), option=orjson.OPT_SERIALIZE_NUMPY),
b"4294967295",
)
def test_numpy_scalar_uint64(self):
self.assertEqual(
orjson.dumps(numpy.uint64(0), option=orjson.OPT_SERIALIZE_NUMPY), b"0"
)
self.assertEqual(
orjson.dumps(
numpy.uint64(18446744073709551615), option=orjson.OPT_SERIALIZE_NUMPY
),
b"18446744073709551615",
)
def test_numpy_scalar_float32(self):
self.assertEqual(
orjson.dumps(numpy.float32(1.0), option=orjson.OPT_SERIALIZE_NUMPY), b"1.0"
)
def test_numpy_scalar_float64(self):
self.assertEqual(
orjson.dumps(numpy.float64(123.123), option=orjson.OPT_SERIALIZE_NUMPY),
b"123.123",
)
| 28.780093
| 87
| 0.492801
|
4a0292de48aba4eb417c72d5fbd7777762130ff1
| 140
|
py
|
Python
|
LuoguCodes/AT1978.py
|
Anguei/OI-Codes
|
0ef271e9af0619d4c236e314cd6d8708d356536a
|
[
"MIT"
] | null | null | null |
LuoguCodes/AT1978.py
|
Anguei/OI-Codes
|
0ef271e9af0619d4c236e314cd6d8708d356536a
|
[
"MIT"
] | null | null | null |
LuoguCodes/AT1978.py
|
Anguei/OI-Codes
|
0ef271e9af0619d4c236e314cd6d8708d356536a
|
[
"MIT"
] | null | null | null |
n = int(raw_input().split()[0])
a = []
for i in range(n):
a.append(raw_input())
a.sort()
ans = ';';
for i in a:
ans += i;
print ans
| 14
| 31
| 0.535714
|
4a02935fda4fe010a940b678138731cbc5d30260
| 2,991
|
py
|
Python
|
addons14/storage_image_product/tests/test_product_image_relation.py
|
odoochain/addons_oca
|
55d456d798aebe16e49b4a6070765f206a8885ca
|
[
"MIT"
] | 1
|
2021-06-10T14:59:13.000Z
|
2021-06-10T14:59:13.000Z
|
addons14/storage_image_product/tests/test_product_image_relation.py
|
odoochain/addons_oca
|
55d456d798aebe16e49b4a6070765f206a8885ca
|
[
"MIT"
] | null | null | null |
addons14/storage_image_product/tests/test_product_image_relation.py
|
odoochain/addons_oca
|
55d456d798aebe16e49b4a6070765f206a8885ca
|
[
"MIT"
] | 1
|
2021-04-09T09:44:44.000Z
|
2021-04-09T09:44:44.000Z
|
# Copyright 2017 Akretion (http://www.akretion.com).
# @author Sébastien BEAU <sebastien.beau@akretion.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from .common import ProductImageCommonCase
class ProductImageCase(ProductImageCommonCase):
def test_available_attribute_value(self):
# The template have already 5 attribute values
# see demo data of ipad
image = self.env["product.image.relation"].new(
{"product_tmpl_id": self.template.id}
)
self.assertEqual(len(image.available_attribute_value_ids), 5)
def test_add_image_for_all_variant(self):
self.assertEqual(len(self.product_a.variant_image_ids), 0)
image = self.env["product.image.relation"].create(
{"product_tmpl_id": self.template.id, "image_id": self.logo_image.id}
)
self.assertEqual(self.product_a.variant_image_ids, image)
self.assertEqual(self.product_b.variant_image_ids, image)
self.assertEqual(self.product_c.variant_image_ids, image)
def test_add_image_for_white_variant(self):
image = self.env["product.image.relation"].create(
{
"product_tmpl_id": self.template.id,
"image_id": self.white_image.id,
"attribute_value_ids": [
(6, 0, [self.env.ref("product.product_attribute_value_3").id])
],
}
)
# White product should have the image
self.assertEqual(self.product_a.variant_image_ids, image)
self.assertEqual(self.product_c.variant_image_ids, image)
# Black product should not have the image
self.assertEqual(len(self.product_b.variant_image_ids), 0)
def test_add_image_for_white_and_black_variant(self):
logo = self.env["product.image.relation"].create(
{"product_tmpl_id": self.template.id, "image_id": self.logo_image.id}
)
image_wh = self.env["product.image.relation"].create(
{
"product_tmpl_id": self.template.id,
"image_id": self.white_image.id,
"attribute_value_ids": [
(6, 0, [self.env.ref("product.product_attribute_value_3").id])
],
}
)
image_bk = self.env["product.image.relation"].create(
{
"product_tmpl_id": self.template.id,
"image_id": self.black_image.id,
"attribute_value_ids": [
(6, 0, [self.env.ref("product.product_attribute_value_4").id])
],
}
)
# White product should have the white image and the logo
self.assertEqual(self.product_a.variant_image_ids, image_wh + logo)
self.assertEqual(self.product_c.variant_image_ids, image_wh + logo)
# Black product should have the black image and the logo
self.assertEqual(self.product_b.variant_image_ids, image_bk + logo)
| 43.347826
| 82
| 0.625878
|
4a0293fe1a2cdc37fb00ec504d5c50f2fdf8be89
| 1,188
|
py
|
Python
|
examples/options.py
|
dvvolynkin/socratic
|
d7b7aea41e65bb1a72ba439e6ea7368346b20c4d
|
[
"MIT"
] | 1
|
2020-04-24T08:58:17.000Z
|
2020-04-24T08:58:17.000Z
|
examples/options.py
|
dvvolynkin/socratic
|
d7b7aea41e65bb1a72ba439e6ea7368346b20c4d
|
[
"MIT"
] | null | null | null |
examples/options.py
|
dvvolynkin/socratic
|
d7b7aea41e65bb1a72ba439e6ea7368346b20c4d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Socratic - very simple question-answer dialogue system based on python generators.
#
# Daniil Volynkin
# foxezzz@gmail.com
#
# License: MIT
#
from dataclasses import dataclass
from typing import List, Optional
from socratic import Dialogue
@dataclass
class AnswerWithOptions:
message: str
options: Optional[List[str]] = None
def simple_dialogue(first_question):
if first_question == "Hello!":
second_question = yield AnswerWithOptions(
message="Hello!",
)
elif first_question == "How are you?":
second_question = yield AnswerWithOptions(
"Awesome!",
)
else:
second_question = yield AnswerWithOptions(
"I don't understand you :c!",
options=['Hello!', 'How are you?']
)
yield from simple_dialogue(second_question)
def main():
dialog = Dialogue(simple_dialogue)
print("Please type something: ")
while True:
user_replic = input('-- ')
answer = dialog.say(user_replic)
print("--", answer.message)
if answer.options:
print(answer.options)
if __name__ == '__main__':
main()
| 21.214286
| 84
| 0.627946
|
4a02944115a456b201583fb164cafe357ae8eb72
| 5,779
|
py
|
Python
|
intuition_experiments.py
|
wangsiqinudt/AnomalyDetectionTransformations
|
999d4eceacb12c7debdb49f5b58971ef4af5110d
|
[
"MIT"
] | 152
|
2018-11-06T14:41:21.000Z
|
2022-03-20T03:53:37.000Z
|
intuition_experiments.py
|
PramuPerera/OCDA
|
05e6872e74c0de83be45fb4002c103cb85586b4a
|
[
"MIT"
] | 7
|
2018-12-13T08:49:13.000Z
|
2021-08-30T12:58:18.000Z
|
intuition_experiments.py
|
PramuPerera/OCDA
|
05e6872e74c0de83be45fb4002c103cb85586b4a
|
[
"MIT"
] | 44
|
2018-12-05T08:42:10.000Z
|
2022-03-13T09:52:00.000Z
|
import itertools
import numpy as np
from scipy.ndimage.filters import gaussian_filter
import matplotlib.pyplot as plt
from transformations import SimpleTransformer
from utils import load_mnist
from keras.utils import to_categorical
from keras.layers import Flatten, Conv2D, Dense, BatchNormalization, MaxPool2D, Input, Lambda, average
from keras.models import Sequential, Model
import keras.backend as K
import tensorflow as tf
(x_train, y_train), (x_test, y_test) = load_mnist()
# scale to be in [0, 1]
x_train = (x_train + 1) / 2.
x_test = (x_test + 1) / 2.
single_class_ind = 3
anomaly_class_ind = 0
x_train_single = x_train[y_train == single_class_ind]
x_test_single = x_test[y_test == single_class_ind]
x_test_anomaly = x_test[y_test == anomaly_class_ind]
transformer = SimpleTransformer()
transformations_inds = np.tile(np.arange(transformer.n_transforms), len(x_train_single))
x_train_single_transformed = transformer.transform_batch(np.repeat(x_train_single, transformer.n_transforms, axis=0),
transformations_inds)
mdl = Sequential([Conv2D(32, (3, 3), padding='same', input_shape=(32, 32, 1), activation='relu'),
BatchNormalization(axis=-1),
MaxPool2D(),
Flatten(),
Dense(10, activation='relu'),
BatchNormalization(axis=-1),
Dense(transformer.n_transforms, activation='softmax')])
mdl.compile('adam',
'categorical_crossentropy',
['acc'])
batch_size = 64
mdl.fit(x=x_train_single_transformed,
y=to_categorical(transformations_inds),
batch_size=batch_size,
validation_split=0.1,
epochs=10)
single_class_preds = np.zeros((len(x_test_single), transformer.n_transforms))
for t in range(transformer.n_transforms):
single_class_preds[:, t] = mdl.predict(transformer.transform_batch(x_test_single, [t] * len(x_test_single)),
batch_size=batch_size)[:, t]
single_class_scores = single_class_preds.mean(axis=-1)
anomaly_class_preds = np.zeros((len(x_test_anomaly), transformer.n_transforms))
for t in range(transformer.n_transforms):
anomaly_class_preds[:, t] = mdl.predict(transformer.transform_batch(x_test_anomaly, [t] * len(x_test_anomaly)),
batch_size=batch_size)[:, t]
anomaly_class_scores = anomaly_class_preds.mean(axis=-1)
def affine(x, is_flip, k_rotate):
return tf.image.rot90(tf.image.flip_left_right(x) if is_flip else x,
k=k_rotate)
x_in = Input(batch_shape=mdl.input_shape)
transformations_sm_responses = [mdl(Lambda(affine, arguments={'is_flip': is_flip, 'k_rotate': k_rotate})(x_in))
for is_flip, k_rotate in itertools.product((False, True), range(4))]
out = average([Lambda(lambda sm_res: sm_res[:, j:j+1])(tens) for j, tens in enumerate(transformations_sm_responses)])
inference_mdl = Model(x_in, out)
grads_tensor = K.gradients([inference_mdl.output], [inference_mdl.input])[0]
grads_fn = K.function([inference_mdl.input], [grads_tensor])
def optimize_anomaly_images():
for im_ind in range(len(x_test_anomaly)):
im = x_test_anomaly[im_ind:im_ind+1].copy()
eta = 5
for _ in range(200):
grads = grads_fn([im])[0]
grads[np.abs(grads * im) < np.percentile(np.abs(grads * im), 80)] = 0
im_diff = grads * eta
im_diff *= 0.99
im += im_diff
im = gaussian_filter(im, 0.28)
im = np.clip(im, 0, 1)
im[im < np.percentile(np.abs(im), 80)] = 0
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(4, 2))
ax1.imshow(x_test_anomaly[im_ind].squeeze(), cmap='Greys_r')
ax1.grid(False)
ax1.tick_params(which='both', bottom=False, top=False, left=False, right=False,
labelbottom=False, labeltop=False, labelleft=False, labelright=False)
ax2.imshow(im.squeeze(), cmap='Greys_r')
ax2.grid(False)
ax2.tick_params(which='both', bottom=False, top=False, left=False, right=False,
labelbottom=False, labeltop=False, labelleft=False, labelright=False)
fig.savefig('0_{}.png'.format(im_ind))
plt.close()
print('0_3_{} done'.format(im_ind))
def optimize_normal_images():
    """Gradient-ascend each normal (single-class) training image toward a
    higher score and save an original-vs-optimized comparison figure.

    NOTE(review): loop nesting reconstructed from a whitespace-mangled
    source -- smoothing/clipping assumed to run on every ascent step; confirm
    against the original repository.
    """
    no_tick_kwargs = dict(which='both', bottom=False, top=False, left=False, right=False,
                          labelbottom=False, labeltop=False, labelleft=False, labelright=False)
    step_size = 5
    for idx in range(len(x_train_single)):
        image = x_train_single[idx:idx + 1].copy()
        for _ in range(200):
            gradient = grads_fn([image])[0]
            # Keep only the strongest 20% of gradient*image contributions.
            saliency = np.abs(gradient * image)
            gradient[saliency < np.percentile(saliency, 80)] = 0
            delta = gradient * step_size
            delta *= 0.99
            image += delta
            image = gaussian_filter(image, 0.28)
            image = np.clip(image, 0, 1)
        # Zero out the weakest 80% of pixels before display.
        image[image < np.percentile(np.abs(image), 80)] = 0
        fig, (left_ax, right_ax) = plt.subplots(1, 2, figsize=(4, 2))
        for axis, picture in ((left_ax, x_train_single[idx]), (right_ax, image)):
            axis.imshow(picture.squeeze(), cmap='Greys_r')
            axis.grid(False)
            axis.tick_params(**no_tick_kwargs)
        fig.savefig('3_{}.png'.format(idx))
        plt.close()
        print('3_3_{} done'.format(idx))
# Generate the visualisations for both splits.
optimize_normal_images()
optimize_anomaly_images()
| 39.855172
| 118
| 0.62381
|
4a0295bcad7d232319f1535baf3a93c9e7e35601
| 21,588
|
py
|
Python
|
tests/fib/test_fib.py
|
EchoUUU/sonic-mgmt-999
|
716d519afd04ee144475846d1b681d6b4a6e8b13
|
[
"Apache-2.0"
] | 1
|
2020-12-21T02:13:59.000Z
|
2020-12-21T02:13:59.000Z
|
tests/fib/test_fib.py
|
EchoUUU/sonic-mgmt-999
|
716d519afd04ee144475846d1b681d6b4a6e8b13
|
[
"Apache-2.0"
] | 1
|
2021-12-15T07:02:04.000Z
|
2021-12-15T07:02:04.000Z
|
tests/fib/test_fib.py
|
EchoUUU/sonic-mgmt
|
716d519afd04ee144475846d1b681d6b4a6e8b13
|
[
"Apache-2.0"
] | null | null | null |
import time
import json
import logging
import tempfile
import random
from datetime import datetime
import pytest
import requests
from tests.common.fixtures.ptfhost_utils import run_icmp_responder # lgtm[py/unused-import]
from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # lgtm[py/unused-import]
from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import]
from tests.ptf_runner import ptf_runner
from tests.common.helpers.assertions import pytest_assert
from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_random_side
from tests.common.dualtor.mux_simulator_control import mux_server_url
logger = logging.getLogger(__name__)

pytestmark = [
    pytest.mark.topology('any')
]

# Usually src-mac, dst-mac, vlan-id are optional hash keys. Not all the platform supports these optional hash keys. Not enable these three by default.
# HASH_KEYS = ['src-ip', 'dst-ip', 'src-port', 'dst-port', 'ingress-port', 'src-mac', 'dst-mac', 'ip-proto', 'vlan-id']
HASH_KEYS = ['src-ip', 'dst-ip', 'src-port', 'dst-port', 'ingress-port', 'ip-proto']
# Address pools the hash test draws random source/destination addresses from.
SRC_IP_RANGE = ['8.0.0.0', '8.255.255.255']
DST_IP_RANGE = ['9.0.0.0', '9.255.255.255']
SRC_IPV6_RANGE = ['20D0:A800:0:00::', '20D0:A800:0:00::FFFF']
DST_IPV6_RANGE = ['20D0:A800:0:01::', '20D0:A800:0:01::FFFF']
# VLANs temporarily created for the 'vlan-id' hash-key test.
VLANIDS = range(1032, 1279)
VLANIP = '192.168.{}.1/24'
PTF_QLEN = 2000
DEFAULT_MUX_SERVER_PORT = 8080
# Location on the PTF host of the generated port -> (dut, mac) JSON map.
PTF_TEST_PORT_MAP = '/root/ptf_test_port_map.json'
@pytest.fixture(scope='module')
def config_facts(duthosts):
    """Collect running-config facts for every frontend ASIC of every DUT.

    Returns a dict keyed by DUT hostname; each value is the list of per-ASIC
    config-facts dicts (backend ASICs are skipped).
    """
    facts_by_host = {}
    for dut in duthosts:
        facts_by_host[dut.hostname] = [
            asic.config_facts(source='running')['ansible_facts']
            for asic in dut.asics
            if not asic.is_it_backend()
        ]
    return facts_by_host
@pytest.fixture(scope='module')
def minigraph_facts(duthosts, tbinfo):
    # Extended minigraph facts for all DUTs, keyed by hostname.
    return duthosts.get_extended_minigraph_facts(tbinfo)
def get_t2_fib_info(duthosts, all_duts_cfg_facts, all_duts_mg_facts):
    """Get parsed FIB information from redis DB for all frontend DUTs (T2).

    Args:
        duthosts: DUT hosts object; ``frontend_nodes`` are iterated.
        all_duts_cfg_facts (dict): Per-hostname list of per-ASIC config facts.
        all_duts_mg_facts (dict): Per-hostname minigraph facts.

    Returns:
        dict: Map of prefix to PTF ports that are connected to DUT output ports.
        {
            '192.168.0.0/21': [],
            '192.168.8.0/25': [[58 59] [62 63] [66 67] [70 71]],
            '192.168.16.0/25': [[58 59] [62 63] [66 67] [70 71]],
            ...
            '20c0:c2e8:0:80::/64': [[58 59] [62 63] [66 67] [70 71]],
            '20c1:998::/64': [[58 59] [62 63] [66 67] [70 71]],
            ...
        }
    """
    timestamp = datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
    fib_info = {}
    for dut_index, duthost in enumerate(duthosts.frontend_nodes):
        cfg_facts = all_duts_cfg_facts[duthost.hostname]
        mg_facts = all_duts_mg_facts[duthost.hostname]
        for asic_index, asic_cfg_facts in enumerate(cfg_facts):
            asic = duthost.asic_instance(asic_index)

            # Dump all ROUTE_TABLE entries from this ASIC's redis and pull the
            # dump file back to the sonic-mgmt host.
            asic.shell("{} redis-dump -d 0 -k 'ROUTE*' -y > /tmp/fib.{}.txt".format(asic.ns_arg, timestamp))
            duthost.fetch(src="/tmp/fib.{}.txt".format(timestamp), dest="/tmp/fib")

            po = asic_cfg_facts.get('PORTCHANNEL', {})
            ports = asic_cfg_facts.get('PORT', {})

            with open("/tmp/fib/{}/tmp/fib.{}.txt".format(duthost.hostname, timestamp)) as fp:
                fib = json.load(fp)
            for k, v in fib.items():
                skip = False

                prefix = k.split(':', 1)[1]
                ifnames = v['value']['ifname'].split(',')
                nh = v['value']['nexthop']

                oports = []
                for ifname in ifnames:
                    # 'in' instead of dict.has_key(), which was removed in Python 3.
                    if ifname in po:
                        # ignore the prefix, if the prefix nexthop is not a frontend port
                        if 'members' in po[ifname]:
                            if 'role' in ports[po[ifname]['members'][0]] and ports[po[ifname]['members'][0]]['role'] == 'Int':
                                skip = True
                            else:
                                oports.append([str(mg_facts['minigraph_ptf_indices'][x]) for x in po[ifname]['members']])
                    else:
                        if ifname in ports:
                            if 'role' in ports[ifname] and ports[ifname]['role'] == 'Int':
                                skip = True
                            else:
                                oports.append([str(mg_facts['minigraph_ptf_indices'][ifname])])
                        else:
                            logger.info("Route point to non front panel port {}:{}".format(k, v))
                            skip = True

                # skip direct attached subnet
                if nh == '0.0.0.0' or nh == '::' or nh == "":
                    skip = True

                if not skip:
                    if prefix in fib_info:
                        fib_info[prefix] += oports
                    else:
                        fib_info[prefix] = oports
    return fib_info
def get_fib_info(duthost, cfg_facts, mg_facts):
    """Get parsed FIB information from redis DB.

    Args:
        duthost (SonicHost): Object for interacting with DUT.
        cfg_facts (list): Per-ASIC configuration facts for the DUT.
        mg_facts (dict): Minigraph facts.

    Returns:
        dict: Map of prefix to PTF ports that are connected to DUT output ports.
        {
            '192.168.0.0/21': [],
            '192.168.8.0/25': [[58 59] [62 63] [66 67] [70 71]],
            '192.168.16.0/25': [[58 59] [62 63] [66 67] [70 71]],
            ...
            '20c0:c2e8:0:80::/64': [[58 59] [62 63] [66 67] [70 71]],
            '20c1:998::/64': [[58 59] [62 63] [66 67] [70 71]],
            ...
        }
    """
    timestamp = datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
    fib_info = {}
    for asic_index, asic_cfg_facts in enumerate(cfg_facts):
        asic = duthost.asic_instance(asic_index)

        # Dump all ROUTE_TABLE entries from this ASIC's redis and pull the
        # dump file back to the sonic-mgmt host.
        asic.shell("{} redis-dump -d 0 -k 'ROUTE*' -y > /tmp/fib.{}.txt".format(asic.ns_arg, timestamp))
        duthost.fetch(src="/tmp/fib.{}.txt".format(timestamp), dest="/tmp/fib")

        po = asic_cfg_facts.get('PORTCHANNEL', {})
        ports = asic_cfg_facts.get('PORT', {})

        with open("/tmp/fib/{}/tmp/fib.{}.txt".format(duthost.hostname, timestamp)) as fp:
            fib = json.load(fp)
        for k, v in fib.items():
            skip = False

            prefix = k.split(':', 1)[1]
            ifnames = v['value']['ifname'].split(',')
            nh = v['value']['nexthop']

            oports = []
            for ifname in ifnames:
                # 'in' instead of dict.has_key(), which was removed in Python 3.
                if ifname in po:
                    # ignore the prefix, if the prefix nexthop is not a frontend port
                    if 'members' in po[ifname]:
                        if 'role' in ports[po[ifname]['members'][0]] and ports[po[ifname]['members'][0]]['role'] == 'Int':
                            skip = True
                        else:
                            oports.append([str(mg_facts['minigraph_ptf_indices'][x]) for x in po[ifname]['members']])
                else:
                    if ifname in ports:
                        if 'role' in ports[ifname] and ports[ifname]['role'] == 'Int':
                            skip = True
                        else:
                            oports.append([str(mg_facts['minigraph_ptf_indices'][ifname])])
                    else:
                        logger.info("Route point to non front panel port {}:{}".format(k, v))
                        skip = True

            # skip direct attached subnet
            if nh == '0.0.0.0' or nh == '::' or nh == "":
                skip = True

            if not skip:
                if prefix in fib_info:
                    fib_info[prefix] += oports
                else:
                    fib_info[prefix] = oports
            # For single_asic device, add empty list for directly connected subnets
            elif skip and not duthost.is_multi_asic:
                fib_info[prefix] = []

    return fib_info
def gen_fib_info_file(ptfhost, fib_info, filename):
    """Render ``fib_info`` into the text format consumed by the PTF tests and
    copy it to ``filename`` on the PTF host.

    Each line is ``<prefix> [p1 p2] [p3 p4] ...``; a prefix with no output
    ports gets a single empty ``[]``.
    """
    # mode='w': the default 'w+b' requires bytes on Python 3, which makes the
    # str writes below raise TypeError.
    with tempfile.NamedTemporaryFile(mode='w') as tmp_fib_info:
        for prefix, oports in fib_info.items():
            tmp_fib_info.write(prefix)
            if oports:
                for op in oports:
                    tmp_fib_info.write(' [{}]'.format(' '.join(op)))
            else:
                tmp_fib_info.write(' []')
            tmp_fib_info.write('\n')
        tmp_fib_info.flush()
        # Copy while the (delete-on-close) temp file still exists.
        ptfhost.copy(src=tmp_fib_info.name, dest=filename)
@pytest.fixture(scope='module')
def fib_info_files(duthosts, ptfhost, config_facts, minigraph_facts, tbinfo):
    """Generate FIB info file(s) on the PTF host and return their paths.

    T2 topologies get one combined file for all DUTs; every other topology
    gets one file per DUT.
    """
    if tbinfo['topo']['type'] == "t2":
        combined = get_t2_fib_info(duthosts, config_facts, minigraph_facts)
        path = '/root/fib_info_all_duts.txt'
        gen_fib_info_file(ptfhost, combined, path)
        return [path]

    paths = []
    for index, dut in enumerate(duthosts):
        info = get_fib_info(dut, config_facts[dut.hostname], minigraph_facts[dut.hostname])
        path = '/root/fib_info_dut{}.txt'.format(index)
        gen_fib_info_file(ptfhost, info, path)
        paths.append(path)
    return paths
@pytest.fixture(scope='module')
def disabled_ptf_ports(tbinfo):
    """Set of PTF port indices that are disabled in the testbed topology."""
    return {
        port_index
        for ptf_map in tbinfo['topo']['ptf_map_disabled'].values()
        for port_index in ptf_map.values()
    }
@pytest.fixture(scope='module')
def vlan_ptf_ports(duthosts, config_facts, tbinfo):
    """PTF port indices connected to a VLAN member port of any DUT."""
    ptf_indices = set()
    for dut_index, dut in enumerate(duthosts):
        topo_map = tbinfo['topo']['ptf_map'][str(dut_index)]
        for asic_facts in config_facts[dut.hostname]:
            for members in asic_facts.get('VLAN_MEMBER', {}).values():
                for port_name in members:
                    dut_port_index = asic_facts['port_index_map'][port_name]
                    ptf_indices.add(topo_map[str(dut_port_index)])
    return ptf_indices
@pytest.fixture(scope='module')
def router_macs(duthosts):
    # Router MAC address of each DUT, in duthosts order.
    mac_addresses = []
    for duthost in duthosts:
        mac_addresses.append(duthost.facts['router_mac'])
    return mac_addresses
# For dualtor
@pytest.fixture(scope='module')
def vlan_macs(duthosts, config_facts):
    """VLAN MAC of each DUT, in duthosts order.

    Falls back to the DUT's router MAC when no VLAN defines a 'mac' field.
    """
    mac_addresses = []
    for duthost in duthosts:
        vlan_mac = None
        for asic_facts in config_facts[duthost.hostname]:
            for vlan_cfg in asic_facts.get('VLAN', {}).values():
                if 'mac' in vlan_cfg:
                    vlan_mac = vlan_cfg['mac']
                    break
        mac_addresses.append(vlan_mac if vlan_mac else duthost.facts['router_mac'])
    return mac_addresses
def set_mux_side(tbinfo, mux_server_url, side):
    """Point the mux simulator's active side for all ports (dualtor only).

    Returns the simulator's response (the new mux_status of all mux
    Y-cables), or {} on non-dualtor topologies where no simulator exists.
    """
    if 'dualtor' not in tbinfo['topo']['name']:
        return {}
    res = requests.post(mux_server_url, json={"active_side": side})
    pytest_assert(res.status_code == 200, 'Failed to set active side: {}'.format(res.text))
    return res.json()  # Response is new mux_status of all mux Y-cables.
@pytest.fixture
def set_mux_random(tbinfo, mux_server_url):
    # Let the simulator pick a random active side per mux (dualtor only).
    return set_mux_side(tbinfo, mux_server_url, 'random')
@pytest.fixture
def set_mux_same_side(tbinfo, mux_server_url):
    # Force every mux to the same, randomly chosen ToR side (dualtor only).
    return set_mux_side(tbinfo, mux_server_url, random.choice(['upper_tor', 'lower_tor']))
def get_mux_status(tbinfo, mux_server_url):
    """Query the mux simulator for the status of every mux Y-cable.

    Returns {} on non-dualtor topologies where no simulator exists.
    """
    if 'dualtor' not in tbinfo['topo']['name']:
        return {}
    res = requests.get(mux_server_url)
    pytest_assert(res.status_code == 200, 'Failed to get mux status: {}'.format(res.text))
    return res.json()
def ptf_test_port_map(ptfhost, tbinfo, mux_server_url, disabled_ptf_ports, vlan_ptf_ports, router_macs, vlan_macs):
    """Build the PTF-port -> {'target_dut', 'target_mac'} map and copy it to
    the PTF host as JSON.

    Returns:
        str: Path of the JSON map file on the PTF host (PTF_TEST_PORT_MAP).
    """
    # For dualtor: which DUT index (0=upper_tor, 1=lower_tor) is active per port.
    active_dut_map = {}
    for mux_status in get_mux_status(tbinfo, mux_server_url).values():
        active_dut_index = 0 if mux_status['active_side'] == 'upper_tor' else 1
        active_dut_map[str(mux_status['port_index'])] = active_dut_index

    logger.info('router_macs={}'.format(router_macs))
    logger.info('vlan_macs={}'.format(vlan_macs))
    logger.info('vlan_ptf_ports={}'.format(vlan_ptf_ports))
    logger.info('disabled_ptf_ports={}'.format(disabled_ptf_ports))
    logger.info('active_dut_map={}'.format(active_dut_map))

    ports_map = {}
    for ptf_port, dut_intf_map in tbinfo['topo']['ptf_dut_intf_map'].items():
        if int(ptf_port) in disabled_ptf_ports:
            continue

        target_dut_index = None
        target_mac = None
        if int(ptf_port) in vlan_ptf_ports:  # PTF port connected to VLAN interface of DUT
            if active_dut_map:  # dualtor topology
                # If PTF port is connected to VLAN interface of dualToR DUTs, the PTF port index should be
                # same as DUT port index. Base on this fact to find out dut index of active side.
                target_dut_index = active_dut_map[ptf_port]
                target_mac = vlan_macs[target_dut_index]

        if target_dut_index is None:
            # Non-dualtor: target DUT is always the first and only DUT.
            # list(...) is required on Python 3, where dict.keys() returns a
            # view that does not support indexing.
            target_dut_index = int(list(dut_intf_map.keys())[0])

        if target_mac is None:
            target_mac = router_macs[target_dut_index]

        ports_map[ptf_port] = {'target_dut': target_dut_index, 'target_mac': target_mac}

    ptfhost.copy(content=json.dumps(ports_map), dest=PTF_TEST_PORT_MAP)
    return PTF_TEST_PORT_MAP
@pytest.fixture(scope="module")
def ignore_ttl(duthosts):
# on the multi asic devices, the packet can have different ttl based on how the packet is routed
# within in the device. So set this flag to mask the ttl in the ptf test
for duthost in duthosts:
if duthost.sonichost.is_multi_asic:
return True
return False
@pytest.fixture(scope="module")
def single_fib_for_duts(tbinfo):
# For a T2 topology, we are generating a single fib file across all asics, but have multiple frontend nodes (DUTS).
if tbinfo['topo']['type'] == "t2":
return True
return False
@pytest.mark.parametrize("ipv4, ipv6, mtu", [pytest.param(True, True, 1514)])
def test_basic_fib(duthosts, ptfhost, ipv4, ipv6, mtu, set_mux_random, fib_info_files,
                   tbinfo, mux_server_url, disabled_ptf_ports, vlan_ptf_ports, router_macs, vlan_macs,
                   ignore_ttl, single_fib_for_duts):
    """Run the PTF FibTest: send packets to every FIB prefix and verify they
    egress one of the expected ports (with load balancing where supported)."""
    timestamp = datetime.now().strftime('%Y-%m-%d-%H:%M:%S')

    # do not test load balancing for vs platform as kernel 4.9
    # can only do load balance base on L3
    if duthosts[0].facts['asic_type'] in ["vs"]:
        test_balancing = False
    else:
        test_balancing = True

    logging.info("run ptf test")
    log_file = "/tmp/fib_test.FibTest.ipv4.{}.ipv6.{}.{}.log".format(ipv4, ipv6, timestamp)
    logging.info("PTF log file: %s" % log_file)
    ptf_runner(ptfhost,
               "ptftests",
               "fib_test.FibTest",
               platform_dir="ptftests",
               params={"fib_info_files": fib_info_files[:3],  # Test at most 3 DUTs
                       "ptf_test_port_map": ptf_test_port_map(ptfhost, tbinfo, mux_server_url, disabled_ptf_ports,
                                                              vlan_ptf_ports, router_macs, vlan_macs),
                       "router_macs": router_macs,
                       "ipv4": ipv4,
                       "ipv6": ipv6,
                       "testbed_mtu": mtu,
                       "test_balancing": test_balancing,
                       "ignore_ttl": ignore_ttl,
                       "single_fib_for_duts": single_fib_for_duts},
               log_file=log_file,
               qlen=PTF_QLEN,
               socket_recv_size=16384)
def get_vlan_untag_ports(duthosts, config_facts):
    """
    get all untag vlan ports

    Returns a dict keyed by DUT hostname; each value is the list of port
    names whose VLAN tagging_mode is 'untagged'. Multi-asic DUTs are skipped.
    """
    vlan_untag_ports = {}
    for duthost in duthosts:
        if duthost.is_multi_asic:
            continue
        ports = []
        # NOTE(review): this indexes config_facts directly while the lookup
        # below uses config_facts[duthost.hostname] -- the two styles are
        # inconsistent and one is likely wrong; verify against the
        # config_facts fixture's shape.
        vlans = config_facts.get('VLAN_INTERFACE', {}).keys()
        for vlan in vlans:
            vlan_member_info = config_facts[duthost.hostname].get('VLAN_MEMBER', {}).get(vlan, {})
            if vlan_member_info:
                for port_name, tag_mode in vlan_member_info.items():
                    if tag_mode['tagging_mode'] == 'untagged':
                        ports.append(port_name)
        vlan_untag_ports[duthost.hostname] = ports
    return vlan_untag_ports
@pytest.fixture(scope="module")
def hash_keys(duthost):
hash_keys = HASH_KEYS[:] # Copy from global var to avoid side effects of multiple iterations
if 'dst-mac' in hash_keys:
hash_keys.remove('dst-mac')
# do not test load balancing on L4 port on vs platform as kernel 4.9
# can only do load balance base on L3
if duthost.facts['asic_type'] in ["vs"]:
if 'src-port' in hash_keys:
hash_keys.remove('src-port')
if 'dst-port' in hash_keys:
hash_keys.remove('dst-port')
if duthost.facts['asic_type'] in ["mellanox"]:
if 'ip-proto' in hash_keys:
hash_keys.remove('ip-proto')
if duthost.facts['asic_type'] in ["barefoot"]:
if 'ingress-port' in hash_keys:
hash_keys.remove('ingress-port')
# removing ingress-port and ip-proto from hash_keys not supported by Marvell SAI
if duthost.facts['platform'] in ['armhf-nokia_ixs7215_52x-r0']:
if 'ip-proto' in hash_keys:
hash_keys.remove('ip-proto')
if 'ingress-port' in hash_keys:
hash_keys.remove('ingress-port')
# remove the ingress port from multi asic platform
# In multi asic platform each asic has different hash seed,
# the same packet coming in different asic
# could egress out of different port
# the hash_test condition for hash_key == ingress_port will fail
if duthost.sonichost.is_multi_asic:
hash_keys.remove('ingress-port')
return hash_keys
def configure_vlan(duthost, ports):
    """Create every test VLAN on the DUT, add ``ports`` to each as members,
    and assign each VLAN its IP address from VLANIP."""
    for vlan_id in VLANIDS:
        duthost.shell('config vlan add {}'.format(vlan_id))
        for port in ports:
            duthost.shell('config vlan member add {} {}'.format(vlan_id, port))
        duthost.shell('config interface ip add Vlan{} '.format(vlan_id) + VLANIP.format(vlan_id % 256))
    # Give SONiC a moment to apply the new VLAN configuration.
    time.sleep(5)
def unconfigure_vlan(duthost, ports):
    """Tear down everything configure_vlan() created: remove members, IPs,
    and the VLANs themselves."""
    for vlan_id in VLANIDS:
        for port in ports:
            duthost.shell('config vlan member del {} {}'.format(vlan_id, port))
        duthost.shell('config interface ip remove Vlan{} '.format(vlan_id) + VLANIP.format(vlan_id % 256))
        duthost.shell('config vlan del {}'.format(vlan_id))
    # Give SONiC a moment to apply the removal.
    time.sleep(5)
@pytest.fixture
def setup_vlan(tbinfo, duthosts, config_facts, hash_keys):
    # Temporarily create extra VLANs for the 'vlan-id' hash-key test on plain
    # (non-dualtor) t0 topologies, and tear them down after the test.
    vlan_untag_ports = get_vlan_untag_ports(duthosts, config_facts)

    need_to_clean_vlan = False

    # add some vlan for hash_key vlan-id test
    if tbinfo['topo']['type'] == 't0' and 'dualtor' not in tbinfo['topo']['name'] and 'vlan-id' in hash_keys:
        for duthost in duthosts:
            configure_vlan(duthost, vlan_untag_ports[duthost.hostname])
        need_to_clean_vlan = True

    yield

    # remove added vlan
    if need_to_clean_vlan:
        for duthost in duthosts:
            unconfigure_vlan(duthost, vlan_untag_ports[duthost.hostname])
@pytest.fixture(params=["ipv4", "ipv6"])
def ipver(request):
    # Parametrize dependent tests over both IP versions.
    return request.param
def test_hash(fib_info_files, setup_vlan, hash_keys, ptfhost, ipver, set_mux_same_side,
              tbinfo, mux_server_url, disabled_ptf_ports, vlan_ptf_ports, router_macs, vlan_macs,
              ignore_ttl, single_fib_for_duts):
    """Run the PTF HashTest: verify ECMP hashing spreads traffic varying in
    each enabled hash key across the expected next-hop ports."""
    timestamp = datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
    log_file = "/tmp/hash_test.HashTest.{}.{}.log".format(ipver, timestamp)
    logging.info("PTF log file: %s" % log_file)
    # Pick the random source/destination address pools for this IP version.
    if ipver == "ipv4":
        src_ip_range = SRC_IP_RANGE
        dst_ip_range = DST_IP_RANGE
    else:
        src_ip_range = SRC_IPV6_RANGE
        dst_ip_range = DST_IPV6_RANGE
    ptf_runner(ptfhost,
               "ptftests",
               "hash_test.HashTest",
               platform_dir="ptftests",
               params={"fib_info_files": fib_info_files[:3],  # Test at most 3 DUTs
                       "ptf_test_port_map": ptf_test_port_map(ptfhost, tbinfo, mux_server_url, disabled_ptf_ports,
                                                              vlan_ptf_ports, router_macs, vlan_macs),
                       "hash_keys": hash_keys,
                       "src_ip_range": ",".join(src_ip_range),
                       "dst_ip_range": ",".join(dst_ip_range),
                       "router_macs": router_macs,
                       "vlan_ids": VLANIDS,
                       "ignore_ttl":ignore_ttl,
                       "single_fib_for_duts": single_fib_for_duts
                       },
               log_file=log_file,
               qlen=PTF_QLEN,
               socket_recv_size=16384)
| 40.578947
| 150
| 0.590328
|
4a0295bf7c49570bbaab4a92f9f0c81be6207405
| 2,184
|
py
|
Python
|
neural_loop_combiner/dataset/dataSampler.py
|
mir-aidj/neural-Loop-combiner
|
0bda8a5e68ef3fe7c05bf8ff440ddde86c21a2cc
|
[
"MIT"
] | 22
|
2020-07-31T08:11:03.000Z
|
2021-08-23T07:50:24.000Z
|
neural_loop_combiner/dataset/dataSampler.py
|
mir-aidj/neural-Loop-combiner
|
0bda8a5e68ef3fe7c05bf8ff440ddde86c21a2cc
|
[
"MIT"
] | null | null | null |
neural_loop_combiner/dataset/dataSampler.py
|
mir-aidj/neural-Loop-combiner
|
0bda8a5e68ef3fe7c05bf8ff440ddde86c21a2cc
|
[
"MIT"
] | 1
|
2021-02-26T09:14:47.000Z
|
2021-02-26T09:14:47.000Z
|
import warnings
warnings.filterwarnings("ignore")
import os
import random
import librosa
import numpy as np
from spleeter.separator import Separator
from spleeter.audio.adapter import get_default_audio_adapter
from neural_loop_combiner.utils.utils import log_message, data_exclude
from neural_loop_combiner.config import settings
from neural_loop_combiner.dataset.sampler import Sampler
class DataSampler:
    """Draw negative loop examples for every track in a split.

    For each (loop, loop) pair of each track, a :class:`Sampler` produces one
    negative example per enabled negative-sampling type; results are
    accumulated per type across all tracks.
    """

    def __init__(self, tracks_key, tracks_dict, idv_datas, harm_datas, data_type, log_info=None):
        # log_info default changed from a shared mutable [] to None: mutable
        # default arguments are shared across calls. The parameter is unused
        # here and kept only for interface compatibility.
        self.sr = settings.SR
        self.cache = settings.CACHE
        self.dur = settings.DUR
        self.log = settings.LOG  # whether to emit progress messages
        self.out_dir = settings.OUT_DIR
        # Only negative-sampling types enabled (== 1) in the settings.
        self.ng_types = [neg_type for neg_type in settings.NG_TYPES.keys() if settings.NG_TYPES[neg_type] == 1]
        self.data_type = data_type
        self.idv_datas = idv_datas
        self.harm_datas = harm_datas
        self.tracks_key = tracks_key
        self.tracks_dict = tracks_dict

    def sampling(self):
        """Run negative sampling for every track.

        Returns:
            dict: negative-type -> list of sampled negatives (one element per
            processed loop pair).
        """
        tracks_key = self.tracks_key
        tracks_dict = self.tracks_dict
        ng_types = self.ng_types
        idv_datas = self.idv_datas
        harm_datas = self.harm_datas
        data_type = self.data_type
        log = self.log

        neg_datas = {ng_type: [] for ng_type in ng_types}
        total = len(tracks_key)
        for i, track_key in enumerate(tracks_key):
            # Candidate pool excludes this track's own loops.
            excl_datas = tracks_dict[track_key]['loops_path']
            pair_datas = tracks_dict[track_key]['pairs_path']
            other_datas = data_exclude(idv_datas, excl_datas)
            for pair_data in pair_datas:
                neg_dict = Sampler(pair_data, other_datas, harm_datas).sampling()
                for neg_type in neg_dict:
                    neg_datas[neg_type].append(neg_dict[neg_type])
            if log: log_message(f'Negative Sampling processing ({data_type})', [i+1, total])
        if log: log_message(f'Negative Sampling completed ({data_type})')
        return neg_datas
| 35.803279
| 114
| 0.622711
|
4a02960e4d6960e9d096a32b27e38e203fdd9421
| 397
|
py
|
Python
|
03/03-1.py
|
pak21/aoc2017
|
622ca0df32cbe22f147c3aeb0dc07a8edfc17095
|
[
"MIT"
] | null | null | null |
03/03-1.py
|
pak21/aoc2017
|
622ca0df32cbe22f147c3aeb0dc07a8edfc17095
|
[
"MIT"
] | null | null | null |
03/03-1.py
|
pak21/aoc2017
|
622ca0df32cbe22f147c3aeb0dc07a8edfc17095
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import math
import sys
def calc(n):
    """Print and return the spiral (Ulam) Manhattan distance from square n
    back to square 1 (Advent of Code 2017 day 3, part 1).

    Generalized to handle n == 1 (distance 0); the original formula raised
    ZeroDivisionError there. The return value is new and backward-compatible
    (callers previously ignored the implicit None).
    """
    if n == 1:
        m = 0
    else:
        f = math.floor(math.sqrt(n - 1))  # ring indicator: side length grows with f
        d = n - f * f - 1                 # offset past the ring's first square
        if f % 2:
            dp = d % (f + 1)
            m = f - dp if dp < f / 2 else dp + 1
        else:
            dp = d % f
            m = f - dp if dp <= f / 2 else dp
    print('{} {}'.format(n, m))
    return m
# Sanity-check small squares, the n=1024 example, and the neighborhood of the
# puzzle input.
for n in range(2, 26):
    calc(n)
calc(1024)
for n in range(361520, 361531):
    calc(n)
| 15.88
| 44
| 0.460957
|
4a02963ef361ad9fbaa8c5998ecf612a8a0946d2
| 5,520
|
py
|
Python
|
Documentation/conf.py
|
ZirakZaheer/cilium
|
4fed9902a6342dc54d853dca2e22fd148590ed26
|
[
"Apache-2.0"
] | 1
|
2019-10-16T04:01:13.000Z
|
2019-10-16T04:01:13.000Z
|
Documentation/conf.py
|
jukylin/cilium
|
cc7a4cb6881639caeae4b34512741e82e2b4c041
|
[
"Apache-2.0"
] | null | null | null |
Documentation/conf.py
|
jukylin/cilium
|
cc7a4cb6881639caeae4b34512741e82e2b4c041
|
[
"Apache-2.0"
] | 1
|
2020-09-02T04:07:20.000Z
|
2020-09-02T04:07:20.000Z
|
# -*- coding: utf-8 -*-
#
# Cilium documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 12 18:34:43 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
# import sys
import re
import subprocess
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.ifconfig',
              'sphinx.ext.githubpages',
              'sphinxcontrib.openapi',
              'sphinx_tabs.tabs']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ['.rst', '.md']
# Markdown sources are parsed with recommonmark's CommonMark parser.
source_parsers = {
    '.md': 'recommonmark.parser.CommonMarkParser',
}

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Cilium'
copyright = u'2017, Cilium Authors'
author = u'Cilium Authors'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
release = open("../VERSION", "r").read().strip()
# Asume the current branch is master but check with VERSION file.
branch = subprocess.check_output("git rev-parse --abbrev-ref HEAD", shell=True)
githubusercontent = 'https://raw.githubusercontent.com/cilium/cilium/'
scm_web = githubusercontent + branch
# Store variables in the epilogue so they are globally available.
rst_epilog = """
.. |SCM_WEB| replace:: \{s}
""".format(s = scm_web)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_themes/**/*.rst']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Look for bundled themes under the local _themes directory first.
html_theme_path = ["_themes", ]

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images', '_static']

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'Ciliumdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',

    # Let chapters start on any page (avoids blank pages in the PDF).
    'extraclassoptions': 'openany',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'Cilium.tex', u'Cilium Documentation',
     u'Cilium Authors', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'cilium', u'Cilium Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'Cilium', u'Cilium Documentation',
     author, 'Cilium', 'One line description of project.',
     'Miscellaneous'),
]

http_strict_mode = False

# Try as hard as possible to find references
default_role = 'any'
def setup(app):
    # Sphinx extension hook: register the custom stylesheet with the builder.
    app.add_stylesheet('parsed-literal.css')
| 30.837989
| 79
| 0.685688
|
4a02965bc010a53280e087f571870082a9b547a8
| 18,152
|
py
|
Python
|
pyqtgraph/exporters/SVGExporter.py
|
Tillsten/pyqtgraph
|
0045863165fe526988c58cf4f8232ae2d261a5ee
|
[
"MIT"
] | null | null | null |
pyqtgraph/exporters/SVGExporter.py
|
Tillsten/pyqtgraph
|
0045863165fe526988c58cf4f8232ae2d261a5ee
|
[
"MIT"
] | null | null | null |
pyqtgraph/exporters/SVGExporter.py
|
Tillsten/pyqtgraph
|
0045863165fe526988c58cf4f8232ae2d261a5ee
|
[
"MIT"
] | null | null | null |
from .Exporter import Exporter
from ..python2_3 import asUnicode
from ..parametertree import Parameter
from ..Qt import QtGui, QtCore, QtSvg
from .. import debug
from .. import functions as fn
import re
import xml.dom.minidom as xml
import numpy as np
__all__ = ['SVGExporter']
class SVGExporter(Exporter):
    """Export a graphics item (or whole scene) to an SVG document.

    Qt's own QSvgGenerator lacks clipping support, so the document is built
    by generateSvg(), which renders each item separately and stitches the
    fragments together by hand.
    """
    Name = "Scalable Vector Graphics (SVG)"
    allowCopy = True

    def __init__(self, item):
        Exporter.__init__(self, item)
        # No user-configurable export options are currently exposed; the
        # empty group keeps the parameter-tree plumbing intact.
        self.params = Parameter(name='params', type='group', children=[])

    def widthChanged(self):
        # Keep the 'height' parameter consistent with the source aspect ratio.
        source = self.getSourceRect()
        aspect = source.height() / source.width()
        self.params.param('height').setValue(self.params['width'] * aspect,
                                             blockSignal=self.heightChanged)

    def heightChanged(self):
        # Keep the 'width' parameter consistent with the source aspect ratio.
        source = self.getSourceRect()
        aspect = source.width() / source.height()
        self.params.param('width').setValue(self.params['height'] * aspect,
                                            blockSignal=self.widthChanged)

    def parameters(self):
        return self.params

    def export(self, fileName=None, toBytes=False, copy=False):
        # No destination given: pop up a save dialog and return; the dialog
        # calls back into export() with the chosen file name.
        if toBytes is False and copy is False and fileName is None:
            self.fileSaveDialog(filter="Scalable Vector Graphics (*.svg)")
            return

        # Qt's SVG generator is not complete (notably, it lacks clipping), so
        # generate SVG for each item independently and reconstruct the entire
        # document manually.
        svg = generateSvg(self.item)

        if toBytes:
            return svg.encode('UTF-8')
        if copy:
            md = QtCore.QMimeData()
            md.setData('image/svg+xml', QtCore.QByteArray(svg.encode('UTF-8')))
            QtGui.QApplication.clipboard().setMimeData(md)
        else:
            with open(fileName, 'wb') as fh:
                fh.write(asUnicode(svg).encode('utf-8'))
xmlHeader = """\
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.2" baseProfile="tiny">
<title>pyqtgraph SVG export</title>
<desc>Generated with Qt and pyqtgraph</desc>
<defs>
</defs>
"""
def generateSvg(item):
    """Return a complete SVG document (str) for *item*.

    Renders the item tree via _generateItemSvg(), then (even on failure)
    resets export mode on every item in the tree before wrapping the result
    in the document skeleton.
    """
    global xmlHeader
    try:
        node = _generateItemSvg(item)
    finally:
        ## reset export mode for all items in the tree
        if isinstance(item, QtGui.QGraphicsScene):
            items = item.items()
        else:
            items = [item]
        # Extending the list while iterating walks the whole subtree.
        for i in items:
            items.extend(i.childItems())
        for i in items:
            if hasattr(i, 'setExportMode'):
                i.setExportMode(False)

    cleanXml(node)
    return xmlHeader + node.toprettyxml(indent=' ') + "\n</svg>\n"
def _generateItemSvg(item, nodes=None, root=None):
    """Render *item* (excluding children, which are recursed into) to an SVG
    <g> element and return it as a minidom Element, or None if hidden.

    nodes: dict mapping already-assigned element IDs to their XML elements,
           used to keep IDs unique across the whole document.
    root:  the top-level item/scene; coordinates are offset to its corner.
    """
    ## This function is intended to work around some issues with Qt's SVG generator
    ## and SVG in general.
    ## 1) Qt SVG does not implement clipping paths. This is absurd.
    ## The solution is to let Qt generate SVG for each item independently,
    ## then glue them together manually with clipping.
    ##
    ## The format Qt generates for all items looks like this:
    ##
    ## <g>
    ##     <g transform="matrix(...)">
    ##         one or more of: <path/> or <polyline/> or <text/>
    ##     </g>
    ##     <g transform="matrix(...)">
    ##         one or more of: <path/> or <polyline/> or <text/>
    ##     </g>
    ##     . . .
    ## </g>
    ##
    ## 2) There seems to be wide disagreement over whether path strokes
    ## should be scaled anisotropically.
    ##     see: http://web.mit.edu/jonas/www/anisotropy/
    ## Given that both inkscape and illustrator seem to prefer isotropic
    ## scaling, we will optimize for those cases.
    ##
    ## 3) Qt generates paths using non-scaling-stroke from SVG 1.2, but
    ## inkscape only supports 1.1.
    ##
    ## Both 2 and 3 can be addressed by drawing all items in world coordinates.

    profiler = debug.Profiler()

    if nodes is None:  ## nodes maps all node IDs to their XML element.
                       ## this allows us to ensure all elements receive unique names.
        nodes = {}
    if root is None:
        root = item

    ## Skip hidden items
    if hasattr(item, 'isVisible') and not item.isVisible():
        return None

    ## If this item defines its own SVG generator, use that.
    if hasattr(item, 'generateSvg'):
        return item.generateSvg(nodes)

    ## Generate SVG text for just this item (exclude its children; we'll handle them later)
    tr = QtGui.QTransform()
    if isinstance(item, QtGui.QGraphicsScene):
        ## scenes draw nothing themselves; use an empty group as a container
        xmlStr = "<g>\n</g>\n"
        doc = xml.parseString(xmlStr)
        childs = [i for i in item.items() if i.parentItem() is None]
    elif item.__class__.paint == QtGui.QGraphicsItem.paint:
        ## item did not override paint(); nothing to render for it either
        xmlStr = "<g>\n</g>\n"
        doc = xml.parseString(xmlStr)
        childs = item.childItems()
    else:
        childs = item.childItems()
        tr = itemTransform(item, item.scene())

        ## offset to corner of root item
        if isinstance(root, QtGui.QGraphicsScene):
            rootPos = QtCore.QPoint(0,0)
        else:
            rootPos = root.scenePos()
        tr2 = QtGui.QTransform()
        tr2.translate(-rootPos.x(), -rootPos.y())
        tr = tr * tr2

        ## paint the item through QSvgGenerator into an in-memory buffer
        arr = QtCore.QByteArray()
        buf = QtCore.QBuffer(arr)
        svg = QtSvg.QSvgGenerator()
        svg.setOutputDevice(buf)
        dpi = QtGui.QDesktopWidget().physicalDpiX()
        svg.setResolution(dpi)

        p = QtGui.QPainter()
        p.begin(svg)
        if hasattr(item, 'setExportMode'):
            item.setExportMode(True, {'painter': p})
        try:
            ## draw in world coordinates (see notes 2 and 3 above)
            p.setTransform(tr)
            item.paint(p, QtGui.QStyleOptionGraphicsItem(), None)
        finally:
            p.end()
            ## Can't do this here--we need to wait until all children have painted as well.
            ## this is taken care of in generateSvg instead.
            #if hasattr(item, 'setExportMode'):
                #item.setExportMode(False)

        xmlStr = bytes(arr).decode('utf-8')
        doc = xml.parseString(xmlStr)

    try:
        ## Get top-level group for this item
        g1 = doc.getElementsByTagName('g')[0]
        ## get list of sub-groups
        ## NOTE(review): g2 is currently unused; kept for parity with upstream
        g2 = [n for n in g1.childNodes if isinstance(n, xml.Element) and n.tagName == 'g']
    except:
        print(doc.toxml())
        raise

    profiler('render')

    ## Get rid of group transformation matrices by applying
    ## transformation to inner coordinates
    correctCoordinates(g1, item)
    profiler('correct')
    ## make sure g1 has the transformation matrix
    #m = (tr.m11(), tr.m12(), tr.m21(), tr.m22(), tr.m31(), tr.m32())
    #g1.setAttribute('transform', "matrix(%f,%f,%f,%f,%f,%f)" % m)

    #print "=================",item,"====================="
    #print g1.toprettyxml(indent="  ", newl='')

    ## Inkscape does not support non-scaling-stroke (this is SVG 1.2, inkscape supports 1.1)
    ## So we need to correct anything attempting to use this.
    #correctStroke(g1, item, root)

    ## decide on a name for this item; append _<n> until it is unique
    baseName = item.__class__.__name__
    i = 1
    while True:
        name = baseName + "_%d" % i
        if name not in nodes:
            break
        i += 1
    nodes[name] = g1
    g1.setAttribute('id', name)

    ## If this item clips its children, we need to take care of that.
    childGroup = g1  ## add children directly to this node unless we are clipping
    if not isinstance(item, QtGui.QGraphicsScene):
        ## See if this item clips its children
        if int(item.flags() & item.ItemClipsChildrenToShape) > 0:
            ## Generate svg for just the path
            #if isinstance(root, QtGui.QGraphicsScene):
                #path = QtGui.QGraphicsPathItem(item.mapToScene(item.shape()))
            #else:
                #path = QtGui.QGraphicsPathItem(root.mapToParent(item.mapToItem(root, item.shape())))
            path = QtGui.QGraphicsPathItem(item.mapToScene(item.shape()))
            ## path must briefly live in the scene so it can be rendered
            item.scene().addItem(path)
            try:
                pathNode = _generateItemSvg(path, root=root).getElementsByTagName('path')[0]
            finally:
                item.scene().removeItem(path)

            ## and for the clipPath element
            clip = name + '_clip'
            clipNode = g1.ownerDocument.createElement('clipPath')
            clipNode.setAttribute('id', clip)
            clipNode.appendChild(pathNode)
            g1.appendChild(clipNode)

            ## children go into a sub-group that references the clip path
            childGroup = g1.ownerDocument.createElement('g')
            childGroup.setAttribute('clip-path', 'url(#%s)' % clip)
            g1.appendChild(childGroup)
    profiler('clipping')

    ## Add all child items as sub-elements, back-to-front by z-value.
    childs.sort(key=lambda c: c.zValue())
    for ch in childs:
        cg = _generateItemSvg(ch, nodes, root)
        if cg is None:
            continue
        childGroup.appendChild(cg)  ### this isn't quite right--some items draw below their parent (good enough for now)
    profiler('children')
    return g1
def correctCoordinates(node, item):
    """Bake each <g> element's transform matrix into the coordinates of the
    primitives it contains (polyline points, path data), so the transform
    attribute can be dropped.  Text elements are left untransformed; their
    font-family attribute is normalized instead.
    """
    ## Remove transformation matrices from <g> tags by applying matrix to coordinates inside.
    ## Each item is represented by a single top-level group with one or more groups inside.
    ## Each inner group contains one or more drawing primitives, possibly of different types.
    groups = node.getElementsByTagName('g')

    ## Since we leave text unchanged, groups which combine text and non-text primitives must be split apart.
    ## (if at some point we start correcting text transforms as well, then it should be safe to remove this)
    groups2 = []
    for grp in groups:
        subGroups = [grp.cloneNode(deep=False)]
        textGroup = None  ## None until the first element decides text vs. non-text
        ## iterate a copy: children are re-parented into subGroups as we go
        for ch in grp.childNodes[:]:
            if isinstance(ch, xml.Element):
                if textGroup is None:
                    textGroup = ch.tagName == 'text'
                if ch.tagName == 'text':
                    if textGroup is False:
                        ## switching from non-text to text: start a new sub-group
                        subGroups.append(grp.cloneNode(deep=False))
                        textGroup = True
                else:
                    if textGroup is True:
                        ## switching from text to non-text: start a new sub-group
                        subGroups.append(grp.cloneNode(deep=False))
                        textGroup = False
            subGroups[-1].appendChild(ch)
        groups2.extend(subGroups)
        for sg in subGroups:
            node.insertBefore(sg, grp)
        node.removeChild(grp)
    groups = groups2

    for grp in groups:
        matrix = grp.getAttribute('transform')
        match = re.match(r'matrix\((.*)\)', matrix)
        if match is None:
            vals = [1,0,0,1,0,0]  ## identity when no matrix attribute present
        else:
            vals = [float(a) for a in match.groups()[0].split(',')]
        ## 2x3 affine matrix in SVG (a,c,e / b,d,f) column order
        tr = np.array([[vals[0], vals[2], vals[4]], [vals[1], vals[3], vals[5]]])

        removeTransform = False
        for ch in grp.childNodes:
            if not isinstance(ch, xml.Element):
                continue
            if ch.tagName == 'polyline':
                removeTransform = True
                coords = np.array([[float(a) for a in c.split(',')] for c in ch.getAttribute('points').strip().split(' ')])
                coords = fn.transformCoordinates(tr, coords, transpose=True)
                ch.setAttribute('points', ' '.join([','.join([str(a) for a in c]) for c in coords]))
            elif ch.tagName == 'path':
                removeTransform = True
                newCoords = ''
                oldCoords = ch.getAttribute('d').strip()
                if oldCoords == '':
                    continue
                ## each token is "<cmd><x>,<y>" or "<x>,<y>"; transform the pair,
                ## preserving any leading single-letter path command
                for c in oldCoords.split(' '):
                    x,y = c.split(',')
                    if x[0].isalpha():
                        t = x[0]
                        x = x[1:]
                    else:
                        t = ''
                    nc = fn.transformCoordinates(tr, np.array([[float(x),float(y)]]), transpose=True)
                    newCoords += t+str(nc[0,0])+','+str(nc[0,1])+' '
                ch.setAttribute('d', newCoords)
            elif ch.tagName == 'text':
                removeTransform = False
                ## leave text alone for now. Might need this later to correctly render text with outline.
                #c = np.array([
                    #[float(ch.getAttribute('x')), float(ch.getAttribute('y'))],
                    #[float(ch.getAttribute('font-size')), 0],
                    #[0,0]])
                #c = fn.transformCoordinates(tr, c, transpose=True)
                #ch.setAttribute('x', str(c[0,0]))
                #ch.setAttribute('y', str(c[0,1]))
                #fs = c[1]-c[2]
                #fs = (fs**2).sum()**0.5
                #ch.setAttribute('font-size', str(fs))

                ## Correct some font information: append a generic fallback family
                families = ch.getAttribute('font-family').split(',')
                if len(families) == 1:
                    font = QtGui.QFont(families[0].strip('" '))
                    if font.style() == font.SansSerif:
                        families.append('sans-serif')
                    elif font.style() == font.Serif:
                        families.append('serif')
                    elif font.style() == font.Courier:
                        families.append('monospace')
                    ch.setAttribute('font-family', ', '.join([f if ' ' not in f else '"%s"'%f for f in families]))

            ## correct line widths if needed: scale the stroke width by the
            ## matrix so appearance is preserved once the transform is removed
            if removeTransform and ch.getAttribute('vector-effect') != 'non-scaling-stroke':
                w = float(grp.getAttribute('stroke-width'))
                s = fn.transformCoordinates(tr, np.array([[w,0], [0,0]]), transpose=True)
                w = ((s[0]-s[1])**2).sum()**0.5
                ch.setAttribute('stroke-width', str(w))

        if removeTransform:
            grp.removeAttribute('transform')
## Make this exporter selectable in pyqtgraph's export dialog.
SVGExporter.register()
def itemTransform(item, root):
    """Return the QTransform mapping *item*'s coordinates to *root*'s
    (actually to the parent coordinate system of root), honoring items
    with the ItemIgnoresTransformations flag set.
    """
    ## Return the transformation mapping item to root
    ## (actually to parent coordinate system of root)
    if item is root:
        tr = QtGui.QTransform()
        tr.translate(*item.pos())
        tr = tr * item.transform()
        return tr

    if int(item.flags() & item.ItemIgnoresTransformations) > 0:
        ## item ignores ancestor transforms: only its scene position (mapped
        ## through the parents) and its own local transform apply
        pos = item.pos()
        parent = item.parentItem()
        if parent is not None:
            pos = itemTransform(parent, root).map(pos)
        tr = QtGui.QTransform()
        tr.translate(pos.x(), pos.y())
        tr = item.transform() * tr
    else:
        ## find next parent that is either the root item or
        ## an item that ignores its transformation
        nextRoot = item
        while True:
            nextRoot = nextRoot.parentItem()
            if nextRoot is None:
                nextRoot = root
                break
            if nextRoot is root or int(nextRoot.flags() & nextRoot.ItemIgnoresTransformations) > 0:
                break
        if isinstance(nextRoot, QtGui.QGraphicsScene):
            tr = item.sceneTransform()
        else:
            ## itemTransform() returns (QTransform, invertible-flag); take the matrix
            tr = itemTransform(nextRoot, root) * item.itemTransform(nextRoot)[0]
    return tr
def cleanXml(node):
    """Recursively tidy a minidom element tree for pretty-printing.

    Removes non-element children (stray text/whitespace nodes) from any
    node that also has element children, so toprettyxml() controls all
    formatting, and deletes <g> elements that have no children at all.
    """
    hasElement = False
    nonElement = []
    ## Iterate over a snapshot: the recursive call below may remove a
    ## childless <g> from node.childNodes, and mutating the live list
    ## while iterating it would silently skip the following sibling
    ## (e.g. the second of two adjacent empty groups survived a pass).
    for ch in list(node.childNodes):
        if isinstance(ch, xml.Element):
            hasElement = True
            cleanXml(ch)
        else:
            nonElement.append(ch)

    if hasElement:
        for ch in nonElement:
            node.removeChild(ch)
    elif node.tagName == 'g':  ## remove childless groups
        node.parentNode.removeChild(node)
| 39.290043
| 123
| 0.551344
|
4a029740fffa1005ad0b4b3ac1b51b4c140f4b10
| 11,056
|
py
|
Python
|
src/pyvesync/vesyncswitch.py
|
webdjoe-bot/pyvesync
|
48242e70b15221c0bfa6e7c4391cf171f949280d
|
[
"MIT"
] | null | null | null |
src/pyvesync/vesyncswitch.py
|
webdjoe-bot/pyvesync
|
48242e70b15221c0bfa6e7c4391cf171f949280d
|
[
"MIT"
] | 1
|
2021-02-01T06:27:47.000Z
|
2021-02-08T23:22:24.000Z
|
src/pyvesync/vesyncswitch.py
|
webdjoe-bot/pyvesync
|
48242e70b15221c0bfa6e7c4391cf171f949280d
|
[
"MIT"
] | null | null | null |
"""Classes for VeSync Switch Devices."""
import logging
import json
from abc import ABCMeta, abstractmethod
from pyvesync.helpers import Helpers as helpers
from pyvesync.vesyncbasedevice import VeSyncBaseDevice
logger = logging.getLogger(__name__)
## Maps VeSync device-type codes to the optional features each switch model
## supports; consumed by VeSyncSwitch.is_dimmable().
feature_dict = {
    'ESWL01': [],
    'ESWD16': ['dimmable'],
    'ESWL03': []
}
class VeSyncSwitch(VeSyncBaseDevice):
    """Etekcity Switch Base Class.

    Abstract base for VeSync wall-switch devices.  Concrete subclasses
    must implement get_details, turn_on, turn_off and get_config.
    """

    # NOTE(review): the original spelled this ``__metaclasss__`` (three
    # s's); even corrected, ``__metaclass__`` is the Python 2 spelling
    # and is ignored by Python 3, so the @abstractmethod decorators
    # below are not actually enforced.  Using ``metaclass=ABCMeta`` in
    # the class signature would enforce them, but would also forbid
    # direct instantiation — left as-is to preserve behavior.
    __metaclass__ = ABCMeta

    def __init__(self, details, manager):
        """Initialize Switch Base Class."""
        super().__init__(details, manager)
        # Per-device detail cache populated by get_details().
        self.details = {}

    def is_dimmable(self) -> bool:
        """Return True if this switch model supports dimming."""
        return 'dimmable' in feature_dict.get(self.device_type, [])

    @abstractmethod
    def get_details(self):
        """Get Device Details."""

    @abstractmethod
    def turn_on(self):
        """Turn Switch On."""

    @abstractmethod
    def turn_off(self):
        """Turn switch off."""

    @abstractmethod
    def get_config(self):
        """Get configuration and firmware details."""

    @property
    def active_time(self):
        """Active time of switch in minutes (0 if unknown)."""
        return self.details.get('active_time', 0)

    def update(self):
        """Update device details."""
        self.get_details()
class VeSyncWallSwitch(VeSyncSwitch):
    """Etekcity standard (non-dimmable) wall switch."""

    def __init__(self, details, manager):
        """Initialize standard etekcity wall switch class."""
        super(VeSyncWallSwitch, self).__init__(details, manager)

    def get_details(self):
        """Get switch device details and cache them on the instance."""
        body = helpers.req_body(self.manager, 'devicedetail')
        body['uuid'] = self.uuid
        head = helpers.req_headers(self.manager)

        r, _ = helpers.call_api(
            '/inwallswitch/v1/device/devicedetail',
            'post',
            headers=head,
            json=body
        )

        if r is not None and helpers.code_check(r):
            self.device_status = r.get('deviceStatus', self.device_status)
            self.details['active_time'] = r.get('activeTime', 0)
            self.connection_status = r.get('connectionStatus',
                                           self.connection_status)
        else:
            logger.debug('Error getting %s details', self.device_name)

    def get_config(self):
        """Get switch device configuration info."""
        body = helpers.req_body(self.manager, 'devicedetail')
        body['method'] = 'configurations'
        body['uuid'] = self.uuid

        r, _ = helpers.call_api(
            '/inwallswitch/v1/device/configurations',
            'post',
            headers=helpers.req_headers(self.manager),
            json=body)

        # Guard against a failed request (r is None) before code_check,
        # consistent with every other API call in this module.
        if r is not None and helpers.code_check(r):
            self.config = helpers.build_config_dict(r)
        else:
            logger.warning("Unable to get %s config info", self.device_name)

    def _set_status(self, status: str) -> bool:
        """Send a devicestatus request; return True on success.

        Shared implementation for turn_on/turn_off (the original
        duplicated this body in both methods).
        """
        body = helpers.req_body(self.manager, 'devicestatus')
        body['status'] = status
        body['uuid'] = self.uuid
        head = helpers.req_headers(self.manager)

        r, _ = helpers.call_api(
            '/inwallswitch/v1/device/devicestatus',
            'put',
            headers=head,
            json=body
        )

        if r is not None and helpers.code_check(r):
            self.device_status = status
            return True
        logger.warning('Error turning %s %s', self.device_name, status)
        return False

    def turn_off(self):
        """Turn off switch device."""
        return self._set_status('off')

    def turn_on(self):
        """Turn on switch device."""
        return self._set_status('on')
class VeSyncDimmerSwitch(VeSyncSwitch):
    """Vesync Dimmer Switch Class with RGB Faceplate."""

    def __init__(self, details, manager):
        """Initialize dimmer switch class."""
        super().__init__(details, manager)
        self._brightness = None          # last known brightness percent
        self._rgb_value = {'red': 0, 'blue': 0, 'green': 0}
        self._rgb_status = None          # 'on'/'off' for the RGB faceplate
        self._indicator_light = None     # 'on'/'off' for the indicator light

    def get_details(self):
        """Get dimmer switch details and cache them on the instance."""
        body = helpers.req_body(self.manager, 'devicedetail')
        body['uuid'] = self.uuid
        head = helpers.req_headers(self.manager)

        r, _ = helpers.call_api(
            '/dimmer/v1/device/devicedetail',
            'post',
            headers=head,
            json=body)

        if r is not None and helpers.code_check(r):
            self.device_status = r.get('deviceStatus', self.device_status)
            self.details['active_time'] = r.get('activeTime', 0)
            self.connection_status = r.get('connectionStatus',
                                           self.connection_status)
            self._brightness = r.get('brightness')
            self._rgb_status = r.get('rgbStatus')
            self._rgb_value = r.get('rgbValue')
            self._indicator_light = r.get('indicatorlightStatus')
        else:
            logger.debug('Error getting %s details', self.device_name)

    @property
    def brightness(self):
        """Return brightness in percent."""
        return self._brightness

    @property
    def indicator_light_status(self):
        """Faceplate brightness light status."""
        return self._indicator_light

    @property
    def rgb_light_status(self):
        """RGB Faceplate light status."""
        return self._rgb_status

    @property
    def rgb_light_value(self):
        """RGB Light Values."""
        return self._rgb_value

    def switch_toggle(self, status: str) -> bool:
        """Set switch status to 'on' or 'off'; return True on success."""
        if status not in ['on', 'off']:
            logger.debug('Invalid status passed to wall switch')
            return False
        body = helpers.req_body(self.manager, 'devicestatus')
        body['status'] = status
        body['uuid'] = self.uuid
        head = helpers.req_headers(self.manager)

        r, _ = helpers.call_api(
            '/dimmer/v1/device/devicestatus',
            'put',
            headers=head,
            json=body
        )

        if r is not None and helpers.code_check(r):
            self.device_status = status
            return True
        logger.warning('Error turning %s %s', self.device_name, status)
        return False

    def turn_on(self) -> bool:
        """Turn switch on."""
        return self.switch_toggle('on')

    def turn_off(self) -> bool:
        """Turn switch off."""
        return self.switch_toggle('off')

    def indicator_light_toggle(self, status: str) -> bool:
        """Set the faceplate indicator light 'on' or 'off'."""
        if status not in ['on', 'off']:
            logger.debug('Invalid status for wall switch')
            return False
        body = helpers.req_body(self.manager, 'devicestatus')
        body['status'] = status
        body['uuid'] = self.uuid
        head = helpers.req_headers(self.manager)

        r, _ = helpers.call_api(
            '/dimmer/v1/device/indicatorlightstatus',
            'put',
            headers=head,
            json=body
        )

        if r is not None and helpers.code_check(r):
            self.device_status = status
            return True
        logger.warning('Error turning %s indicator light %s',
                       self.device_name, status)
        return False

    def indicator_light_on(self) -> bool:
        """Turn Indicator light on."""
        return self.indicator_light_toggle('on')

    def indicator_light_off(self) -> bool:
        """Turn indicator light off."""
        return self.indicator_light_toggle('off')

    def rgb_color_status(self, status: str, red: int = None,
                         blue: int = None, green: int = None) -> bool:
        """Set faceplate RGB light status and (optionally) its color."""
        body = helpers.req_body(self.manager, 'devicestatus')
        body['status'] = status
        body['uuid'] = self.uuid
        head = helpers.req_headers(self.manager)
        if red is not None and blue is not None and green is not None:
            body['rgbValue'] = {'red': red, 'blue': blue, 'green': green}

        r, _ = helpers.call_api(
            '/dimmer/v1/device/devicergbstatus',
            'put',
            headers=head,
            json=body
        )

        if r is not None and helpers.code_check(r):
            self._rgb_status = status
            if body.get('rgbValue') is not None:
                self._rgb_value = {'red': red, 'blue': blue, 'green': green}
            return True
        # Original message always said "off", even when turning on.
        logger.warning('Error setting %s rgb status to %s',
                       self.device_name, status)
        return False

    def rgb_color_off(self) -> bool:
        """Turn RGB Color Off."""
        return self.rgb_color_status('off')

    def rgb_color_on(self) -> bool:
        """Turn RGB Color On."""
        return self.rgb_color_status('on')

    def rgb_color_set(self, red: int, green: int, blue: int) -> bool:
        """Set RGB color of faceplate; each value must be an int in 0-255."""
        if (isinstance(red, int) and
                isinstance(green, int) and
                isinstance(blue, int)):
            for color in [red, green, blue]:
                if color < 0 or color > 255:
                    logger.warning('Invalid RGB value')
                    return False
            return self.rgb_color_status('on', red, green, blue)
        # Original fell through returning None on non-int input.
        return False

    def set_brightness(self, brightness: int) -> bool:
        """Set brightness of dimmer (1 - 100); return True on success."""
        # BUG FIX: the original condition used ``or``
        # (brightness > 0 or brightness <= 100), which is true for every
        # integer; require the value to actually be within 1..100.
        if isinstance(brightness, int) and 0 < brightness <= 100:
            body = helpers.req_body(self.manager, 'devicestatus')
            body['brightness'] = brightness
            body['uuid'] = self.uuid
            head = helpers.req_headers(self.manager)

            r, _ = helpers.call_api(
                '/dimmer/v1/device/updatebrightness',
                'put',
                headers=head,
                json=body)

            if r is not None and helpers.code_check(r):
                self._brightness = brightness
                return True
            logger.warning('Error setting %s brightness', self.device_name)
        else:
            logger.warning('Invalid brightness')
        return False

    def displayJSON(self):
        """Return dict of device details for JSON display."""
        sup = super().displayJSON()
        sup_val = json.loads(sup)
        # BUG FIX: original tested the *method object* ``self.is_dimmable``
        # (always truthy) instead of calling it, and reported active_time
        # under the "Indicator Light" key.
        if self.is_dimmable():
            sup_val.update({
                "Indicator Light": str(self._indicator_light),
                "Brightness": str(self._brightness),
                "RGB Light": str(self._rgb_status)
            })
        return sup_val
| 31.770115
| 79
| 0.568379
|
4a029744352c4353bb3c7e2c5bc3b66dd45e1037
| 13,838
|
py
|
Python
|
src/omop_cdm_classes_5_2.py
|
jhajagos/CommonDataModelMapper
|
65d2251713e5581b76cb16e36424d61fb194c901
|
[
"Apache-2.0"
] | 1
|
2019-06-14T02:26:35.000Z
|
2019-06-14T02:26:35.000Z
|
src/omop_cdm_classes_5_2.py
|
jhajagos/CommonDataModelMapper
|
65d2251713e5581b76cb16e36424d61fb194c901
|
[
"Apache-2.0"
] | null | null | null |
src/omop_cdm_classes_5_2.py
|
jhajagos/CommonDataModelMapper
|
65d2251713e5581b76cb16e36424d61fb194c901
|
[
"Apache-2.0"
] | 1
|
2019-08-12T20:19:28.000Z
|
2019-08-12T20:19:28.000Z
|
from mapping_classes import OutputClass
# OMOP CDM v5.2 output table definitions.  Each class names one CDM table
# and declares its column order for the mapping pipeline; OutputClass
# (from mapping_classes) supplies the shared serialization behavior.


class DomainObject(OutputClass):
    """OMOP CDM `domain` vocabulary table."""
    def fields(self):
        return ["domain_id", "domain_name", "domain_concept_id"]

    def table_name(self):
        return "domain"


class ConceptObject(OutputClass):
    """OMOP CDM `concept` vocabulary table."""
    def fields(self):
        return ["concept_id", "concept_name", "domain_id", "vocabulary_id", "concept_class_id", "standard_concept",
                "concept_code", "valid_start_date", "valid_end_date", "invalid_reason"]

    def table_name(self):
        return "concept"


class ObservationPeriodObject(OutputClass):
    """OMOP CDM `observation_period` table."""
    def fields(self):
        return ["observation_period_id", "person_id",
                "observation_period_start_date",
                #"observation_period_start_datetime",
                "observation_period_end_date",
                #"observation_period_end_datetime",
                "period_type_concept_id"]

    def table_name(self):
        return "observation_period"


class VisitOccurrenceObject(OutputClass):
    """OMOP CDM `visit_occurrence` table."""
    def fields(self):
        return ["visit_occurrence_id", "person_id", "visit_concept_id", "visit_start_date", "visit_start_datetime",
                "visit_end_date", "visit_end_datetime", "visit_type_concept_id", "provider_id", "care_site_id",
                "visit_source_value", "visit_source_concept_id", "admitting_source_concept_id",
                "admitting_source_value", "discharge_to_concept_id", "discharge_to_source_value",
                "preceding_visit_occurrence_id"]

    def table_name(self):
        return "visit_occurrence"


class DrugStrengthObject(OutputClass):
    """OMOP CDM `drug_strength` vocabulary table."""
    def fields(self):
        return ["drug_concept_id", "ingredient_concept_id", "amount_value", "amount_unit_concept_id",
                "numerator_value", "numerator_unit_concept_id", "denominator_value", "denominator_unit_concept_id",
                "box_size", "valid_start_date", "valid_end_date", "invalid_reason"]

    def table_name(self):
        return "drug_strength"


class PayerPlanPeriodObject(OutputClass):
    """OMOP CDM `payer_plan_period` table."""
    def fields(self):
        return ["payer_plan_period_id", "person_id", "payer_plan_period_start_date", "payer_plan_period_end_date",
                "payer_source_value", "plan_source_value", "family_source_value"]

    def table_name(self):
        return "payer_plan_period"


class CostObject(OutputClass):
    """OMOP CDM `cost` table."""
    def fields(self):
        return ["cost_id", "cost_event_id", "cost_domain_id", "cost_type_concept_id", "currency_concept_id",
                "total_charge", "total_cost", "total_paid", "paid_by_payer", "paid_by_patient",
                "paid_patient_copay", "paid_patient_coinsurance", "paid_patient_deductible", "paid_by_primary",
                "paid_ingredient_cost", "paid_dispensing_fee", "payer_plan_period_id", "amount_allowed",
                "revenue_code_concept_id", "revenue_code_source_value", "drg_concept_id", "drg_source_value"]

    def table_name(self):
        return "cost"


class DeviceExposureObject(OutputClass):
    """OMOP CDM `device_exposure` table."""
    def fields(self):
        return ["device_exposure_id", "person_id", "device_concept_id", "device_exposure_start_date",
                "device_exposure_start_datetime", "device_exposure_end_date", "device_exposure_end_datetime",
                "device_type_concept_id", "unique_device_id", "quantity", "provider_id", "visit_occurrence_id",
                "device_source_value", "device_source_concept_id"]

    def table_name(self):
        return "device_exposure"


class MeasurementObject(OutputClass):
    """OMOP CDM `measurement` table."""
    def fields(self):
        return ["measurement_id", "person_id", "measurement_concept_id", "measurement_date", "measurement_datetime",
                "measurement_type_concept_id", "operator_concept_id", "value_as_number", "value_as_concept_id",
                "unit_concept_id", "range_low", "range_high", "provider_id", "visit_occurrence_id",
                "measurement_source_value", "measurement_source_concept_id", "unit_source_value",
                "value_source_value"]

    def table_name(self):
        return "measurement"


class ConceptRelationshipObject(OutputClass):
    """OMOP CDM `concept_relationship` vocabulary table."""
    def fields(self):
        return ["concept_id_1", "concept_id_2", "relationship_id", "valid_start_date", "valid_end_date",
                "invalid_reason"]

    def table_name(self):
        return "concept_relationship"


class FactRelationshipObject(OutputClass):
    """OMOP CDM `fact_relationship` table."""
    def fields(self):
        return ["domain_concept_id_1", "fact_id_1", "domain_concept_id_2", "fact_id_2", "relationship_concept_id"]

    def table_name(self):
        return "fact_relationship"


class CohortObject(OutputClass):
    """OMOP CDM `cohort` table."""
    def fields(self):
        return ["cohort_definition_id", "subject_id", "cohort_start_date", "cohort_end_date"]

    def table_name(self):
        return "cohort"


class DeathObject(OutputClass):
    """OMOP CDM `death` table."""
    def fields(self):
        return ["person_id", "death_date", "death_datetime", "death_type_concept_id", "cause_concept_id",
                "cause_source_value", "cause_source_concept_id"]

    def table_name(self):
        return "death"


class DoseEraObject(OutputClass):
    """OMOP CDM `dose_era` table."""
    def fields(self):
        return ["dose_era_id", "person_id", "drug_concept_id", "unit_concept_id", "dose_value",
                "dose_era_start_date", "dose_era_end_date"]

    def table_name(self):
        return "dose_era"


class VocabularyObject(OutputClass):
    """OMOP CDM `vocabulary` table."""
    def fields(self):
        return ["vocabulary_id", "vocabulary_name", "vocabulary_reference", "vocabulary_version",
                "vocabulary_concept_id"]

    def table_name(self):
        return "vocabulary"


class ConceptAncestorObject(OutputClass):
    """OMOP CDM `concept_ancestor` vocabulary table."""
    def fields(self):
        return ["ancestor_concept_id", "descendant_concept_id", "min_levels_of_separation",
                "max_levels_of_separation"]

    def table_name(self):
        return "concept_ancestor"


class ConceptSynonymObject(OutputClass):
    """OMOP CDM `concept_synonym` vocabulary table."""
    def fields(self):
        return ["concept_id", "concept_synonym_name", "language_concept_id"]

    def table_name(self):
        return "concept_synonym"


class NoteObject(OutputClass):
    """OMOP CDM `note` table."""
    def fields(self):
        return ["note_id", "person_id", "note_date", "note_datetime", "note_type_concept_id",
                "note_class_concept_id", "note_title", "note_text", "encoding_concept_id", "language_concept_id",
                "provider_id", "visit_occurrence_id", "note_source_value"]

    def table_name(self):
        return "note"


class ProcedureOccurrenceObject(OutputClass):
    """OMOP CDM `procedure_occurrence` table."""
    def fields(self):
        return ["procedure_occurrence_id", "person_id", "procedure_concept_id", "procedure_date",
                "procedure_datetime", "procedure_type_concept_id", "modifier_concept_id", "quantity", "provider_id",
                "visit_occurrence_id", "procedure_source_value", "procedure_source_concept_id",
                "qualifier_source_value"]

    def table_name(self):
        return "procedure_occurrence"


class ConditionEraObject(OutputClass):
    """OMOP CDM `condition_era` table."""
    def fields(self):
        return ["condition_era_id", "person_id", "condition_concept_id", "condition_era_start_date",
                "condition_era_end_date", "condition_occurrence_count"]

    def table_name(self):
        return "condition_era"


class ProviderObject(OutputClass):
    """OMOP CDM `provider` table."""
    def fields(self):
        return ["provider_id", "provider_name", "NPI", "DEA", "specialty_concept_id", "care_site_id",
                "year_of_birth", "gender_concept_id", "provider_source_value", "specialty_source_value",
                "specialty_source_concept_id", "gender_source_value", "gender_source_concept_id"]

    def table_name(self):
        return "provider"


class CdmSourceObject(OutputClass):
    """OMOP CDM `cdm_source` metadata table."""
    def fields(self):
        return ["cdm_source_name", "cdm_source_abbreviation", "cdm_holder", "source_description",
                "source_documentation_reference", "cdm_etl_reference", "source_release_date", "cdm_release_date",
                "cdm_version", "vocabulary_version"]

    def table_name(self):
        return "cdm_source"


class AttributeDefinitionObject(OutputClass):
    """OMOP CDM `attribute_definition` table."""
    def fields(self):
        return ["attribute_definition_id", "attribute_name", "attribute_description", "attribute_type_concept_id",
                "attribute_syntax"]

    def table_name(self):
        return "attribute_definition"


class LocationObject(OutputClass):
    """OMOP CDM `location` table."""
    def fields(self):
        return ["location_id", "address_1", "address_2", "city", "state", "zip", "county", "location_source_value"]

    def table_name(self):
        return "location"


class RelationshipObject(OutputClass):
    """OMOP CDM `relationship` vocabulary table."""
    def fields(self):
        return ["relationship_id", "relationship_name", "is_hierarchical", "defines_ancestry",
                "reverse_relationship_id", "relationship_concept_id"]

    def table_name(self):
        return "relationship"


class DrugEraObject(OutputClass):
    """OMOP CDM `drug_era` table."""
    def fields(self):
        return ["drug_era_id", "person_id", "drug_concept_id", "drug_era_start_date", "drug_era_end_date",
                "drug_exposure_count", "gap_days"]

    def table_name(self):
        return "drug_era"


class SpecimenObject(OutputClass):
    """OMOP CDM `specimen` table."""
    def fields(self):
        return ["specimen_id", "person_id", "specimen_concept_id", "specimen_type_concept_id", "specimen_date",
                "specimen_datetime", "quantity", "unit_concept_id", "anatomic_site_concept_id",
                "disease_status_concept_id", "specimen_source_id", "specimen_source_value", "unit_source_value",
                "anatomic_site_source_value", "disease_status_source_value"]

    def table_name(self):
        return "specimen"


class ConceptClassObject(OutputClass):
    """OMOP CDM `concept_class` vocabulary table."""
    def fields(self):
        return ["concept_class_id", "concept_class_name", "concept_class_concept_id"]

    def table_name(self):
        return "concept_class"


class ConditionOccurrenceObject(OutputClass):
    """OMOP CDM `condition_occurrence` table."""
    def fields(self):
        return ["condition_occurrence_id", "person_id", "condition_concept_id", "condition_start_date",
                "condition_start_datetime", "condition_end_date", "condition_end_datetime",
                "condition_type_concept_id", "stop_reason", "provider_id", "visit_occurrence_id",
                "condition_source_value", "condition_source_concept_id", "condition_status_source_value",
                "condition_status_concept_id"]

    def table_name(self):
        return "condition_occurrence"


class CareSiteObject(OutputClass):
    """OMOP CDM `care_site` table."""
    def fields(self):
        return ["care_site_id", "care_site_name", "place_of_service_concept_id", "location_id",
                "care_site_source_value", "place_of_service_source_value"]

    def table_name(self):
        return "care_site"


class ObservationObject(OutputClass):
    """OMOP CDM `observation` table."""
    def fields(self):
        return ["observation_id", "person_id", "observation_concept_id", "observation_date", "observation_datetime",
                "observation_type_concept_id", "value_as_number", "value_as_string", "value_as_concept_id",
                "qualifier_concept_id", "unit_concept_id", "provider_id", "visit_occurrence_id",
                "observation_source_value", "observation_source_concept_id", "unit_source_value",
                "qualifier_source_value"]

    def table_name(self):
        return "observation"


class CohortDefinitionObject(OutputClass):
    """OMOP CDM `cohort_definition` table."""
    def fields(self):
        return ["cohort_definition_id", "cohort_definition_name", "cohort_definition_description",
                "definition_type_concept_id", "cohort_definition_syntax", "subject_concept_id",
                "cohort_initiation_date"]

    def table_name(self):
        return "cohort_definition"


class SourceToConceptMapObject(OutputClass):
    """OMOP CDM `source_to_concept_map` vocabulary table."""
    def fields(self):
        return ["source_code", "source_concept_id", "source_vocabulary_id", "source_code_description",
                "target_concept_id", "target_vocabulary_id", "valid_start_date", "valid_end_date", "invalid_reason"]

    def table_name(self):
        return "source_to_concept_map"


class PersonObject(OutputClass):
    """OMOP CDM `person` table."""
    def fields(self):
        return ["person_id", "gender_concept_id", "year_of_birth", "month_of_birth", "day_of_birth",
                "birth_datetime", "race_concept_id", "ethnicity_concept_id", "location_id", "provider_id",
                "care_site_id", "person_source_value", "gender_source_value", "gender_source_concept_id",
                "race_source_value", "race_source_concept_id", "ethnicity_source_value",
                "ethnicity_source_concept_id"]

    def table_name(self):
        return "person"


class NoteNlpObject(OutputClass):
    """OMOP CDM `note_nlp` table."""
    def fields(self):
        return ["note_nlp_id", "note_id", "section_concept_id", "snippet", "offset", "lexical_variant",
                "note_nlp_concept_id", "note_nlp_source_concept_id", "nlp_system", "nlp_date", "nlp_datetime",
                "term_exists", "term_temporal", "term_modifiers"]

    def table_name(self):
        return "note_nlp"


class DrugExposureObject(OutputClass):
    """OMOP CDM `drug_exposure` table."""
    def fields(self):
        return ["drug_exposure_id", "person_id", "drug_concept_id", "drug_exposure_start_date",
                "drug_exposure_start_datetime", "drug_exposure_end_date", "drug_exposure_end_datetime",
                "verbatim_end_date", "drug_type_concept_id", "stop_reason", "refills", "quantity", "days_supply",
                "sig", "route_concept_id", "lot_number", "provider_id", "visit_occurrence_id", "drug_source_value",
                "drug_source_concept_id", "route_source_value", "dose_unit_source_value"]

    def table_name(self):
        return "drug_exposure"


class CohortAttributeObject(OutputClass):
    """OMOP CDM `cohort_attribute` table."""
    def fields(self):
        return ["cohort_definition_id", "cohort_start_date", "cohort_end_date", "subject_id",
                "attribute_definition_id", "value_as_number", "value_as_concept_id"]

    def table_name(self):
        return "cohort_attribute"
| 37.603261
| 116
| 0.686949
|
4a02981b4b3acdbd49c0e08c541a92828b219d75
| 2,804
|
py
|
Python
|
docs/tools/github.py
|
Schnappi618/ClickHouse
|
a87e1124b5711890c38c06b30b6b3741121ad094
|
[
"Apache-2.0"
] | null | null | null |
docs/tools/github.py
|
Schnappi618/ClickHouse
|
a87e1124b5711890c38c06b30b6b3741121ad094
|
[
"Apache-2.0"
] | null | null | null |
docs/tools/github.py
|
Schnappi618/ClickHouse
|
a87e1124b5711890c38c06b30b6b3741121ad094
|
[
"Apache-2.0"
] | null | null | null |
import collections
import copy
import io
import logging
import os
import sys
import tarfile
import jinja2
import requests
import util
def choose_latest_releases():
    """Pick the newest stable/lts ClickHouse release tag per major version.

    Scans up to 9 pages of the GitHub tags API (tags arrive newest first)
    and returns ``OrderedDict.items()`` mapping
    ``major_version -> (tag_name, tarball_url)``, stopping once more than
    10 major versions have been collected.  Exits the process when the API
    returns a non-tag payload (e.g. a rate-limit error object).
    """
    logging.info('Collecting release candidates')
    seen = collections.OrderedDict()  # major version -> (tag name, tarball url)
    candidates = []
    for page in range(1, 10):
        url = 'https://api.github.com/repos/ClickHouse/ClickHouse/tags?per_page=100&page=%d' % page
        candidates += requests.get(url).json()
    logging.info('Collected all release candidates')
    for tag in candidates:
        if isinstance(tag, dict):
            name = tag.get('name', '')
            # Only tags explicitly marked stable/lts count; skip known-bad
            # version families as well.
            is_unstable = ('stable' not in name) and ('lts' not in name)
            is_in_blacklist = ('v18' in name) or ('prestable' in name) or ('v1.1' in name)
            if is_unstable or is_in_blacklist:
                continue
            # Tags are newest-first, so the first hit per major version wins.
            major_version = '.'.join((name.split('.', 2))[:2])
            if major_version not in seen:
                seen[major_version] = (name, tag.get('tarball_url'),)
                if len(seen) > 10:
                    break
        else:
            # Non-dict entries mean the API returned an error payload.
            logging.fatal('Unexpected GitHub response: %s', str(candidates))
            sys.exit(1)
    logging.info('Found stable releases: %s', str(seen.keys()))
    return seen.items()
def process_release(args, callback, release):
    """Download a release tarball, unpack it and run *callback* on its docs dir."""
    version, (tag_name, tarball_url,) = release
    logging.info('Building docs for %s', tag_name)
    archive = tarfile.open(
        mode='r:gz', fileobj=io.BytesIO(requests.get(tarball_url).content))
    with util.temp_dir() as workdir:
        archive.extractall(workdir)
        # Work on a shallow copy so the caller's args stay untouched.
        release_args = copy.copy(args)
        release_args.version_prefix = version
        release_args.is_stable_release = True
        release_args.docs_dir = os.path.join(
            workdir, os.listdir(workdir)[0], 'docs')
        try:
            callback(release_args)
        except jinja2.exceptions.TemplateSyntaxError:
            # Older releases may contain macros the current templates
            # reject; retry with macros disabled.
            release_args.no_docs_macros = True
            callback(release_args)
def build_releases(args, callback):
    """Build docs for every collected stable release, one at a time."""
    for release in args.stable_releases:
        process_release(args, callback, release)
def get_events(args):
    """Extract upcoming events from the repository README.

    Reads ``<args.docs_dir>/../README.md``, skips everything up to the
    'Upcoming Events' heading and parses each subsequent line of the form
    ``* [Event name](signup-url) on <date>.``.  Lines that do not match
    are ignored.

    Returns a list of dicts with ``signup_link``, ``event_name`` and
    ``event_date`` keys.
    """
    events = []
    skip = True
    with open(os.path.join(args.docs_dir, '..', 'README.md')) as f:
        for line in f:
            if skip:
                if 'Upcoming Events' in line:
                    skip = False
                continue
            line = line.strip()
            if not line:
                # Bug fix: the original tested the raw line, which always
                # carried a trailing newline and therefore never skipped.
                continue
            parts = line.split('](')
            if len(parts) != 2:
                continue
            tail = parts[1].split(') ')
            if len(tail) < 2:
                # Malformed entry (no closing ') ') -- previously this
                # raised an IndexError instead of being skipped.
                continue
            events.append({
                'signup_link': tail[0],
                'event_name': parts[0].replace('* [', ''),
                'event_date': tail[1].replace('on ', '').replace('.', '')
            })
    return events
| 32.229885
| 99
| 0.560271
|
4a0298a28b9e06066c66617ce17c034d2d130683
| 24,762
|
py
|
Python
|
myslideslive/slideslive.py
|
So-Cool/myslideslive
|
7b5a02d2421d283114f99ec18a070573c0ff1680
|
[
"BSD-3-Clause"
] | 1
|
2021-09-27T12:14:14.000Z
|
2021-09-27T12:14:14.000Z
|
myslideslive/slideslive.py
|
So-Cool/myslideslive
|
7b5a02d2421d283114f99ec18a070573c0ff1680
|
[
"BSD-3-Clause"
] | 1
|
2021-11-11T03:42:21.000Z
|
2021-11-11T05:51:08.000Z
|
myslideslive/slideslive.py
|
So-Cool/myslideslive
|
7b5a02d2421d283114f99ec18a070573c0ff1680
|
[
"BSD-3-Clause"
] | 2
|
2021-08-15T10:04:34.000Z
|
2021-09-27T12:14:19.000Z
|
# AUTOGENERATED! DO NOT EDIT! File to edit: src/slideslive.ipynb (unless otherwise specified).
__all__ = ['SL_REGEX', 'SL_INFO', 'SL_HTML', 'SL_CDN', 'YODA_CDN', 'url2id', 'get_sl_info', 'parse_slide_xml',
'get_slide_metadata', 'get_urls', 'download_slides', 'ffmpeg_concat_script', 'compose_ffmpeg_video',
'SlidesLive']
# Cell
#export
import json
import os
import re
import requests
import tempfile
import time
import warnings
from lxml.etree import HTML
from xml.etree import ElementTree
# Cell
#export
# Parse SlidesLive URL.
# Named groups: `id` -- numeric presentation id, `name` -- optional slug.
_SL_REGEX_STR = ('https://slideslive\\.(?:com|de)/'
                 '(?P<id>\\d+)'
                 '/*'
                 '(?P<name>.*)')
SL_REGEX = re.compile(_SL_REGEX_STR)
# SL INFO JSON endpoint (format with `id` and `token`)
SL_INFO = 'https://ben.slideslive.com/player/{id}?player_token={token}'
# SL HTML page (format with `id`)
SL_HTML = 'https://slideslive.com/{id}'
# SL CDNs serving the slide images / metadata
SL_CDN = 'https://cdn.slideslive.com/data/presentations/{video_id}/slides/{slide_type}/{slide_id}.jpg'
YODA_CDN = 'https://d2ygwrecguqg66.cloudfront.net/data/presentations/{id}/{data}'
# e.g.: https://d2ygwrecguqg66.cloudfront.net/data/presentations/38956531/slides/big/00793.jpg
#       https://d2ygwrecguqg66.cloudfront.net/data/presentations/38956531/v1/38956531.xml
#       https://d2ygwrecguqg66.cloudfront.net/data/presentations/38956531/v1/slides.json
# Cell
def url2id(sl_url):
    """Convert a SlidesLive URL into a ``(presentation_id, name)`` pair."""
    match = SL_REGEX.search(sl_url)
    if match is None or not match.group('id'):
        raise Exception('Could not parse the SlidesLive URL.')
    return match.group('id'), match.group('name')
# Cell
def get_sl_info(sl_id):
    """Pulls information about a SlidesLive presentation.

    Scrapes the presentation HTML page for the player token, then fetches
    and decodes the player-info JSON for ``sl_id`` (an int or a decimal
    string).  Raises ``TypeError`` on a malformed string id and
    ``RuntimeError`` when the token cannot be scraped.
    """
    # NOTE(review): non-int, non-str ids (e.g. a float) slip through this
    # check -- confirm whether that is intended.
    if (not isinstance(sl_id, int)
        and (isinstance(sl_id, str) and not sl_id.isdecimal())):
        raise TypeError('Incorrect SlidesLive ID format.')
    # get player token -- it lives in a data attribute of the player <div>
    html_source_url = SL_HTML.format(id=sl_id)
    html_source_request = requests.get(html_source_url)
    html_source = HTML(html_source_request.content.decode())
    sl_token = html_source.xpath('//div[@data-player-token]/@data-player-token')
    if not isinstance(sl_token, list) or len(sl_token) != 1:
        raise RuntimeError('Could not retrieve the data player token. '
                           'Please report this error.')
    sl_token = sl_token[0]
    # Fetch the player-info JSON using the scraped token.
    info_url = SL_INFO.format(id=sl_id, token=sl_token)
    info_request = requests.get(info_url)
    info_json = json.loads(info_request.content.decode())
    return info_json
# Cell
#hide
def parse_slide_xml(xml, mode='string'):
    """
    Parse the SlidesLive slide XML metadata.

    `mode` can either be `string` (``xml`` is the document text) or
    `file` (``xml`` is a path).  Returns a list of per-slide dicts with
    string values for `orderId`, `timeSec`, `time` and `slideName`.
    """
    if mode not in ('string', 'file'):
        raise ValueError('The xml parse mode can either be *string* or *file*.')
    known_tags = ('orderId', 'timeSec', 'time', 'slideName')
    if mode == 'file':
        with open(xml, 'r') as fd:
            root = ElementTree.parse(fd).getroot()
    else:
        root = ElementTree.fromstring(xml)
    if root.tag != 'videoContent':
        raise RuntimeError(f'Cannot process this XML structure: {root.tag}.')
    parsed = []
    for slide_node in root:
        if slide_node is None:
            continue
        if slide_node.tag != 'slide':
            raise RuntimeError(f'Unexpected slide type: {slide_node.tag}.')
        entry = {}
        for prop in slide_node:
            if prop.tag not in known_tags:
                raise RuntimeError(f'Unexpected slide specifier: {prop.tag}.')
            entry[prop.tag] = prop.text
        parsed.append(entry)
    return parsed
# Cell
def get_slide_metadata(sl_meta_url, approach='json'):
    """
    Processes metadata of slides associated with a SlidesLive presentation.

    `approach` is one of `json` or `xml`.
    It specifies the strategy for extracting slide metadata.
    Either way the result is normalised to the JSON layout:
    ``{'slides': [{'time': ms, 'type': 'image', 'image': {'name': ...}}, ...]}``.
    """
    if approach not in ('xml', 'json'):
        raise ValueError('The approach can either be *json* or *xml*.')
    meta_request = requests.get(sl_meta_url)
    meta_content = meta_request.content.decode()
    if approach == 'json':
        meta_data = json.loads(meta_content)
    else:
        assert approach == 'xml'
        # Convert the XML slide list into the JSON metadata shape,
        # ordered by the 1-based `orderId`.
        meta_data_ = parse_slide_xml(meta_content)
        meta_data_ = {int(d['orderId']): {'time': int(d['time']),
                                          'type': 'image',
                                          'image': {'name': d['slideName']}}
                      for d in meta_data_}
        meta_data = {'slides': [meta_data_[i] for i in sorted(meta_data_.keys())]}
    return meta_data
# Cell
def get_urls(video_id, slide_meta, slide_type='big',
             slide=(None, None), time=(None, None)):
    """
    Composes a list of URLs for slides of a given SlidesLive presentation.

    `video_id` specifies the ID of a SlidesLive presentation.
    `slide_meta` is the metadata of a SlidesLive presentation
    as given by the `get_slide_metadata` function.
    `slide_type` specifies the size of the slide.

    A subset of slides may be extracted with this function using either
    the `slide` or `time` parameter (but not both simultaneously).
    The `slide` parameter takes a range of slides to be extracted based
    on the slide ID numbers visible in a SlidesLive presentation.
    For example, `slide=(5, 7)` to extract slides 5--7, **inclusive**;
    `slide=(5, None)` to extract from slide 5 **onwards**; or
    `slide=(None, 6)` to extract up to slide 6 **inclusive**.
    The `time` parameter takes a range of time (visible in a SlidesLive
    presentation) for which slides are to be extracted.
    For example, `time=(5, 10)` to extract slides starting at second 5
    (**inclusive**) and ending before second 10 (**exclusive**);
    `time=(5, None)` to extract from second 5 **onwards**; or
    `time=(None, 50)` to extract up to second 50 **exclusive**.
    """
    if not isinstance(slide, tuple) or len(slide) != 2:
        raise TypeError('Numeric slide bound (slide) must be a 2-tuple.')
    if not isinstance(time, tuple) or len(time) != 2:
        raise TypeError('Time-based slide bound (time) must be a 2-tuple.')
    slide_given = slide[0] is not None or slide[1] is not None
    time_given = time[0] is not None or time[1] is not None
    if slide_given and time_given:
        raise RuntimeError('Both slide and time bounds cannot be used simultaneously.')
    if 'slide_qualities' in slide_meta:
        if slide_type not in slide_meta['slide_qualities']:
            raise ValueError('The slide type (slide_type) is not recognised.')

    slides = []
    if slide_given:
        lower_bound = -float('inf') if slide[0] is None else slide[0]
        upper_bound = float('inf') if slide[1] is None else slide[1]
        for i, s in enumerate(slide_meta['slides']):
            # Slide numbers visible on SlidesLive are 1-based.
            if lower_bound <= i + 1 <= upper_bound:
                slides.append(SL_CDN.format(
                    video_id=video_id,
                    slide_type=slide_type,
                    slide_id=s['image']['name']))
    elif time_given:
        lower_bound = -float('inf') if time[0] is None else time[0]
        upper_bound = float('inf') if time[1] is None else time[1]
        s = slide_meta['slides']
        for i in range(0, len(s) - 1):
            t_start = int(s[i]['time'] / 1000)    # inclusive
            t_end = int(s[i + 1]['time'] / 1000)  # exclusive
            # Keep the slide when its display interval overlaps the
            # requested [lower_bound, upper_bound) window.
            if t_start >= lower_bound and t_end <= upper_bound:
                add_slide = True
            elif (t_start < lower_bound and t_end > lower_bound
                    and t_end < upper_bound):
                add_slide = True
            elif (t_start < upper_bound and t_end > upper_bound
                    and t_start >= lower_bound):
                add_slide = True
            else:
                add_slide = False
            if add_slide:
                slides.append(SL_CDN.format(
                    video_id=video_id,
                    slide_type=slide_type,
                    slide_id=s[i]['image']['name']))
        # Handle the last slide separately as its end time is unknown.
        # Bug fix: the original referenced the loop variable here, which
        # raised NameError for presentations with a single slide.
        if s:
            t_start = int(s[-1]['time'] / 1000)  # inclusive
            if lower_bound <= t_start < upper_bound:
                slides.append(SL_CDN.format(
                    video_id=video_id,
                    slide_type=slide_type,
                    slide_id=s[-1]['image']['name']))
    else:
        # No bounds: every slide of the presentation.
        slides = [SL_CDN.format(video_id=video_id,
                                slide_type=slide_type,
                                slide_id=s['image']['name'])
                  for s in slide_meta['slides']]
    return slides
# Cell
def download_slides(url_list, sleep_time=.2, jobs=16,
                    directory=None, technique='python'):
    """
    Downloads files from a list of URLs (`url_list`).

    The destination directory is either `slides` created
    in the current working directory, or a path specified
    via the `directory` parameter.

    Three different download strategies are supported:

    * `technique='python'` -- downloads the images through
      Python's `requests` library one by one, pausing for
      `sleep_time` (`0.2` seconds, by default) after each
      download.
    * `technique='wget'` -- downloads the images by invoking
      `wget` for each image in the list, pausing for
      `sleep_time` (`0.2` seconds, by default) after each
      download.
    * `technique='wget+parallel'` -- downloads multiple images
      simultaneously -- specified by the `jobs` parameter
      (`16`, by default) -- by invoking `wget` through `parallel`.
    """
    if technique not in ('python', 'wget', 'wget+parallel'):
        raise ValueError('The download `technique` should be one of: '
                         'python, wget, wget+parallel.')
    if directory is None:
        slides_dir = os.path.join(os.getcwd(), 'slides')
    else:
        slides_dir = directory
    if os.path.exists(slides_dir):
        if not os.path.isdir(slides_dir):
            # NOTE(review): message is missing a space ('adirectory').
            raise RuntimeError(
                'The slides destination is a file '
                f'and not adirectory.\n({slides_dir})')
    else:
        os.mkdir(slides_dir)
    if technique in ('python', 'wget'):
        for url in url_list:
            fn = os.path.basename(url)
            fn_path = os.path.join(slides_dir, fn)
            # Never overwrite an existing download target.
            if os.path.exists(fn_path):
                if os.path.isfile(fn_path):
                    warnings.warn(f'File {fn_path} already exists; skipping download.')
                else:
                    warnings.warn(f'The file path -- {fn_path} -- is a directory; '
                                  'skipping download.')
            else:
                if technique == 'python':
                    with open(fn_path, 'wb') as f:
                        r = requests.get(url)
                        f.write(r.content)
                else:
                    assert technique == 'wget'
                    stream = os.popen(f'wget -P {slides_dir} {url}')
                    print(stream.read())
                # Be polite to the CDN between sequential downloads.
                time.sleep(sleep_time)
    else:
        assert technique == 'wget+parallel'
        # Feed all URLs to GNU parallel via a temporary file.
        with tempfile.NamedTemporaryFile(mode='w') as parallel_file:
            parallel_file.write('\n'.join(url_list))
            parallel_file.seek(0)
            stream = os.popen(f'parallel -j {jobs} wget -P {slides_dir} < {parallel_file.name}')
            print(stream.read())
# Cell
def ffmpeg_concat_script(slide_meta, slide_folder=None, last_duration=None,
                         slide=(None, None), time=(None, None)):
    """
    Builds an ffmpeg frame concatenation string from slide metadata.

    Since the duration of the very last slide cannot be inferred,
    it lasts for a user-specified amount of time
    (`last_duration`, `5` by default).
    `slide_folder` specifies the location of the slide images.
    By default, it is the `slides` folder in the current
    working directory.

    A subset of slides may be extracted with this function using either
    the `slide` or `time` parameter (but not both simultaneously).
    The `slide` parameter takes a range of slides to be extracted based
    on the slide ID numbers visible in a SlidesLive presentation.
    For example, `slide=(5, 7)` to extract slides 5--7, **inclusive**;
    `slide=(5, None)` to extract from slide 5 **onwards**; or
    `slide=(None, 6)` to extract up to slide 6 **inclusive**.
    The `time` parameter takes a range of time (visible in a SlidesLive
    presentation) for which slides are to be extracted.
    For example, `time=(5, 10)` to extract slides starting at second 5
    (**inclusive**) and ending before second 10 (**exclusive**);
    `time=(5, None)` to extract from second 5 **onwards**; or
    `time=(None, 50)` to extract up to second 50 **exclusive**.

    Returns a 3-tuple ``(script_text, start_seconds, end_seconds)``.
    """
    def _slide_exists(_slide_file):
        # Resolve the slide image path and fail fast if it is missing.
        _f = os.path.join(slide_folder, f"{_slide_file}.jpg")
        _f = os.path.abspath(_f)
        if not os.path.exists(_f) or not os.path.isfile(_f):
            raise RuntimeError(f'{_f} file does not exist.')
        return _f
    if not isinstance(slide, tuple) or len(slide) != 2:
        raise TypeError('Numeric slide bound (slide) must be a 2-tuple.')
    if not isinstance(time, tuple) or len(time) != 2:
        raise TypeError('Time-based slide bound (time) must be a 2-tuple.')
    slide_given = slide[0] is not None or slide[1] is not None
    time_given = time[0] is not None or time[1] is not None
    if slide_given and time_given:
        raise RuntimeError('Both slide and time bounds cannot be used simultaneously.')
    if slide_folder is None:
        slide_folder = os.path.join(os.getcwd(), 'slides')
    if not os.path.exists(slide_folder) or not os.path.isdir(slide_folder):
        raise ValueError(f'Given directory does not exist: {slide_folder}.')
    ffmpeg = []
    # Global start/end (in seconds) of the selected segment.
    glob_start, glob_end = None, None
    if slide_given:
        lower_bound = -float('inf') if slide[0] is None else slide[0]
        upper_bound = float('inf') if slide[1] is None else slide[1]
        for i in range(len(slide_meta['slides']) - 1):
            i_ = i + 1
            if i_ >= lower_bound and i_ <= upper_bound:
                t_start = slide_meta['slides'][i]['time']
                t_end = slide_meta['slides'][i_]['time']
                t_duration = (t_end - t_start) / 1000
                f = _slide_exists(slide_meta['slides'][i]['image']['name'])
                ffmpeg += [f"file '{f}'", f'duration {t_duration:.3f}']
                glob_start = t_start / 1000 if glob_start is None else glob_start
                glob_end = t_end / 1000
        else:
            # for/else: runs once the loop finishes, handling the last
            # slide whose end time is unknown.
            # NOTE(review): reuses loop variable `i` -- raises NameError
            # for single-slide presentations; confirm and fix upstream.
            i_ = i + 2
            if i_ >= lower_bound and i_ <= upper_bound:
                f = _slide_exists(slide_meta['slides'][i + 1]['image']['name'])
                last_duration = 5 if last_duration is None else last_duration
                ffmpeg += [f"file '{f}'", f'duration {last_duration:.3f}']
                _glob = slide_meta['slides'][i + 1]['time']
                glob_start = _glob / 1000 if glob_start is None else glob_start
                glob_end = (_glob / 1000) + last_duration
    elif time_given:
        lower_bound = -float('inf') if time[0] is None else time[0]
        upper_bound = float('inf') if time[1] is None else time[1]
        for i in range(len(slide_meta['slides']) - 1):
            t_start = int(slide_meta['slides'][i]['time'] / 1000)  # inclusive
            t_end = int(slide_meta['slides'][i + 1]['time'] / 1000)  # exclusive
            # Clip the slide's display interval to the requested window.
            if t_start >= lower_bound and t_end <= upper_bound:
                add_slide = True
                t_start_ = slide_meta['slides'][i]['time']
                t_end_ = slide_meta['slides'][i + 1]['time']
            elif (t_start < lower_bound and t_end > lower_bound
                    and t_end < upper_bound):
                add_slide = True
                t_start_ = lower_bound * 1000
                t_end_ = slide_meta['slides'][i + 1]['time']
            elif (t_start < upper_bound and t_end > upper_bound
                    and t_start >= lower_bound):
                add_slide = True
                t_start_ = slide_meta['slides'][i]['time']
                t_end_ = upper_bound * 1000
            else:
                add_slide = False
                t_start_ = None
                t_end_ = None
            if add_slide:
                f = _slide_exists(slide_meta['slides'][i]['image']['name'])
                t_duration = (t_end_ - t_start_) / 1000
                ffmpeg += [f"file '{f}'", f'duration {t_duration:.3f}']
                glob_start = t_start_ / 1000 if glob_start is None else glob_start
                glob_end = t_end_ / 1000
        else:  # handle the last slide
            # NOTE(review): same loop-variable reuse as above.
            t_start = int(slide_meta['slides'][i + 1]['time'] / 1000)  # inclusive
            t_end = None  # exclusive
            t_start_ = slide_meta['slides'][i + 1]['time'] / 1000
            if t_start >= lower_bound and t_start < upper_bound:
                f = _slide_exists(slide_meta['slides'][i + 1]['image']['name'])
                if upper_bound == float('inf'):
                    duration = 5 if last_duration is None else last_duration
                else:
                    if last_duration is None:
                        duration = upper_bound - t_start
                    else:
                        if t_start + last_duration < upper_bound:
                            duration = last_duration
                        else:
                            duration = upper_bound - t_start
                ffmpeg += [f"file '{f}'", f'duration {duration:.3f}']
                glob_start = t_start_ if glob_start is None else glob_start
                glob_end = t_start_ + duration
    else:
        # No bounds: the whole presentation.
        for i in range(len(slide_meta['slides']) - 1):
            i_ = i + 1
            t_start = slide_meta['slides'][i]['time']
            t_end = slide_meta['slides'][i_]['time']
            t_duration = (t_end - t_start) / 1000
            f = _slide_exists(slide_meta['slides'][i]['image']['name'])
            ffmpeg += [f"file '{f}'", f'duration {t_duration:.3f}']
            glob_start = t_start / 1000 if glob_start is None else glob_start
        else:
            f = _slide_exists(slide_meta['slides'][i + 1]['image']['name'])
            last_duration = 5 if last_duration is None else last_duration
            ffmpeg += [f"file '{f}'", f'duration {last_duration:.3f}']
            glob_end = (slide_meta['slides'][i + 1]['time'] / 1000) + last_duration
    # NOTE: the last image must be duplicated without duration due to a bug
    # in ffmpeg (https://trac.ffmpeg.org/wiki/Slideshow)
    if len(ffmpeg) > 1:
        ffmpeg.append(ffmpeg[-2])
    return '\n'.join(ffmpeg), glob_start, glob_end
# Cell
def compose_ffmpeg_video(ffmpeg_script, video_file=None):
    """
    Builds video slides from an ffmpeg script using the
    `ffmpeg -safe 0 -f concat -i ffmpeg_concat.txt -vsync vfr slides.mp4` command.
    """
    target = 'slides.mp4' if video_file is None else video_file
    if not target.endswith('.mp4'):
        target += '.mp4'
    if os.path.exists(target):
        raise RuntimeError(f'{target} video file already exists.')
    # Write the concat script to a temporary file and hand it to ffmpeg.
    with tempfile.NamedTemporaryFile(mode='w') as script_file:
        script_file.write(ffmpeg_script)
        script_file.seek(0)
        # -pix_fmt yuv420p
        stream = os.popen(
            f'ffmpeg -safe 0 -f concat -i {script_file.name} -vsync vfr {target}')
        print(stream.read())
# Cell
class SlidesLive():
    """
    Simplifies SlidesLive interaction.

    Should be initialised with SlidesLive presentation URL (`video_url`).
    Optionally, a destination folder for downloading slides may be specified
    (`slides_folder`).

    See `url2id`, `get_sl_info` and `get_slide_metadata` for more details.
    """
    def __init__(self, video_url, slides_folder=None):
        """Initialises SlidesLive."""
        self.slides_dir = slides_folder  # where slide images are stored
        self.slides_video = None         # output video path, set lazily
        self.slide = None                # remembered slide-number bounds
        self.time = None                 # remembered time bounds
        self.start_time = None           # start of the composed segment (s)
        self.end_time = None             # end of the composed segment (s)
        self.video_id, self.video_name = url2id(video_url)
        self.video_description = get_sl_info(self.video_id)
        # Prefer the JSON slide metadata; fall back to the legacy XML feed.
        if 'slides_json_url' in self.video_description:
            meta = get_slide_metadata(
                self.video_description['slides_json_url'], approach='json')
        else:
            meta = get_slide_metadata(
                self.video_description['slides_xml_url'], approach='xml')
        self.video_metadata = meta

    def get_slide_urls(self, slide_type='big', slide=None, time=None):
        """Returns a list of slide URLs -- see `get_urls` for more details."""
        # Remember the requested bounds so that later calls (e.g. building
        # the ffmpeg script) reuse the same slide/time selection.
        if self.slide is None and slide is None:
            self.slide = (None, None)
        elif self.slide is None and slide is not None:
            self.slide = slide
        elif self.slide is not None and slide is None:
            pass  # keep the previously stored bounds
        elif self.slide is not None and slide is not None:
            self.slide = slide  # explicit argument overrides stored bounds
        if self.time is None and time is None:
            self.time = (None, None)
        elif self.time is None and time is not None:
            self.time = time
        elif self.time is not None and time is None:
            pass  # keep the previously stored bounds
        elif self.time is not None and time is not None:
            self.time = time  # explicit argument overrides stored bounds
        return get_urls(self.video_id, self.video_metadata,
                        slide_type=slide_type,
                        slide=self.slide, time=self.time)

    def download_slides(self, slide_type='big', slide=None, time=None,
                        sleep_time=.2, jobs=16, directory=None, technique='python'):
        """Downloads a collection of slides -- see `get_urls` and `download_slide` for more details."""
        if directory is not None:
            self.slides_dir = directory
        elif self.slides_dir is None:
            # Default to a folder named after the presentation id.
            self.slides_dir = self.video_id
        url_list = self.get_slide_urls(slide_type=slide_type,
                                       slide=slide, time=time)
        download_slides(url_list, sleep_time=sleep_time, jobs=jobs,
                        directory=self.slides_dir, technique=technique)

    def get_ffmpeg_script(self, slide_folder=None, last_duration=None,
                          slide=None, time=None):
        """Composes ffmpeg script -- see `ffmpeg_concat_script` for more details."""
        if slide_folder is not None:
            self.slides_dir = slide_folder
        elif self.slides_dir is None:
            self.slides_dir = self.video_id
        # Same bound-remembering logic as in `get_slide_urls`.
        if self.slide is None and slide is None:
            self.slide = (None, None)
        elif self.slide is None and slide is not None:
            self.slide = slide
        elif self.slide is not None and slide is None:
            pass
        elif self.slide is not None and slide is not None:
            self.slide = slide
        if self.time is None and time is None:
            self.time = (None, None)
        elif self.time is None and time is not None:
            self.time = time
        elif self.time is not None and time is None:
            pass
        elif self.time is not None and time is not None:
            self.time = time
        return ffmpeg_concat_script(self.video_metadata, slide_folder=self.slides_dir,
                                    last_duration=last_duration, slide=self.slide, time=self.time)

    def compose_video(self, video_file=None,
                      slide_folder=None, last_duration=None,
                      slide=None, time=None):
        """Builds slides video -- see `ffmpeg_concat_script` and `compose_ffmpeg_video` for more details."""
        # Derive an output file name when one was not given explicitly.
        if video_file is not None:
            self.slides_video = video_file
        elif self.slides_dir is None and self.slides_video is None:
            self.slides_video = f'{self.video_id}.mp4'
        elif self.slides_dir is not None and self.slides_video is None:
            self.slides_video = f'{self.slides_dir}.mp4'
        if slide_folder is not None:
            self.slides_dir = slide_folder
        elif slide_folder is None and self.slides_dir is None:
            self.slides_dir = self.video_id
        ffmpeg_script, self.start_time, self.end_time = self.get_ffmpeg_script(
            slide_folder=self.slides_dir, last_duration=last_duration,
            slide=slide, time=time)
        compose_ffmpeg_video(ffmpeg_script, video_file=self.slides_video)
        print(f'\n\nExtracted time segment in seconds:\n    {self.start_time}--{self.end_time}')
| 41.477387
| 111
| 0.598175
|
4a0299131bc15c683e45abe3af7b130c7603b24a
| 12,664
|
py
|
Python
|
mars/api.py
|
xccheng/mars
|
8146d1b7d3f3bc2a652c414a336a2f884a06a108
|
[
"Apache-2.0"
] | 1
|
2020-11-05T05:53:00.000Z
|
2020-11-05T05:53:00.000Z
|
mars/api.py
|
xccheng/mars
|
8146d1b7d3f3bc2a652c414a336a2f884a06a108
|
[
"Apache-2.0"
] | null | null | null |
mars/api.py
|
xccheng/mars
|
8146d1b7d3f3bc2a652c414a336a2f884a06a108
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import logging
import random
import time
import uuid
from .actors import new_client, ActorNotExist
from .errors import GraphNotExists
from .scheduler import SessionActor, GraphActor, GraphMetaActor, ResourceActor, \
SessionManagerActor, ChunkMetaClient
from .scheduler.node_info import NodeInfoActor
from .scheduler.utils import SchedulerClusterInfoActor
from .worker.transfer import ResultSenderActor, ReceiverManagerActor
from .tensor.utils import slice_split
from .serialize import dataserializer
from .utils import tokenize, merge_chunks, arrow_array_to_objects
logger = logging.getLogger(__name__)
class MarsAPI(object):
def __init__(self, scheduler_ip):
self.__schedulers_cache = None
self._session_manager = None
self.actor_client = new_client()
self.cluster_info = self.actor_client.actor_ref(
SchedulerClusterInfoActor.default_uid(), address=scheduler_ip)
self.chunk_meta_client = ChunkMetaClient(self.actor_client, self.cluster_info)
@property
def session_manager(self):
if self._session_manager is None:
self._session_manager = self.get_actor_ref(SessionManagerActor.default_uid())
return self._session_manager
def get_schedulers(self):
if not self.__schedulers_cache:
self.__schedulers_cache = self.cluster_info.get_schedulers()
return self.__schedulers_cache
def get_scheduler(self, uid):
schedulers = self.get_schedulers()
if len(schedulers) == 1:
return schedulers[0]
else:
return self.cluster_info.get_scheduler(uid)
def get_actor_ref(self, uid):
return self.actor_client.actor_ref(uid, address=self.get_scheduler(uid))
def get_graph_meta_ref(self, session_id, graph_key):
graph_uid = GraphActor.gen_uid(session_id, graph_key)
graph_meta_uid = GraphMetaActor.gen_uid(session_id, graph_key)
graph_addr = self.get_scheduler(graph_uid)
return self.actor_client.actor_ref(graph_meta_uid, address=graph_addr)
def get_schedulers_info(self):
schedulers = self.get_schedulers()
infos = dict()
for scheduler in schedulers:
info_ref = self.actor_client.actor_ref(NodeInfoActor.default_uid(), address=scheduler)
infos[scheduler] = info_ref.get_info()
return infos
def count_workers(self):
try:
uid = ResourceActor.default_uid()
return self.get_actor_ref(uid).get_worker_count()
except KeyError:
return 0
def rescale_workers(self, new_scale, min_workers=None, wait=True, timeout=None):
min_workers = min_workers or new_scale
resource_ref = self.get_actor_ref(ResourceActor.default_uid())
endpoints = self.cluster_info.rescale_workers(new_scale)
check_start_time = time.time()
while wait:
if min_workers <= resource_ref.get_worker_count() <= new_scale:
break
self.actor_client.sleep(0.1)
if timeout and time.time() - check_start_time > timeout:
raise TimeoutError
return endpoints
def get_workers_meta(self):
resource_uid = ResourceActor.default_uid()
resource_ref = self.get_actor_ref(resource_uid)
return resource_ref.get_workers_meta()
def create_session(self, session_id, **kw):
self.session_manager.create_session(session_id, **kw)
def delete_session(self, session_id):
self.session_manager.delete_session(session_id)
def has_session(self, session_id):
"""
Check if the session with given session_id exists.
"""
return self.session_manager.has_session(session_id)
def submit_graph(self, session_id, serialized_graph, graph_key, target,
names=None, compose=True, wait=True):
session_uid = SessionActor.gen_uid(session_id)
session_ref = self.get_actor_ref(session_uid)
session_ref.submit_tileable_graph(
serialized_graph, graph_key, target, names=names, compose=compose, _tell=not wait)
def create_mutable_tensor(self, session_id, name, shape, dtype, *args, **kwargs):
session_uid = SessionActor.gen_uid(session_id)
session_ref = self.get_actor_ref(session_uid)
return session_ref.create_mutable_tensor(name, shape, dtype, *args, **kwargs)
def get_mutable_tensor(self, session_id, name):
session_uid = SessionActor.gen_uid(session_id)
session_ref = self.get_actor_ref(session_uid)
return session_ref.get_mutable_tensor(name)
def send_chunk_records(self, session_id, name, chunk_records_to_send, directly=True):
from .worker.quota import MemQuotaActor
from .worker.transfer import put_remote_chunk
session_uid = SessionActor.gen_uid(session_id)
session_ref = self.get_actor_ref(session_uid)
chunk_records = []
for chunk_key, endpoint, records in chunk_records_to_send:
record_chunk_key = tokenize(chunk_key, uuid.uuid4().hex)
# register quota
quota_ref = self.actor_client.actor_ref(MemQuotaActor.default_uid(), address=endpoint)
quota_ref.request_batch_quota({record_chunk_key: records.nbytes})
# send record chunk
receiver_manager_ref = self.actor_client.actor_ref(
ReceiverManagerActor.default_uid(), address=endpoint)
put_remote_chunk(session_id, record_chunk_key, records, receiver_manager_ref)
chunk_records.append((chunk_key, record_chunk_key))
# register the record chunk to MutableTensorActor
session_ref.append_chunk_records(name, chunk_records)
def seal(self, session_id, name):
session_uid = SessionActor.gen_uid(session_id)
session_ref = self.get_actor_ref(session_uid)
return session_ref.seal(name)
def delete_graph(self, session_id, graph_key):
graph_uid = GraphActor.gen_uid(session_id, graph_key)
graph_ref = self.get_actor_ref(graph_uid)
graph_ref.destroy()
def stop_graph(self, session_id, graph_key):
from .scheduler import GraphState
graph_meta_ref = self.get_graph_meta_ref(session_id, graph_key)
graph_meta_ref.set_state(GraphState.CANCELLING)
graph_uid = GraphActor.gen_uid(session_id, graph_key)
graph_ref = self.get_actor_ref(graph_uid)
graph_ref.stop_graph()
def get_graph_state(self, session_id, graph_key):
from .scheduler import GraphState
graph_meta_ref = self.get_graph_meta_ref(session_id, graph_key)
try:
state_obj = graph_meta_ref.get_state()
state = state_obj.value if state_obj else 'preparing'
except ActorNotExist:
raise GraphNotExists
return GraphState(state.lower())
def get_graph_exc_info(self, session_id, graph_key):
graph_meta_ref = self.get_graph_meta_ref(session_id, graph_key)
try:
return graph_meta_ref.get_exc_info()
except ActorNotExist:
raise GraphNotExists
def wait_graph_finish(self, session_id, graph_key, timeout=None):
graph_meta_ref = self.get_graph_meta_ref(session_id, graph_key)
self.actor_client.actor_ref(graph_meta_ref.get_wait_ref()).wait(timeout)
def fetch_chunks_data(self, session_id, chunk_indexes, chunk_keys, nsplits,
index_obj=None, serial=True, serial_type=None,
compressions=None, pickle_protocol=None):
chunk_index_to_key = dict((index, key) for index, key in zip(chunk_indexes, chunk_keys))
if not index_obj:
chunk_results = dict((idx, self.fetch_chunk_data(session_id, k)) for
idx, k in zip(chunk_indexes, chunk_keys))
else:
chunk_results = dict()
indexes = dict()
for axis, s in enumerate(index_obj):
idx_to_slices = slice_split(s, nsplits[axis])
indexes[axis] = idx_to_slices
for chunk_index in itertools.product(*[v.keys() for v in indexes.values()]):
# slice_obj: use tuple, since numpy complains
#
# FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use
# `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array
# index, `arr[np.array(seq)]`, which will result either in an error or a different result.
slice_obj = tuple(indexes[axis][chunk_idx] for axis, chunk_idx in enumerate(chunk_index))
chunk_key = chunk_index_to_key[chunk_index]
chunk_results[chunk_index] = self.fetch_chunk_data(session_id, chunk_key, slice_obj)
chunk_results = [(idx, dataserializer.loads(f.result())) for
idx, f in chunk_results.items()]
if len(chunk_results) == 1:
ret = chunk_results[0][1]
else:
ret = merge_chunks(chunk_results)
if not serial:
return ret
compressions = max(compressions) if compressions else dataserializer.CompressType.NONE
if serial_type == dataserializer.SerialType.PICKLE:
ret = arrow_array_to_objects(ret)
return dataserializer.dumps(ret, serial_type=serial_type, compress=compressions,
pickle_protocol=pickle_protocol)
def fetch_data(self, session_id, graph_key, tileable_key, index_obj=None,
serial=True, serial_type=None, compressions=None, pickle_protocol=None):
graph_uid = GraphActor.gen_uid(session_id, graph_key)
graph_ref = self.get_actor_ref(graph_uid)
nsplits, chunk_keys, chunk_indexes = graph_ref.get_tileable_metas([tileable_key])[0]
return self.fetch_chunks_data(session_id, chunk_indexes, chunk_keys, nsplits,
index_obj=index_obj, serial=serial, serial_type=serial_type,
compressions=compressions, pickle_protocol=pickle_protocol)
def fetch_chunk_data(self, session_id, chunk_key, index_obj=None):
    """Request one (optionally sliced) chunk from a worker that stores it.

    Picks a random holder of the chunk and returns a future
    (``_wait=False``). Raises KeyError when no worker has the chunk.
    """
    workers = self.chunk_meta_client.get_workers(session_id, chunk_key)
    if workers is None:
        raise KeyError(f'Chunk key {chunk_key} not exist in cluster')
    chosen_worker = random.choice(workers)
    sender_ref = self.actor_client.actor_ref(
        ResultSenderActor.default_uid(), address=chosen_worker)
    return sender_ref.fetch_data(session_id, chunk_key, index_obj, _wait=False)
def get_chunk_metas(self, session_id, chunk_keys):
    # Thin delegate: batch-fetch metadata for the given chunk keys via the
    # chunk meta client.
    return self.chunk_meta_client.batch_get_chunk_meta(session_id, chunk_keys)
def get_tileable_chunk_metas(self, session_id, tileable_key):
    """Return chunk metadata of a tileable, resolved through its session.

    The session actor knows which graph produced the tileable; that graph
    actor is then asked for the chunk metadata.
    """
    session_ref = self.get_actor_ref(SessionActor.gen_uid(session_id))
    graph_ref = self.actor_client.actor_ref(
        session_ref.get_graph_ref_by_tileable_key(tileable_key))
    return graph_ref.get_tileable_chunk_metas([tileable_key])[0]
def delete_data(self, session_id, graph_key, tileable_key, wait=False):
    """Free a tileable's data; fire-and-forget unless ``wait`` is True."""
    graph_ref = self.get_actor_ref(GraphActor.gen_uid(session_id, graph_key))
    graph_ref.free_tileable_data(tileable_key, wait=wait, _tell=not wait)
def get_tileable_nsplits(self, session_id, graph_key, tileable_key):
    """Return only the ``nsplits`` of a tileable.

    nsplits is essential for operators like `reshape`, and the overall
    shape can be derived from it, so other fields are filtered out.
    """
    graph_ref = self.get_actor_ref(GraphActor.gen_uid(session_id, graph_key))
    metas = graph_ref.get_tileable_metas([tileable_key], filter_fields=['nsplits'])
    return metas[0][0]
def get_tileable_key_by_name(self, session_id, name):
    """Look up the tileable key registered under ``name`` in the session."""
    session_ref = self.get_actor_ref(SessionActor.gen_uid(session_id))
    return session_ref.get_tileable_key(name)
| 45.884058
| 109
| 0.691646
|
4a0299733b858066401e09c8c033ed2d3fd8b1aa
| 2,845
|
py
|
Python
|
tests/test_spacingd.py
|
crtrentz/MONAI
|
355db48e46047a18e3bb9dbd83f424a8ad0a2622
|
[
"Apache-2.0"
] | null | null | null |
tests/test_spacingd.py
|
crtrentz/MONAI
|
355db48e46047a18e3bb9dbd83f424a8ad0a2622
|
[
"Apache-2.0"
] | null | null | null |
tests/test_spacingd.py
|
crtrentz/MONAI
|
355db48e46047a18e3bb9dbd83f424a8ad0a2622
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from monai.transforms import Spacingd
class TestSpacingDCase(unittest.TestCase):
    """Unit tests for the dictionary-based ``Spacingd`` transform."""

    def test_spacingd_3d(self):
        # Resample a 2-channel 3D volume from unit spacing to (1, 2, 1.4).
        payload = {"image": np.ones((2, 10, 15, 20)), "image_meta": {"affine": np.eye(4)}}
        transform = Spacingd(keys="image", pixdim=(1, 2, 1.4))
        out = transform(payload)
        self.assertEqual(tuple(sorted(out)), ("image", "image_meta"))
        np.testing.assert_allclose(out["image"].shape, (2, 10, 8, 15))
        np.testing.assert_allclose(out["image_meta"]["affine"], np.diag([1, 2, 1.4, 1.0]))

    def test_spacingd_2d(self):
        # 2D input: only the first two pixdim entries should take effect.
        payload = {"image": np.ones((2, 10, 20)), "image_meta": {"affine": np.eye(3)}}
        transform = Spacingd(keys="image", pixdim=(1, 2, 1.4))
        out = transform(payload)
        self.assertEqual(tuple(sorted(out)), ("image", "image_meta"))
        np.testing.assert_allclose(out["image"].shape, (2, 10, 10))
        np.testing.assert_allclose(out["image_meta"]["affine"], np.diag((1, 2, 1)))

    def test_interp_all(self):
        # A single interpolation order applied to every key.
        payload = {
            "image": np.arange(20).reshape((2, 1, 10)),
            "seg": np.ones((2, 1, 10)),
            "image_meta": {"affine": np.eye(4)},
            "seg_meta": {"affine": np.eye(4)},
        }
        transform = Spacingd(keys=("image", "seg"), interp_order="nearest", pixdim=(1, 0.2,))
        out = transform(payload)
        self.assertEqual(tuple(sorted(out)), ("image", "image_meta", "seg", "seg_meta"))
        np.testing.assert_allclose(out["image"].shape, (2, 1, 46))
        np.testing.assert_allclose(out["image_meta"]["affine"], np.diag((1, 0.2, 1, 1)))

    def test_interp_sep(self):
        # Per-key interpolation orders (image bilinear, segmentation nearest).
        payload = {
            "image": np.ones((2, 1, 10)),
            "seg": np.ones((2, 1, 10)),
            "image_meta": {"affine": np.eye(4)},
            "seg_meta": {"affine": np.eye(4)},
        }
        transform = Spacingd(keys=("image", "seg"), interp_order=("bilinear", "nearest"), pixdim=(1, 0.2,))
        out = transform(payload)
        self.assertEqual(tuple(sorted(out)), ("image", "image_meta", "seg", "seg_meta"))
        np.testing.assert_allclose(out["image"].shape, (2, 1, 46))
        np.testing.assert_allclose(out["image_meta"]["affine"], np.diag((1, 0.2, 1, 1)))
# Allow running this test module directly with `python test_spacingd.py`.
if __name__ == "__main__":
    unittest.main()
| 43.769231
| 105
| 0.608084
|
4a029a9c81093b8013f1ef982017e27094b9b606
| 3,477
|
py
|
Python
|
osbuild/inputs.py
|
PaulWay/osbuild
|
3731a323084896dd172733ac72f6b8f4ac42c318
|
[
"Apache-2.0"
] | 81
|
2019-08-05T15:43:27.000Z
|
2022-03-27T11:10:58.000Z
|
osbuild/inputs.py
|
PaulWay/osbuild
|
3731a323084896dd172733ac72f6b8f4ac42c318
|
[
"Apache-2.0"
] | 604
|
2019-07-29T09:34:19.000Z
|
2022-03-30T13:13:36.000Z
|
osbuild/inputs.py
|
PaulWay/osbuild
|
3731a323084896dd172733ac72f6b8f4ac42c318
|
[
"Apache-2.0"
] | 50
|
2019-07-28T19:35:45.000Z
|
2022-03-09T08:57:25.000Z
|
"""
Pipeline inputs
A pipeline input provides data in various forms to a `Stage`, like
files, OSTree commits or trees. The content can either be obtained
via a `Source` or have been built by a `Pipeline`. Thus an `Input`
is the bridge between various types of content that originate from
different types of sources.
The acceptable origin of the data is determined by the `Input`
itself. What types of input are allowed and required is determined
by the `Stage`.
To osbuild itself this is all transparent. The only data visible to
osbuild is the path. The input options are just passed to the
`Input` as is and the result is forwarded to the `Stage`.
"""
import abc
import hashlib
import json
import os
from typing import Dict, Optional, Tuple
from osbuild import host
from osbuild.util.types import PathLike
from .objectstore import StoreClient, StoreServer
class Input:
    """
    A single input with its corresponding options.

    Couples a resolver (``info``), the origin of the content, a set of
    references with per-reference options, and global options. The ``id``
    is a content hash over everything except ``name`` (see `calc_id`).
    """

    def __init__(self, name, info, origin: str, options: Dict):
        self.name = name
        self.info = info
        self.origin = origin
        self.refs = {}
        self.options = options or {}
        self.id = self.calc_id()

    def add_reference(self, ref, options: Optional[Dict] = None):
        """Register `ref` with optional per-reference options; refresh `id`."""
        self.refs[ref] = options or {}
        self.id = self.calc_id()

    def calc_id(self) -> str:
        # NB: The input `name` is not included here on purpose since it
        # is either prescribed by the stage itself and thus not actual
        # parameter or arbitrary and chosen by the manifest generator
        # and thus can be changed without affecting the contents
        m = hashlib.sha256()
        m.update(json.dumps(self.info.name, sort_keys=True).encode())
        m.update(json.dumps(self.origin, sort_keys=True).encode())
        m.update(json.dumps(self.refs, sort_keys=True).encode())
        m.update(json.dumps(self.options, sort_keys=True).encode())
        return m.hexdigest()

    # BUGFIX: the return annotation previously claimed `Tuple[str, Dict]`,
    # but the method returns just the (mutated) reply dict. Collaborator
    # annotations are quoted forward references so they are not evaluated
    # at definition time.
    def map(self,
            mgr: "host.ServiceManager",
            storeapi: "StoreServer",
            root: "PathLike") -> Dict:
        """Ask the input service to provide this input under `root`.

        Starts the service for this input, sends a "map" request and
        returns the reply with `path` rewritten to be relative to `root`.
        Raises RuntimeError when the service returns a path outside `root`.
        """
        target = os.path.join(root, self.name)
        os.makedirs(target)

        args = {
            # mandatory bits
            "origin": self.origin,
            "refs": self.refs,
            "target": target,

            # global options
            "options": self.options,

            # API endpoints
            "api": {
                "store": storeapi.socket_address
            }
        }

        client = mgr.start(f"input/{self.name}", self.info.path)
        reply = client.call("map", args)

        path = reply["path"]
        if not path.startswith(root):
            raise RuntimeError(f"returned {path} has wrong prefix")

        reply["path"] = os.path.relpath(path, root)
        return reply
class InputService(host.Service):
    """Input host service"""

    @abc.abstractmethod
    def map(self, store, origin, refs, target, options):
        # Provide the requested refs under `target`; concrete input services
        # must implement this.
        pass

    def unmap(self):
        # Default no-op; subclasses override to undo whatever `map` set up.
        pass

    def stop(self):
        # Service shutdown hook: make sure mappings are released.
        self.unmap()

    def dispatch(self, method: str, args, _fds):
        # RPC entry point; only the "map" method is supported.
        if method == "map":
            # Connect back to the store API endpoint advertised by the host.
            store = StoreClient(connect_to=args["api"]["store"])
            r = self.map(store,
                         args["origin"],
                         args["refs"],
                         args["target"],
                         args["options"])
            return r, None

        raise host.ProtocolError("Unknown method")
| 28.5
| 71
| 0.605695
|
4a029ab518c1e0e3d2b714054b4229f5f7648d8b
| 600
|
py
|
Python
|
day-11-string-concatenation.py
|
roshansinghbisht/hello-python
|
595418a47e66217ed8759c91cdb535e8fa88412b
|
[
"MIT"
] | null | null | null |
day-11-string-concatenation.py
|
roshansinghbisht/hello-python
|
595418a47e66217ed8759c91cdb535e8fa88412b
|
[
"MIT"
] | null | null | null |
day-11-string-concatenation.py
|
roshansinghbisht/hello-python
|
595418a47e66217ed8759c91cdb535e8fa88412b
|
[
"MIT"
] | null | null | null |
# You’ll be provided with example data for a user, the time of their
# visit and the site they accessed. You should use the variables provided
# and the techniques you’ve learned to print a log message like this one
# (with the username, url, and timestamp replaced with values from the
# appropriate variables):
#
# Yogesh accessed the site http://petshop.com/pets/reptiles/pythons at
# 16:20.
# Example visit data for the log message.
username = "Ash"
timestamp = "06:45"
url = "http://petshop.com/pets/mammals/cats"

# Emit the access log line, e.g.
# "Ash accessed the site http://petshop.com/pets/mammals/cats at 06:45."
print(f"{username} accessed the site {url} at {timestamp}.")
| 35.294118
| 73
| 0.738333
|
4a029acef1d3e70c4ba772916ea991af5f8bec50
| 265
|
py
|
Python
|
setup.py
|
jlaine/auditwheel
|
f3c7691f258de76c11f679ab85826b730c658c14
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
jlaine/auditwheel
|
f3c7691f258de76c11f679ab85826b730c658c14
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
jlaine/auditwheel
|
f3c7691f258de76c11f679ab85826b730c658c14
|
[
"BSD-2-Clause"
] | 1
|
2022-03-04T14:09:27.000Z
|
2022-03-04T14:09:27.000Z
|
from setuptools import setup

# Optional dependency groups. "develop" aggregates every group as captured
# here; the test requirements are additionally needed for coverage runs.
extras = {
    "test": ["pytest>=3.4", "jsonschema", "pypatchelf", "pretend", "docker"],
    "coverage": ["pytest-cov"],
}
extras["develop"] = [requirement for group in extras.values() for requirement in group]
extras["coverage"] = extras["coverage"] + extras["test"]

setup(extras_require=extras)
| 24.090909
| 77
| 0.641509
|
4a029b243262d7637e1d54d82d270173c68aae9c
| 2,406
|
py
|
Python
|
tests/analysis/manager.py
|
pyllyukko/plaso
|
7533db2d1035ca71d264d6281ebd5db2d073c587
|
[
"Apache-2.0"
] | 1,253
|
2015-01-02T13:58:02.000Z
|
2022-03-31T08:43:39.000Z
|
tests/analysis/manager.py
|
pyllyukko/plaso
|
7533db2d1035ca71d264d6281ebd5db2d073c587
|
[
"Apache-2.0"
] | 3,388
|
2015-01-02T11:17:58.000Z
|
2022-03-30T10:21:45.000Z
|
tests/analysis/manager.py
|
pyllyukko/plaso
|
7533db2d1035ca71d264d6281ebd5db2d073c587
|
[
"Apache-2.0"
] | 376
|
2015-01-20T07:04:54.000Z
|
2022-03-04T23:53:00.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the analysis plugin manager."""
import unittest
from plaso.analysis import interface
from plaso.analysis import manager
from tests import test_lib as shared_test_lib
class TestAnalysisPlugin(interface.AnalysisPlugin):
  """Test analysis plugin."""

  # Name under which this stub plugin registers with the manager.
  NAME = 'test_plugin'

  def CompileReport(self, mediator):
    """Compiles a report of the analysis.

    After the plugin has received every copy of an event to
    analyze this function will be called so that the report
    can be assembled.

    Args:
      mediator (AnalysisMediator): mediates interactions between analysis
          plugins and other components, such as storage and dfvfs.
    """
    # Stub: no report is produced; the class only needs to be registrable.
    return

  # pylint: disable=arguments-differ,unused-argument
  def ExamineEvent(self, mediator, event, **unused_kwargs):
    """Analyzes an event object.

    Args:
      mediator (AnalysisMediator): mediates interactions between analysis
          plugins and other components, such as storage and dfvfs.
      event (EventObject): event.
    """
    # Stub: events are ignored.
    return
class AnalysisPluginManagerTest(shared_test_lib.BaseTestCase):
  """Tests for the analysis plugin manager."""

  # pylint: disable=protected-access

  def testPluginRegistration(self):
    """Tests the RegisterPlugin and DeregisterPlugin functions."""
    # Capture the registry size so the test is independent of plugins
    # registered elsewhere.
    number_of_plugins = len(manager.AnalysisPluginManager._plugin_classes)

    manager.AnalysisPluginManager.RegisterPlugin(TestAnalysisPlugin)
    self.assertEqual(
        len(manager.AnalysisPluginManager._plugin_classes),
        number_of_plugins + 1)

    # Registering the same plugin class twice must fail.
    with self.assertRaises(KeyError):
      manager.AnalysisPluginManager.RegisterPlugin(TestAnalysisPlugin)

    manager.AnalysisPluginManager.DeregisterPlugin(TestAnalysisPlugin)
    self.assertEqual(
        len(manager.AnalysisPluginManager._plugin_classes),
        number_of_plugins)

  def testGetPlugins(self):
    """Tests the GetPlugins function."""
    manager.AnalysisPluginManager.RegisterPlugin(TestAnalysisPlugin)

    # Use set-comprehension to create a set of the analysis plugin names.
    plugin_set = {name for name, _ in list(
        manager.AnalysisPluginManager.GetPlugins())}
    self.assertTrue('test_plugin' in plugin_set)

    # Clean up the registry so other tests see the original state.
    manager.AnalysisPluginManager.DeregisterPlugin(TestAnalysisPlugin)
# TODO: add tests for GetPluginNames.


# Allow running these tests directly with `python manager.py`.
if __name__ == '__main__':
  unittest.main()
| 29.703704
| 74
| 0.741895
|
4a029c87cc996c6d6b66ef634e2ce8d2ae388296
| 4,114
|
py
|
Python
|
plugins/module_utils/linode/instance.py
|
muradm/ansible-linode-cloud
|
dea06d529cf4725f9ee631de844c905614aa5c9b
|
[
"Apache-2.0"
] | 3
|
2021-01-25T15:10:35.000Z
|
2021-09-19T20:39:16.000Z
|
plugins/module_utils/linode/instance.py
|
muradm/ansible-linode-cloud
|
dea06d529cf4725f9ee631de844c905614aa5c9b
|
[
"Apache-2.0"
] | 1
|
2021-03-02T16:24:31.000Z
|
2021-03-02T17:27:17.000Z
|
plugins/module_utils/linode/instance.py
|
muradm/ansible-linode-cloud
|
dea06d529cf4725f9ee631de844c905614aa5c9b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright: (c) 2020, muradm <mail@muradm.net>
# Apache License, Version 2.0 (see https://opensource.org/licenses/Apache-2.0)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from copy import deepcopy
from datetime import datetime
from .client import linode_wait_for_status
from .error import linode_raise_client_error
from .util import _filter_dict_keys, _update_if_needed
def instance_find(client, label):
    """Return the Linode instance labelled ``label``, or None when absent.

    Any API failure other than "no match" is translated into the module's
    client error.
    """
    from linode_api4 import Instance

    try:
        matches = client.linode.instances(Instance.label == label)
        return matches[0]
    except IndexError:
        # Empty result set: no instance carries this label.
        return None
    except Exception as e:
        linode_raise_client_error(e)
def instance_create(client, args, check_mode=False):
    """Create a Linode instance from `args`; fake the result in check mode.

    Returns the raw instance JSON, with `root_pass` merged in when the API
    hands one back. Blocks until the instance reaches "running".
    """
    # These keys are passed explicitly below; everything else is forwarded
    # as extra keyword arguments.
    non_optional = ['region', 'type', 'image', 'label', 'authorized_keys']
    remaining = _filter_dict_keys(args, non_optional)
    try:
        if not check_mode:
            response = client.linode.instance_create(
                ltype=args['type'],
                region=args['region'],
                image=args['image'],
                authorized_keys=args['authorized_keys'],
                label=args['label'],
                **remaining
            )

            # The API returns (instance, generated_root_pass) when it
            # generated a password, otherwise just the instance.
            if isinstance(response, tuple):
                instance, root_pass = response
                result = deepcopy(instance._raw_json)
                result['root_pass'] = root_pass
            else:
                instance = response
                result = deepcopy(instance._raw_json)

            # Optionally set the reverse DNS of the first public IPv4 address
            # (falsy value resets it to the default).
            if 'ipv4_public_rdns' in args:
                instance.ips.ipv4.public[0].rdns = '' if not args['ipv4_public_rdns'] else args['ipv4_public_rdns']
                instance.ips.ipv4.public[0].save()

            linode_wait_for_status(instance, "running")
        else:
            # Check mode: report what would be created without calling the API.
            result = _fake_instance(args)

        return result
    except Exception as e:
        linode_raise_client_error(e)
def instance_update(client, instance, args, check_mode=False):
    """Reconcile mutable fields of an existing instance with `args`.

    Checks group, tags, private IP allocation and public rDNS; applies
    changes unless `check_mode` is set. Returns `(updated, raw_json)` where
    `updated` reports whether any field changed (or would change).
    """
    result = deepcopy(instance._raw_json)
    updated = False

    try:
        # BUGFIX: call the helper first and OR the flag afterwards. The old
        # `updated = updated or _update_if_needed(...)` short-circuited, so
        # once one field flagged an update the remaining fields were never
        # checked (e.g. a group change silently skipped the tags update).
        updated = _update_if_needed(
            instance, result, args, 'group', check_mode) or updated
        updated = _update_if_needed(
            instance, result, args, 'tags', check_mode,
            to_be_sorted=True) or updated

        if updated and not check_mode:
            instance.save()

        # Allocate a private IPv4 address when requested and not yet present.
        if 'private_ip' in args and args['private_ip']:
            if len(instance.ips.ipv4.private) == 0:
                updated = True
                if not check_mode:
                    client.networking.ip_allocate(instance, public=False)

        # Reverse DNS of the first public IPv4 address (falsy value resets it).
        if 'ipv4_public_rdns' in args:
            cur = instance.ips.ipv4.public[0].rdns
            if cur != args['ipv4_public_rdns']:
                updated = True
                if not check_mode:
                    instance.ips.ipv4.public[0].rdns = '' if not args['ipv4_public_rdns'] else args['ipv4_public_rdns']
                    # BUGFIX: save the IP object itself, matching
                    # instance_create; the old code called `.save()` on the
                    # rdns string, which raised AttributeError.
                    instance.ips.ipv4.public[0].save()

        return (updated, result)
    except Exception as e:
        linode_raise_client_error(e)
def instance_remove(instance, check_mode=False):
    """Delete ``instance`` (skipped in check mode) and report the outcome."""
    try:
        if check_mode:
            # Check mode: report the deletion without touching the API.
            return {'status': 'deleted'}
        instance.delete()
        return {'status': 'deleted'}
    except Exception as e:
        linode_raise_client_error(e)
def _fake_instance(args):
return {
'alerts': {},
'backups': {},
'created': datetime.now().isoformat(),
'hypervisor': 'kvm',
'id': -1,
'ipv4': ["0.0.0.0"],
'ipv6': '0000:0000::0000:0000:0000:0000/64',
'root_pass': 'check_mode' if 'root_pass' not in args else args['root_pass'],
'specs': {},
'status': 'provisioning',
'updated': datetime.now().isoformat(),
'watchdog_enabled': True,
'region': args['region'],
'ltype': args['type'],
'image': args['image'],
'label': args['label'],
'group': '' if 'group' not in args else args['group'],
'tags': [] if 'tags' not in args else args['tags'],
'authorized_keys': args['authorized_keys'],
}
| 31.646154
| 119
| 0.592854
|
4a029cc29604fe4c4140eb677d0343388b2cb266
| 35,464
|
py
|
Python
|
tests/test_unit_pyosirix.py
|
osirixgrpc/pyosirix
|
8b738728192d877352832467efc9462d6ce7562a
|
[
"BSD-3-Clause"
] | 1
|
2021-11-08T08:35:21.000Z
|
2021-11-08T08:35:21.000Z
|
tests/test_unit_pyosirix.py
|
osirixgrpc/pyosirix
|
8b738728192d877352832467efc9462d6ce7562a
|
[
"BSD-3-Clause"
] | 2
|
2022-02-10T10:53:17.000Z
|
2022-02-14T11:45:14.000Z
|
tests/test_unit_pyosirix.py
|
osirixgrpc/pyosirix
|
8b738728192d877352832467efc9462d6ce7562a
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
import sys
import os
from osirix import ViewerController
import grpc
import pytest
import numpy as np
import matplotlib.pyplot as pl
from osirix.osirix_utils import Osirix, OsirixService
from osirix.Exceptions import GrpcException
import osirix.pb2.osirix_pb2 as osirix_pb2
import osirix.pb2.utilities_pb2 as utilities_pb2
import osirix.pb2.types_pb2 as types_pb2
import osirix.pb2.viewercontroller_pb2 as viewercontroller_pb2
import osirix.pb2.vrcontroller_pb2 as vrcontroller_pb2
import osirix.pb2.dcmpix_pb2 as dcmpix_pb2
import osirix.pb2.roi_pb2 as roi_pb2
import osirix.pb2.roivolume_pb2 as roivolume_pb2
import osirix.pb2.osirix_pb2_grpc as osirix_pb2_grpc
# The test defined here will require:
# ===================================
# 1) OsiriX/Horos application open with plugin installed
# 2) A single viewer controller open, with multiple frames
# 3) At least one ROI called test_grpc
# 4) A 3D renedered window open
class GrpcTest(unittest.TestCase):
    """Base test class.

    Opens a gRPC channel to a running OsiriX/Horos instance (see the module
    prerequisites above) and exposes both the pyOsiriX wrapper and the raw
    stub to subclasses.
    """

    def setUp(self):
        """Connect to the OsiriX gRPC plugin; fail fast when unreachable."""
        # Check that the port and domain match your OsiriX/Horos setup.
        port = 50051
        domain = "localhost:"
        server_url_localhost = 'localhost:' + str(port)
        # Raise the message size limits: image buffers can be large.
        channel_opt = [('grpc.max_send_message_length', 512 * 1024 * 1024),
                       ('grpc.max_receive_message_length', 512 * 1024 * 1024)]
        try:
            self.osirix_service = OsirixService(channel_opt=channel_opt, domain=domain, port=port)
            self.osirix = Osirix(self.osirix_service.get_service())
            self.channel = grpc.insecure_channel(server_url_localhost, options=channel_opt)
            self.stub = osirix_pb2_grpc.OsiriXServiceStub(self.channel)
        except Exception as exc:
            # BUGFIX: the previous bare `except:` also trapped
            # KeyboardInterrupt/SystemExit and discarded the original error;
            # catch Exception and keep the cause chained for diagnosis.
            raise GrpcException("No connection to OsiriX can be established") from exc
class PyOsirixTestOsirix(GrpcTest):
    """Test case for core osirix messaging.

    Each test cross-checks the pyOsiriX wrapper against the raw gRPC stub:
    both must succeed (status 1) and agree on the returned object uids.
    Requires a live OsiriX instance (see module prerequisites).
    """

    def testOsirixCurrentBrowser(self):
        # Wrapper and stub must report the same browser controller uid.
        browser_controller = self.osirix.current_browser()
        response = self.stub.OsirixCurrentBrowser(utilities_pb2.Empty())
        print(type(response))
        print(response)
        print(response.status)
        print(response.status.status)
        self.assertEqual(response.status.status, 1)  # <-check for success
        self.assertEqual(response.browser_controller.osirixrpc_uid, browser_controller.osirixrpc_uid.osirixrpc_uid)

    def testOsirixFrontmostViewer(self):
        # Wrapper and stub must report the same frontmost 2D viewer uid.
        frontmost_viewer = self.osirix.frontmost_viewer()
        response = self.stub.OsirixFrontmostViewer(utilities_pb2.Empty())
        print(response)
        print(response.status)
        print(response.status.status)
        self.assertEqual(response.status.status, 1)
        self.assertEqual(response.viewer_controller.osirixrpc_uid, frontmost_viewer.osirixrpc_uid.osirixrpc_uid)

    def testOsirixDisplayed2DViewers(self):
        # Both sides must see at least one open 2D viewer, and equally many.
        displayed_2d_viewers = self.osirix.displayed_2d_viewers()
        response = self.stub.OsirixDisplayed2DViewers(utilities_pb2.Empty())
        print(response)
        print(response.status)
        print(response.status.status)
        self.assertEqual(response.status.status, 1)
        self.assertTrue(len(response.viewer_controllers) > 0)
        self.assertTrue(len(displayed_2d_viewers) > 0)
        self.assertEqual(len(response.viewer_controllers), len(displayed_2d_viewers))

    def testOsirixFrontmostVRController(self):
        # Requires an open 3D rendered window (module prerequisite 4).
        frontmost_vr_controller = self.osirix.frontmost_vr_controller()
        response = self.stub.OsirixFrontmostVRController(utilities_pb2.Empty())
        print(response)
        print(response.status)
        print(response.status.status)
        self.assertEqual(response.status.status, 1)
        self.assertEqual(response.vr_controller.osirixrpc_uid, frontmost_vr_controller.osirixrpc_uid.osirixrpc_uid)

    def testOsirixDisplayedVRControllers(self):
        # Both sides must see at least one 3D window, and equally many.
        displayed_vr_controllers = self.osirix.displayed_vr_controllers()
        response = self.stub.OsirixDisplayedVRControllers(utilities_pb2.Empty())
        self.assertEqual(response.status.status, 1)
        self.assertTrue(len(response.vr_controllers) > 0)
        self.assertTrue(len(displayed_vr_controllers) > 0)
        self.assertEqual(len(response.vr_controllers), len(displayed_vr_controllers))
class PyOsirixTestViewerController(GrpcTest):
"""Test case for ViewerController messaging
"""
def setUp(self):
super().setUp()
self.viewer_controller = self.stub.OsirixFrontmostViewer(utilities_pb2.Empty()).viewer_controller
self.viewer_controller_pyosirix = self.osirix.frontmost_viewer()
def testViewerControllerPixList(self):
pix_list = self.viewer_controller_pyosirix.pix_list(movie_idx=0)
request = viewercontroller_pb2.ViewerControllerPixListRequest(viewer_controller=self.viewer_controller,
movie_idx=0)
response = self.stub.ViewerControllerPixList(request)
print(response)
self.assertEqual(response.status.status, 1)
self.assertTrue(len(response.pix) > 0)
self.assertEqual(len(response.pix), len(pix_list))
def testViewerControllerNeedsDisplayUpdate(self):
self.viewer_controller_pyosirix.needs_display_update() # Check for response is build in and it returns none
response = self.stub.ViewerControllerNeedsDisplayUpdate(self.viewer_controller)
self.assertEqual(response.status.status, 1)
def testViewerControllerROIList(self):
roi_list = self.viewer_controller_pyosirix.roi_list(movie_idx=0)
request = viewercontroller_pb2.ViewerControllerROIListRequest(viewer_controller=self.viewer_controller,
movie_idx=0)
response = self.stub.ViewerControllerROIList(request)
self.assertEqual(response.status.status, 1)
self.assertTrue(len(response.roi_slices) > 0)
self.assertEqual(len(response.roi_slices), len(roi_list))
# print(response.roi_slices)
for roi_slice in response.roi_slices:
print(roi_slice)
# TODO add tests for setting new ROIs once the Type proto messages are exposed
def testViewerControllerNewROI_Mask(self):
buffer_array = np.random.randn(40 * 40) > 0
buffer = viewercontroller_pb2.ViewerControllerNewROIRequest.Buffer(buffer=1 * buffer_array, rows=40, columns=40)
color = viewercontroller_pb2.ViewerControllerNewROIRequest.Color(r=255, g=0, b=200)
request = viewercontroller_pb2.ViewerControllerNewROIRequest(viewer_controller=self.viewer_controller,
movie_idx=0, position=0, itype=20, buffer=buffer,
color=color, opacity=0.5, name="random")
response = self.stub.ViewerControllerNewROI(request)
self.assertEqual(response.status.status, 1)
print(response.roi)
def testViewerControllerNewROI_Oval(self):
rect = viewercontroller_pb2.ViewerControllerNewROIRequest.Rect(origin_x=66., origin_y=42., width=20.,
height=10.)
color = viewercontroller_pb2.ViewerControllerNewROIRequest.Color(r=255, g=100, b=200)
request = viewercontroller_pb2.ViewerControllerNewROIRequest(viewer_controller=self.viewer_controller,
movie_idx=0, position=0, itype=9, rectangle=rect,
color=color, opacity=0.5, name="oval",
thickness=3.0)
response = self.stub.ViewerControllerNewROI(request)
self.assertEqual(response.status.status, 1)
print(response.roi)
def testViewerControllerNewROI_Arrow(self):
# Points seem to go in order [head, tail]
color = viewercontroller_pb2.ViewerControllerNewROIRequest.Color(r=0, g=255, b=0)
points = [viewercontroller_pb2.ViewerControllerNewROIRequest.Point2D(x=66., y=42.),
viewercontroller_pb2.ViewerControllerNewROIRequest.Point2D(x=99., y=24.)]
request = viewercontroller_pb2.ViewerControllerNewROIRequest(viewer_controller=self.viewer_controller,
points=points, movie_idx=0, position=0, itype=14,
color=color, opacity=0.5, name="arrow",
thickness=3.0)
response = self.stub.ViewerControllerNewROI(request)
self.assertEqual(response.status.status, 1)
print(response.roi)
def testViewerControllerNewROI_Point(self):
rect = viewercontroller_pb2.ViewerControllerNewROIRequest.Rect(origin_x=66., origin_y=42., width=20.,
height=10.)
color = viewercontroller_pb2.ViewerControllerNewROIRequest.Color(r=0, g=255, b=255)
request = viewercontroller_pb2.ViewerControllerNewROIRequest(viewer_controller=self.viewer_controller,
rectangle=rect, movie_idx=0, position=0, itype=19,
color=color, opacity=1.0, name="point",
thickness=3.0)
response = self.stub.ViewerControllerNewROI(request)
self.assertEqual(response.status.status, 1)
print(response.roi)
#
# # A rectangle...
def testViewerControllerNewROI_TROI(self):
rect = viewercontroller_pb2.ViewerControllerNewROIRequest.Rect(origin_x=66., origin_y=42., width=20.,
height=10.)
color = viewercontroller_pb2.ViewerControllerNewROIRequest.Color(r=255, g=100, b=100)
request = viewercontroller_pb2.ViewerControllerNewROIRequest(viewer_controller=self.viewer_controller,
rectangle=rect, movie_idx=0, position=0, itype=6,
color=color, opacity=1.0, name="tROI",
thickness=3.0)
response = self.stub.ViewerControllerNewROI(request)
self.assertEqual(response.status.status, 1)
print(response.roi)
#
def testViewerControllerNewROI_Text(self):
rect = viewercontroller_pb2.ViewerControllerNewROIRequest.Rect(origin_x=66., origin_y=42., width=20.,
height=10.)
color = viewercontroller_pb2.ViewerControllerNewROIRequest.Color(r=255, g=100, b=100)
request = viewercontroller_pb2.ViewerControllerNewROIRequest(viewer_controller=self.viewer_controller,
rectangle=rect, movie_idx=0, position=0, itype=13,
color=color, opacity=1.0, name="Some text",
thickness=3.0)
response = self.stub.ViewerControllerNewROI(request)
self.assertEqual(response.status.status, 1)
print(response.roi)
def testViewerControllerNewROI_TTAGT(self):
points = [[50.20499802, 32.32217407], [53.27367783, 38.77323914], [64.68674469, 25.43341637],
[69.71873474, 36.01180649], [41.8967247, 36.27430344], [68.91729736, 23.42099953]]
points = [viewercontroller_pb2.ViewerControllerNewROIRequest.Point2D(x=p[0], y=p[1]) for p in points]
print(len(points))
color = viewercontroller_pb2.ViewerControllerNewROIRequest.Color(r=100, g=250, b=220)
request = viewercontroller_pb2.ViewerControllerNewROIRequest(viewer_controller=self.viewer_controller,
points=points, movie_idx=0, position=0, itype=29,
color=color, opacity=1.0, name="tTAGT",
thickness=3.0)
response = self.stub.ViewerControllerNewROI(request)
self.assertEqual(response.status.status, 1)
print(response.roi)
def testViewerControllerNewROI_Pencil(self):
points = [[50.20499802, 32.32217407], [53.27367783, 38.77323914], [64.68674469, 25.43341637],
[69.71873474, 36.01180649], [41.8967247, 36.27430344], [68.91729736, 23.42099953]]
points = [viewercontroller_pb2.ViewerControllerNewROIRequest.Point2D(x=p[0], y=p[1]) for p in points]
color = viewercontroller_pb2.ViewerControllerNewROIRequest.Color(r=100, g=50, b=220)
request = viewercontroller_pb2.ViewerControllerNewROIRequest(viewer_controller=self.viewer_controller,
points=points, movie_idx=0, position=0, itype=15,
color=color, opacity=1.0, name="pencil",
thickness=3.0)
response = self.stub.ViewerControllerNewROI(request)
self.assertEqual(response.status.status, 1)
print(response.roi)
def testViewerControllerNewROI_Angle(self):
points = [viewercontroller_pb2.ViewerControllerNewROIRequest.Point2D(x=71., y=-2.), \
viewercontroller_pb2.ViewerControllerNewROIRequest.Point2D(x=67., y=11.), \
viewercontroller_pb2.ViewerControllerNewROIRequest.Point2D(x=90., y=9.)]
color = viewercontroller_pb2.ViewerControllerNewROIRequest.Color(r=100, g=50, b=220)
request = viewercontroller_pb2.ViewerControllerNewROIRequest(viewer_controller=self.viewer_controller,
points=points, movie_idx=0, position=0, itype=12,
color=color, opacity=1.0, name="pencil",
thickness=3.0)
response = self.stub.ViewerControllerNewROI(request)
self.assertEqual(response.status.status, 1)
print(response.roi)
def testViewerControllerNewROI_Measure(self):
points = [viewercontroller_pb2.ViewerControllerNewROIRequest.Point2D(x=71., y=-2.), \
viewercontroller_pb2.ViewerControllerNewROIRequest.Point2D(x=67., y=11.)]
color = viewercontroller_pb2.ViewerControllerNewROIRequest.Color(r=100, g=50, b=0)
request = viewercontroller_pb2.ViewerControllerNewROIRequest(viewer_controller=self.viewer_controller,
points=points, movie_idx=0, position=0, itype=5,
color=color, opacity=1.0, name="measure",
thickness=3.0)
response = self.stub.ViewerControllerNewROI(request)
self.assertEqual(response.status.status, 1)
print(response.roi)
def testViewerControllerCurDCM(self):
cur_dcm = self.viewer_controller_pyosirix.cur_dcm()
response = self.stub.ViewerControllerCurDCM(self.viewer_controller)
self.assertEqual(response.status.status, 1)
self.assertNotEqual(response.pix.osirixrpc_uid, "")
self.assertEqual(response.pix.osirixrpc_uid, cur_dcm.osirixrpc_uid.osirixrpc_uid)
def testViewerControllerROIsWithName(self):
roi_with_names = self.viewer_controller_pyosirix.rois_with_name(name="test_grpc", movie_idx=0)
request = viewercontroller_pb2.ViewerControllerROIsWithNameRequest(viewer_controller=self.viewer_controller,
name="test_grpc", movie_idx=0)
response = self.stub.ViewerControllerROIsWithName(request)
self.assertEqual(response.status.status, 1)
self.assertTrue(len(response.rois) > 0)
self.assertNotEqual(response.rois[0].osirixrpc_uid, "")
self.assertTrue(len(roi_with_names) > 0)
self.assertEqual(len(response.rois), len(roi_with_names))
request = viewercontroller_pb2.ViewerControllerROIsWithNameRequest(viewer_controller=self.viewer_controller,
name="test_grpc", in_4d=True)
response = self.stub.ViewerControllerROIsWithName(request)
self.assertEqual(response.status.status, 1)
def testViewerControllerSelectedROIs(self):
selected_rois = self.viewer_controller_pyosirix.selected_rois()
response = self.stub.ViewerControllerSelectedROIs(self.viewer_controller)
self.assertEqual(response.status.status, 1)
self.assertEqual(len(selected_rois), len(response.rois))
def testViewerControllerResampleViewerController(self):
# Check that it doesn't work if bad viewer id provided.
viewer_controller_fake = types_pb2.ViewerController(osirixrpc_uid="bad_id")
request = viewercontroller_pb2.ViewerControllerResampleViewerControllerRequest(
viewer_controller=viewer_controller_fake, fixed_viewer_controller=self.viewer_controller)
#Resample by passing in the same viewer controller for testing of function
resampled_vc =self.viewer_controller_pyosirix.resample_viewer_controller(vc=self.viewer_controller_pyosirix)
response = self.stub.ViewerControllerResampleViewerController(request)
print(response)
self.assertEqual(response.status.status, 0)
self.assertIsNotNone(resampled_vc.osirixrpc_uid)
def testViewerControllerVRControllers(self):
vr_controllers = self.viewer_controller_pyosirix.vr_controllers()
response = self.stub.ViewerControllerVRControllers(self.viewer_controller)
self.assertEqual(response.status.status, 1)
self.assertEqual(len(vr_controllers), len(response.vr_controllers))
def testViewerControllerTitle(self):
title = self.viewer_controller_pyosirix.title
response = self.stub.ViewerControllerTitle(self.viewer_controller)
self.assertEqual(response.status.status, 1)
self.assertFalse(response.title == "")
self.assertEqual(response.title, title)
def testViewerControllerModality(self):
modality = self.viewer_controller_pyosirix.modality
response = self.stub.ViewerControllerModality(self.viewer_controller)
self.assertEqual(response.status.status, 1)
self.assertFalse(response.modality == "")
self.assertEqual(response.modality, modality)
    def testViewerControllerSetMovieIdx(self):
        """Set the movie (4D frame) index to 0 via the stub, then via pyOsirix."""
        request = viewercontroller_pb2.ViewerControllerSetMovieIdxRequest(viewer_controller=self.viewer_controller,
                                                                          movie_idx=0)
        response = self.stub.ViewerControllerSetMovieIdx(request)
        self.assertEqual(response.status.status, 1)
        # pyOsirix exposes the index as a property; set it and read it back.
        self.viewer_controller_pyosirix.movie_idx = 0
        movie_idx_value = self.viewer_controller_pyosirix.movie_idx
        self.assertEqual(movie_idx_value, 0)
# @pytest.mark.dependency(depends=['TestViewerController::testViewerControllerSetMovieIdx'])
def testViewerControllerMovieIdx(self):
movie_idx = self.viewer_controller_pyosirix.movie_idx
response = self.stub.ViewerControllerMovieIdx(self.viewer_controller)
self.assertEqual(response.status.status, 1)
self.assertTrue(response.movie_idx == 0)
self.assertEqual(response.movie_idx, movie_idx)
def testViewerControllerIdx(self):
idx = self.viewer_controller_pyosirix.idx
response = self.stub.ViewerControllerIdx(self.viewer_controller)
self.assertEqual(response.status.status, 1)
self.assertTrue(response.idx == 0)
self.assertEqual(response.idx, idx)
    def testViewerControllerSetWLWW(self):
        """Set window level/width via pyOsirix first, then via the raw stub.

        Order matters: the pyOsirix setter applies (100, 200), then the stub
        sets the same pair, and the final read-back must match both.
        """
        wlww = (100, 200)
        self.viewer_controller_pyosirix.wlww = wlww
        wlww_value = self.viewer_controller_pyosirix.wlww
        request = viewercontroller_pb2.ViewerControllerSetWLWWRequest(viewer_controller=self.viewer_controller, wl=100,
                                                                      ww=200)
        response = self.stub.ViewerControllerSetWLWW(request)
        # Read back through the stub and compare with the pyOsirix value.
        response2 = self.stub.ViewerControllerWLWW(self.viewer_controller)
        self.assertEqual(response.status.status, 1)
        self.assertEqual(response2.wl, wlww_value[0])
        self.assertEqual(response2.ww, wlww_value[1])
def testViewerControllerWLWW(self):
wl, ww = self.viewer_controller_pyosirix.wlww
response = self.stub.ViewerControllerWLWW(self.viewer_controller)
self.assertEqual(response.status.status, 1)
self.assertTrue(response.wl == 100)
self.assertTrue(response.ww == 200)
self.assertEqual(response.wl, wl)
self.assertEqual(response.ww, ww)
class PyOsirixTestDCMPix(GrpcTest):
    """Test case for DCMPix messaging.

    Compares the pyOsirix DCMPix wrapper against the raw gRPC stub for the
    frontmost viewer's current pix and a ROI named "test_grpc".
    """
    def setUp(self):
        super().setUp()
        self.viewer_controller = self.stub.OsirixFrontmostViewer(utilities_pb2.Empty()).viewer_controller
        self.viewer_controller_pyosirix = self.osirix.frontmost_viewer()
        self.pix_pyosirix = self.viewer_controller_pyosirix.cur_dcm()
        self.pix = self.stub.ViewerControllerCurDCM(self.viewer_controller).pix
        self.pix_list_pyosirix = self.viewer_controller_pyosirix.pix_list(movie_idx=0)
        self.roi_pyosirix = self.viewer_controller_pyosirix.rois_with_name(name="test_grpc", movie_idx=0)[0]
        roi_request = viewercontroller_pb2.ViewerControllerROIsWithNameRequest(viewer_controller=self.viewer_controller,
                                                                               name="test_grpc", movie_idx=0)
        self.roi = self.stub.ViewerControllerROIsWithName(roi_request).rois[0]

    def testDCMPixConvertToRGB(self):
        """Conversion to RGB succeeds (a failed response raises implicitly)."""
        self.pix_pyosirix.convert_to_rgb()

    def testDCMPixConvertToBW(self):
        """Conversion to black & white succeeds (a failed response raises implicitly)."""
        self.pix_pyosirix.convert_to_bw()

    def testDCMPixIsRGB(self):
        """is_rgb flag matches between stub and pyOsirix."""
        is_rgb = self.pix_pyosirix.is_rgb
        response = self.stub.DCMPixIsRGB(self.pix)
        self.assertEqual(response.status.status, 1)
        print("Is RGB: %d" % (1 * response.is_rgb))
        self.assertEqual(response.is_rgb, is_rgb)

    def testDCMPixComputeROI(self):
        """ROI statistics (mean) agree between stub and pyOsirix."""
        roi_dict = self.pix_pyosirix.compute_roi(self.roi_pyosirix)
        request = dcmpix_pb2.DCMPixComputeROIRequest(pix=self.pix, roi=self.roi)
        response = self.stub.DCMPixComputeROI(request)
        self.assertEqual(response.status.status, 1)
        print(response)
        print(response.mean)
        self.assertEqual(response.mean, roi_dict["mean"])

    def testDCMPixROIValues(self):
        """Row/column indices and pixel values under the ROI agree."""
        rows_py, columns_py, values_py = self.pix_pyosirix.get_roi_values(self.roi_pyosirix)
        request = dcmpix_pb2.DCMPixROIValuesRequest(pix=self.pix, roi=self.roi)
        response = self.stub.DCMPixROIValues(request)
        self.assertEqual(response.status.status, 1)
        print(response)
        rows = np.array(response.row_indices)
        columns = np.array(response.column_indices)
        values = np.array(response.values)
        print("Rows:\n", rows)
        print("Columns:\n", columns)
        print("Values:\n", values)
        self.assertTrue(np.array_equal(rows, rows_py))
        self.assertTrue(np.array_equal(columns, columns_py))
        self.assertTrue(np.array_equal(values, values_py))

    def testDCMPixShape(self):
        """Image shape (rows, columns) agrees between stub and pyOsirix."""
        rows, columns = self.pix_pyosirix.shape
        response = self.stub.DCMPixShape(self.pix)
        self.assertEqual(response.status.status, 1)
        print("Rows:%d" % response.rows)
        print("Columns: %d" % response.columns)
        self.assertEqual(response.rows, rows)
        self.assertEqual(response.columns, columns)

    def testDCMPixSpacing(self):
        """Pixel spacing agrees between stub and pyOsirix."""
        rows, columns = self.pix_pyosirix.pixel_spacing
        response = self.stub.DCMPixSpacing(self.pix)
        self.assertEqual(response.status.status, 1)
        print("Row spacing:%.2f" % response.spacing_rows)
        print("Column spacing: %.2f" % response.spacing_columns)
        self.assertEqual(response.spacing_rows, rows)
        self.assertEqual(response.spacing_columns, columns)

    def testDCMPixOrigin(self):
        """Image origin (rows, columns, slices) agrees between stub and pyOsirix."""
        rows, columns, slices = self.pix_pyosirix.origin
        response = self.stub.DCMPixOrigin(self.pix)
        self.assertEqual(response.status.status, 1)
        print("Row origin:%.2f" % response.origin_rows)
        print("Column origin: %.2f" % response.origin_columns)
        print("Slice origin: %.2f" % response.origin_slices)
        self.assertEqual(response.origin_rows, rows)
        self.assertEqual(response.origin_columns, columns)
        self.assertEqual(response.origin_slices, slices)

    def testDCMPixOrientation(self):
        """Orientation vector agrees between stub and pyOsirix."""
        orientation = self.pix_pyosirix.orientation
        response = self.stub.DCMPixOrientation(self.pix)
        self.assertEqual(response.status.status, 1)
        print("Orientation: ", np.array(response.orientation))
        self.assertTrue(np.array_equal(np.array(response.orientation), orientation))

    def testDCMPixSliceLocation(self):
        """Slice location agrees between stub and pyOsirix."""
        location = self.pix_pyosirix.slice_location
        response = self.stub.DCMPixSliceLocation(self.pix)
        self.assertEqual(response.status.status, 1)
        print("Slice location: ", response.slice_location)
        self.assertEqual(location, response.slice_location)

    def testDCMPixSourceFile(self):
        """Source file path is non-empty and agrees between stub and pyOsirix."""
        source_file = self.pix_pyosirix.source_file
        response = self.stub.DCMPixSourceFile(self.pix)
        self.assertEqual(response.status.status, 1)
        self.assertFalse(response.source_file == "")
        print("Source file: %s" % response.source_file)
        print(type(response))
        self.assertEqual(response.source_file, source_file)

    # TODO add these tests once the RPC for them have been added to osirix.proto
    # def testDCMPixDicomImage(self):
    #     dicom_image = self.pix_pyosirix.image_obj()
    #     print(dicom_image.modality)
    #
    # def testDCMPixDicomSeries(self):
    #     dicom_image = self.pix_pyosirix.series_obj()
    #     print(dicom_image.modality)
    #
    # def testDCMPixDicomStudy(self):
    #     dicom_image = self.pix_pyosirix.study_object()
    #     print(dicom_image.modality)

    def testDCMPixImage(self):
        """Pixel data round-trips into a numpy array with the advertised shape."""
        image_array = self.pix_pyosirix.image
        response = self.stub.DCMPixImage(self.pix)
        self.assertEqual(response.status.status, 1)
        self.assertTrue(response.rows > 0)
        self.assertTrue(response.columns > 0)
        print("# rows/columns: %d/%d" % (response.rows, response.columns))
        if response.is_argb:
            # ARGB data: 4 channel values per pixel.
            self.assertEqual(len(response.image_data_argb), response.rows * response.columns * 4)
            array = np.array(response.image_data_argb).reshape(response.rows, response.columns, 4)
            print("Numpy output:\n", array)
            self.assertTrue(np.array_equal(array, image_array))
        else:
            # Greyscale float data: one value per pixel.
            self.assertEqual(len(response.image_data_float), response.rows * response.columns)
            array = np.array(response.image_data_float).reshape(response.rows, response.columns)
            print("Numpy output:\n", array)
            self.assertTrue(np.array_equal(array, image_array))

    def testDCMPixSetImage(self):
        """Read pixel data from another pix and write it via stub and pyOsirix.

        Fixed: the previous version built unused local `array` values (one of
        them from the wrong response object) that were never asserted on.
        """
        response = self.stub.DCMPixImage(self.pix)
        print(type(response.image_data_float))
        request = viewercontroller_pb2.ViewerControllerPixListRequest(viewer_controller=self.viewer_controller,
                                                                      movie_idx=0)
        response_pix_list = self.stub.ViewerControllerPixList(request)
        pix2 = response_pix_list.pix[20]
        print(pix2)
        response_dcm_pix2 = self.stub.DCMPixImage(pix2)
        print(type(response_dcm_pix2.image_data_float))
        if response_dcm_pix2.is_argb:
            request = dcmpix_pb2.DCMPixSetImageRequest(pix=pix2, image_data_argb=response_dcm_pix2.image_data_argb)
            self.pix_pyosirix.set_image(response_dcm_pix2.image_data_argb, response_dcm_pix2.is_argb)
        else:
            request = dcmpix_pb2.DCMPixSetImageRequest(pix=pix2, image_data_float=response_dcm_pix2.image_data_float)
            self.pix_pyosirix.set_image(response_dcm_pix2.image_data_float, response_dcm_pix2.is_argb)
        response = self.stub.DCMPixSetImage(request)
        # Refresh the viewer so the written pixels become visible.
        self.viewer_controller_pyosirix = self.osirix.frontmost_viewer()
        self.viewer_controller_pyosirix.needs_display_update()
        self.assertEqual(response.status.status, 1)
class PyOsirixTestROI(GrpcTest):
    """Test case for ROI messaging.

    Exercises a ROI named "test_grpc" through the pyOsirix wrapper and the
    raw gRPC stub in parallel.
    """
    def setUp(self):
        super().setUp()
        self.viewer_controller_pyosirix = self.osirix.frontmost_viewer()
        self.pix_pyosirix = self.viewer_controller_pyosirix.cur_dcm()
        self.roi_pyosirix = self.viewer_controller_pyosirix.rois_with_name(name="test_grpc", movie_idx=0)[0]
        self.viewer_controller = self.stub.OsirixFrontmostViewer(utilities_pb2.Empty()).viewer_controller
        self.pix = self.stub.ViewerControllerCurDCM(self.viewer_controller).pix
        roi_request = viewercontroller_pb2.ViewerControllerROIsWithNameRequest(viewer_controller=self.viewer_controller,
                                                                               name="test_grpc", movie_idx=0)
        self.roi = self.stub.ViewerControllerROIsWithName(roi_request).rois[0]

    def testROIFlipHorizontally(self):
        """Horizontal flip succeeds through both interfaces."""
        self.roi_pyosirix.flip_horizontally()
        response = self.stub.ROIFlipHorizontally(self.roi)
        self.assertEqual(response.status.status, 1)

    def testROIFlipVertically(self):
        """Vertical flip succeeds through both interfaces."""
        self.roi_pyosirix.flip_vertically()
        response = self.stub.ROIFlipVertically(self.roi)
        self.assertEqual(response.status.status, 1)

    def testROIArea(self):
        """ROI area agrees between stub and pyOsirix."""
        area = self.roi_pyosirix.roi_area()
        response = self.stub.ROIArea(self.roi)
        self.assertEqual(response.status.status, 1)
        print("ROI area: %f" % response.area)
        self.assertEqual(area, response.area)

    def testROICentroid(self):
        """ROI centroid agrees between stub and pyOsirix."""
        x, y = self.roi_pyosirix.centroid
        response = self.stub.ROICentroid(self.roi)
        self.assertEqual(response.status.status, 1)
        print("ROI centroid: x = %.2f, y = %.2f" % (response.x, response.y))
        self.assertEqual(x, response.x)
        self.assertEqual(y, response.y)

    def testROIRotate(self):
        """Rotation about the centroid succeeds through both interfaces."""
        x, y = self.roi_pyosirix.centroid
        self.roi_pyosirix.rotate(theta=45, center=(x, y))
        centroid_response = self.stub.ROICentroid(self.roi)
        request = roi_pb2.ROIRotateRequest(roi=self.roi, degrees=45, x=centroid_response.x, y=centroid_response.y)
        response = self.stub.ROIRotate(request)
        self.assertEqual(response.status.status, 1)

    def testROIMove(self):
        """Moving by (columns=30, rows=-10) shifts the centroid accordingly."""
        x, y = self.roi_pyosirix.centroid
        self.roi_pyosirix.roi_move(columns=30, rows=-10)
        x_post, y_post = self.roi_pyosirix.centroid
        centroid_response = self.stub.ROICentroid(self.roi)
        request = roi_pb2.ROIMoveRequest(roi=self.roi, columns=30, rows=-10)
        response = self.stub.ROIMove(request)
        centroid_response_post = self.stub.ROICentroid(self.roi)
        self.assertEqual(response.status.status, 1)
        self.assertAlmostEqual(centroid_response_post.x - centroid_response.x, 30, places=2)
        self.assertAlmostEqual(centroid_response_post.y - centroid_response.y, -10, places=2)
        self.assertAlmostEqual(x_post - x, 30, places=2)
        self.assertAlmostEqual(y_post - y, -10, places=2)
        # NOTE(review): this request is built but never sent — presumably it
        # was meant to undo the move.  Confirm intent before sending it.
        request = roi_pb2.ROIMoveRequest(roi=self.roi, columns=-30, rows=10)

    def testROIPix(self):
        """The ROI's backing pix uid matches the pyOsirix pix uid."""
        dcm_pix = self.roi_pyosirix.pix
        response = self.stub.ROIPix(self.roi)
        self.assertEqual(response.pix, dcm_pix.osirixrpc_uid)
        self.assertEqual(response.status.status, 1)

    def testROIName(self):
        """ROI name is "test_grpc" and agrees between stub and pyOsirix."""
        name = self.roi_pyosirix.name
        response = self.stub.ROIName(self.roi)
        self.assertEqual(response.status.status, 1)
        self.assertEqual(response.name, "test_grpc")
        self.assertEqual(response.name, name)

    def testROISetName(self):
        """The name property setter round-trips."""
        self.roi_pyosirix.name = "test_grpc"
        self.assertEqual(self.roi_pyosirix.name, "test_grpc")

    def testROIColor(self):
        """ROI RGB colour agrees between stub and pyOsirix."""
        r, g, b = self.roi_pyosirix.color
        response = self.stub.ROIColor(self.roi)
        self.assertEqual(response.status.status, 1)
        print("ROI RGB color: %d/%d/%d" % (response.r, response.g, response.b))
        self.assertEqual(response.r, r)
        self.assertEqual(response.g, g)
        self.assertEqual(response.b, b)

    def testROISetColor(self):
        """Assign the ROI's current colour back through the property setter."""
        r, g, b = self.roi_pyosirix.color
        # Fixed: previously this rebound `self.roi_pyosirix` to a tuple
        # instead of assigning through the `color` property setter
        # (cf. testROISetOpacity / testROISetThickness).
        self.roi_pyosirix.color = (r, g, b)

    def testROIOpacity(self):
        """ROI opacity agrees between stub and pyOsirix."""
        opacity = self.roi_pyosirix.opacity
        response = self.stub.ROIOpacity(self.roi)
        self.assertEqual(response.status.status, 1)
        print("ROI opacity: %.2f" % response.opacity)
        self.assertEqual(response.opacity, opacity)

    def testROISetOpacity(self):
        """Assign the ROI's current opacity back through the property setter."""
        opacity = self.roi_pyosirix.opacity
        self.roi_pyosirix.opacity = opacity

    def testROIThickness(self):
        """ROI thickness agrees between stub and pyOsirix."""
        thickness = self.roi_pyosirix.thickness
        response = self.stub.ROIThickness(self.roi)
        self.assertEqual(response.status.status, 1)
        print("ROI thickness: %.2f" % response.thickness)
        self.assertEqual(response.thickness, thickness)

    def testROISetThickness(self):
        """Assign the ROI's current thickness back through the property setter."""
        thickness = self.roi_pyosirix.thickness
        self.roi_pyosirix.thickness = thickness

    def testROIPoints(self):
        """Polygon points agree between stub and pyOsirix."""
        points_py = self.roi_pyosirix.points
        response = self.stub.ROIPoints(self.roi)
        self.assertEqual(response.status.status, 1)
        points = []
        for i in range(len(response.points)):
            points.append([response.points[i].x, response.points[i].y])
        points = np.array(points)
        print("ROI points:\n", points)
        self.assertTrue(np.array_equal(points, points_py))

    def testROISetPoints(self):
        """Write the ROI's current points back through the stub."""
        # TODO: also exercise the pyOsirix points setter once it is stable.
        points_response = self.stub.ROIPoints(self.roi)
        points_request = []
        for i in range(len(points_response.points)):
            points_request.append(
                roi_pb2.ROISetPointsRequest.Point2D(x=points_response.points[i].x, y=points_response.points[i].y))
        request = roi_pb2.ROISetPointsRequest(roi=self.roi, points=points_request)
        response = self.stub.ROISetPoints(request)
        self.assertEqual(response.status.status, 1)
class PyOsirixTestVRController(GrpcTest):
    """Test case for VRController messaging.

    Compares the pyOsirix VRController wrapper with the raw gRPC stub for the
    frontmost 3D viewer.
    """
    def setUp(self):
        super().setUp()
        self.viewer_controller = self.stub.OsirixFrontmostViewer(utilities_pb2.Empty()).viewer_controller
        self.vr_controller = self.stub.ViewerControllerVRControllers(self.viewer_controller).vr_controllers[0]
        self.viewer_controller_pyosirix = self.osirix.frontmost_viewer()
        self.vr_controller_pyosirix = self.osirix.frontmost_vr_controller()
    def testVRControllerWLWW(self):
        # Window level/width agree between stub and pyOsirix.
        wlww = self.vr_controller_pyosirix.wlww
        response = self.stub.VRControllerWLWW(self.vr_controller)
        print("VRController wl/ww: %.2f/%.2f" % (response.wl, response.ww))
        self.assertEqual(response.wl, wlww[0])
        self.assertEqual(response.ww, wlww[1])
        self.assertEqual(response.status.status, 1)
    def testVRControllerSetWLWW(self):
        # pyOsirix sets (200, 500) first; the stub then sets the same pair and
        # the read-back must match what pyOsirix reports.
        test = (200.0, 500.0)
        self.vr_controller_pyosirix.wlww = test
        wlww_pyosirix = self.vr_controller_pyosirix.wlww
        request = vrcontroller_pb2.VRControllerSetWLWWRequest(vr_controller=self.vr_controller, wl=200, ww=500)
        response = self.stub.VRControllerSetWLWW(request)
        response2 = self.stub.VRControllerWLWW(self.vr_controller)
        self.assertEqual(response.status.status, 1)
        self.assertEqual(response2.wl, wlww_pyosirix[0])
        self.assertEqual(response2.ww, wlww_pyosirix[1])
    def testVRControllerRenderingMode(self):
        # Rendering mode string agrees between stub and pyOsirix.
        rendering_mode = self.vr_controller_pyosirix.rendering_mode
        response = self.stub.VRControllerRenderingMode(self.vr_controller)
        print("VRController rendering mode: %s" % response.rendering_mode)
        self.assertEqual(response.status.status, 1)
        self.assertEqual(response.rendering_mode, rendering_mode)
    def testVRControllerSetRenderingMode(self):
        self.vr_controller_pyosirix.rendering_mode = "MIP"
        rendering_mode = self.vr_controller_pyosirix.rendering_mode
        request = vrcontroller_pb2.VRControllerSetRenderingModeRequest(vr_controller=self.vr_controller,
                                                                       rendering_mode="MIP")
        # NOTE(review): response2 is fetched *before* the stub's set request is
        # sent; it passes because the pyOsirix setter above already applied
        # "MIP" — confirm this ordering is intended.
        response2 = self.stub.VRControllerRenderingMode(self.vr_controller)
        response = self.stub.VRControllerSetRenderingMode(request)
        self.assertEqual(response.status.status, 1)
        self.assertEqual(response2.rendering_mode, rendering_mode)
    def testVRControllerViewer2D(self):
        # The VR controller's backing 2D viewer is the frontmost viewer.
        viewer_2d = self.vr_controller_pyosirix.viewer_2d()
        response = self.stub.VRControllerViewer2D(self.vr_controller)
        self.assertEqual(response.status.status, 1)
        self.assertEqual(response.viewer_controller.osirixrpc_uid, self.viewer_controller.osirixrpc_uid)
        self.assertEqual(viewer_2d.osirixrpc_uid.osirixrpc_uid, response.viewer_controller.osirixrpc_uid)
    def testVRControllerBlendingController(self):
        blending_controller = self.vr_controller_pyosirix.blending_controller()
        response = self.stub.VRControllerBlendingController(self.vr_controller)
        # The blending controller message can be empty with the current test data.
        print(response.viewer_controller)
        self.assertEqual(response.status.status,1)  # TODO - not much of a test. May need some better test data that allows for this.
        self.assertIsNotNone(response.viewer_controller)
        self.assertIsNotNone(blending_controller.osirixrpc_uid)
    def testVRControllerStyle(self):
        # Style string agrees between stub and pyOsirix.
        style = self.vr_controller_pyosirix.style
        response = self.stub.VRControllerStyle(self.vr_controller)
        self.assertEqual(response.status.status, 1)
        print("VRController style: %s" % response.style)
        self.assertEqual(response.style, style)
    def testVRControllerTitle(self):
        # Title string agrees between stub and pyOsirix.
        title = self.vr_controller_pyosirix.title
        response = self.stub.VRControllerTitle(self.vr_controller)
        self.assertEqual(response.status.status, 1)
        print("VRController title: %s" % response.title)
        self.assertEqual(response.title, title)
    def testVRControllerROIVolumes(self):
        # Not implemented in pyOsirix yet; only the raw stub is exercised.
        response = self.stub.VRControllerROIVolumes(self.vr_controller)
        print(response.roi_volumes[0])
        self.assertEqual(response.status.status, 1)
class PyOsirixTestBrowserController(GrpcTest):
    """Test case for BrowserController messaging."""

    def setUp(self):
        super().setUp()
        self.browser_controller = self.stub.OsirixCurrentBrowser(utilities_pb2.Empty()).browser_controller
        self.browser_controller_pyosirix = self.osirix.current_browser()

    def testBrowserControllerDatabaseSelection(self):
        """Database selection lengths agree between pyOsirix and the stub."""
        selection = self.browser_controller_pyosirix.database_selection()
        resp = self.stub.BrowserControllerDatabaseSelection(self.browser_controller)
        self.assertEqual(resp.status.status, 1)
        print("Studies: ", resp.studies)
        print("Series: ", resp.series)
        self.assertEqual(len(resp.studies), len(selection[0]))
        self.assertEqual(len(resp.series), len(selection[1]))
# Run the full gRPC integration-test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
| 42.986667
| 128
| 0.774983
|
4a029e7c108836c3c90a446ce5f0c3c10617d411
| 2,580
|
py
|
Python
|
mygrations/formats/mysql/file_reader/comment_parser.py
|
cmancone/mygrations
|
30d1d568ca7d6c38dbc5211834dd2d04c0bcf078
|
[
"MIT"
] | 10
|
2018-04-09T08:39:42.000Z
|
2022-03-14T15:36:05.000Z
|
mygrations/formats/mysql/file_reader/comment_parser.py
|
cmancone/mygrations
|
30d1d568ca7d6c38dbc5211834dd2d04c0bcf078
|
[
"MIT"
] | 14
|
2018-05-02T11:14:08.000Z
|
2022-01-15T18:48:54.000Z
|
mygrations/formats/mysql/file_reader/comment_parser.py
|
cmancone/mygrations
|
30d1d568ca7d6c38dbc5211834dd2d04c0bcf078
|
[
"MIT"
] | 5
|
2018-07-18T02:20:48.000Z
|
2022-02-19T09:32:07.000Z
|
from mygrations.core.parse.parser import parser
class comment_parser(parser):
    """Strips a single leading SQL comment from a string.

    Handles ``#`` and ``--`` end-of-line comments as well as ``/* ... */``
    block comments.  After a successful ``parse()`` the comment text is
    available as ``self.comment`` and ``self.matched`` is True.
    """
    rules = ['nope']
    sql = ''
    comment = ''

    def __init__(self, rules=None):
        # We're not yet a rules-based parser (that may happen eventually), so
        # don't run the normal parser __init__().  `rules` is accepted (and
        # ignored) for signature compatibility; the default is None rather
        # than a mutable [] to avoid the shared-default pitfall.
        self.errors = []
        self.warnings = []
        self.matched = False

    def _record(self, comment):
        """Store a parsed comment and mark the parser as matched."""
        self.comment = comment
        # NOTE(review): the misspelled key 'commment' is kept as-is because
        # downstream consumers may already read it — confirm before renaming.
        self._values = {'commment': comment}
        self.matched = True

    def parse(self, sql):
        """Parse *sql*, storing the first comment and returning the remainder.

        :param sql: The SQL string to parse
        :type sql: string
        :returns: The SQL string with the first comment removed
        :rtype: string
        :raises ValueError: if *sql* does not start with a comment

        :Example:

        >>> p = comment_parser()
        >>> p.parse('/* leading note */ SELECT 1')
        'SELECT 1'
        >>> p.comment
        'leading note'
        """
        self.sql = sql.strip()
        self.comment = ''

        # '#' and '--' comments run to end of line; '/*' runs to '*/'.
        if self.sql[0] == '#' or self.sql[:2] == '--':
            # With no newline the whole string is the comment and no SQL remains.
            if not self.sql.count('\n'):
                self._record(self.sql)
                return ''
            # Otherwise the comment ends at the newline; return the rest.
            index = self.sql.index('\n')
            self._record(self.sql[:index])
            return self.sql[index + 1:].strip()

        # We should be dealing with /* ... */; the string must start with it.
        if self.sql[:2] != '/*':
            raise ValueError('SQL passed to comment parser did not start with a comment')
        if not self.sql.count('*/'):
            self.errors.append('Could not find closing comment indicator, */')
            return self.sql

        # Extract the comment body (without the delimiters) and the remainder.
        index = self.sql.index('*/')
        self._record(self.sql[2:index].strip())
        return self.sql[index + 2:].strip()
| 35.342466
| 96
| 0.537597
|
4a029ef19f2f5150fdbffd4042c4d5c6a8d87659
| 7,199
|
py
|
Python
|
src/launch_ros/test_launch_ros/test/test_launch_ros/frontend/test_node_frontend.py
|
househear/_ws_moveit2
|
ea5c43ddd412ade6b4bebbdb929b6e08b7a5e888
|
[
"Apache-2.0"
] | null | null | null |
src/launch_ros/test_launch_ros/test/test_launch_ros/frontend/test_node_frontend.py
|
househear/_ws_moveit2
|
ea5c43ddd412ade6b4bebbdb929b6e08b7a5e888
|
[
"Apache-2.0"
] | null | null | null |
src/launch_ros/test_launch_ros/test/test_launch_ros/frontend/test_node_frontend.py
|
househear/_ws_moveit2
|
ea5c43ddd412ade6b4bebbdb929b6e08b7a5e888
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of how to parse an xml."""
import io
import pathlib
import sys
import textwrap
from launch import LaunchService
from launch.frontend import Parser
from launch_ros.utilities import evaluate_parameters
import pytest
# Path to the YAML parameter file that sits next to this test module.
yaml_params = str(pathlib.Path(__file__).parent / 'params.yaml')
# Escape backslashes if any to keep them after parsing takes place
# (the path is substituted into launch-file text below via .format()).
yaml_params = yaml_params.replace('\\', '\\\\')
# Same escaping for the interpreter path substituted into the launch files.
python_executable = sys.executable.replace('\\', '\\\\')
xml_file = \
r"""
<launch>
<let name="a_string" value="\'[2, 5, 8]\'"/>
<let name="a_list" value="[2, 5, 8]"/>
<node pkg="demo_nodes_py" exec="talker_qos" output="screen" name="my_talker" namespace="my_ns" exec_name="my_talker_process" args="--number_of_cycles 1">
<param name="param1" value="ads"/>
<param name="param_group1">
<param name="param_group2">
<param name="param2" value="2"/>
</param>
<param name="param3" value="2, 5, 8" value-sep=", "/>
<param name="param4" value="$(var a_list)"/>
<param name="param5" value="$(var a_string)"/>
<param name="param6" value="2., 5., 8." value-sep=", "/>
<param name="param7" value="'2', '5', '8'" value-sep=", "/>
<param name="param8" value="''2'', ''5'', ''8''" value-sep=", "/>
<param name="param9" value="\'2\', \'5\', \'8\'" value-sep=", "/>
<param name="param10" value="''asd'', ''bsd'', ''csd''" value-sep=", "/>
<param name="param11" value="'\asd', '\bsd', '\csd'" value-sep=", "/>
<param name="param12" value=""/>
</param>
<param from="{}"/>
<env name="var" value="1"/>
<remap from="foo" to="bar"/>
<remap from="baz" to="foobar"/>
</node>
<node exec="{}" args="-c 'import sys; print(sys.argv[1:])'" name="my_listener" namespace="my_ns" output="screen"/>
</launch>
""".format(yaml_params, python_executable) # noqa: E501
xml_file = textwrap.dedent(xml_file)
yaml_file = \
r"""
launch:
- let:
name: 'a_string'
value: "'[2, 5, 8]'"
- let:
name: 'a_list'
value: '[2, 5, 8]'
- node:
pkg: demo_nodes_py
exec: talker_qos
output: screen
name: my_talker
namespace: my_ns
exec_name: my_talker_process
args: '--number_of_cycles 1'
param:
- name: param1
value: ads
- name: param_group1
param:
- name: param_group2
param:
- name: param2
value: 2
- name: param3
value: [2, 5, 8]
- name: param4
value: $(var a_list)
- name: param5
value: $(var a_string)
- name: param6
value: [2., 5., 8.]
- name: param7
value: ['2', '5', '8']
- name: param8
value: ["'2'", "'5'", "'8'"]
- name: param9
value: ["\\'2\\'", "\\'5\\'", "\\'8\\'"]
- name: param10
value: ["'asd'", "'bsd'", "'csd'"]
- name: param11
value: ['\asd', '\bsd', '\csd']
- name: param12
value: ''
- from: {}
env:
- name: var
value: '1'
remap:
- from: "foo"
to: "bar"
- from: "baz"
to: "foobar"
- node:
exec: {}
output: screen
namespace: my_ns
name: my_listener
args: -c 'import sys; print(sys.argv[1:])'
""".format(yaml_params, python_executable) # noqa: E501
yaml_file = textwrap.dedent(yaml_file)
@pytest.mark.parametrize('file', (xml_file, yaml_file))
def test_node_frontend(file):
    """Parse the XML/YAML node examples and verify parameters, remaps and cmd."""
    root_entity, parser = Parser.load(io.StringIO(file))
    launch_description = parser.parse_description(root_entity)
    launch_service = LaunchService()
    launch_service.include_launch_description(launch_description)
    assert 0 == launch_service.run()

    sub_entities = launch_description.describe_sub_entities()
    evaluated_parameters = evaluate_parameters(
        launch_service.context,
        sub_entities[2]._Node__parameters
    )
    assert isinstance(evaluated_parameters[0], dict)
    assert isinstance(evaluated_parameters[1], dict)
    assert isinstance(evaluated_parameters[2], pathlib.Path)

    assert 'param1' in evaluated_parameters[0]
    assert evaluated_parameters[0]['param1'] == 'ads'

    # Expected flattened name -> value pairs for the nested parameter groups.
    param_dict = evaluated_parameters[1]
    expected = {
        'param_group1.param_group2.param2': 2,
        'param_group1.param3': (2, 5, 8),
        'param_group1.param4': (2, 5, 8),
        'param_group1.param5': '[2, 5, 8]',
        'param_group1.param6': (2., 5., 8.),
        'param_group1.param7': ('2', '5', '8'),
        'param_group1.param8': ("'2'", "'5'", "'8'"),
        'param_group1.param9': ("'2'", "'5'", "'8'"),
        'param_group1.param10': ("'asd'", "'bsd'", "'csd'"),
        'param_group1.param11': ('asd', 'bsd', 'csd'),
        'param_group1.param12': '',
    }
    for name, value in expected.items():
        assert name in param_dict
        assert param_dict[name] == value

    # Check remappings exist.
    remappings = sub_entities[2]._Node__remappings
    assert remappings is not None
    assert len(remappings) == 2

    listener_cmd = sub_entities[3].process_details['cmd']
    assert [
        sys.executable, '-c', 'import sys; print(sys.argv[1:])'
    ] == listener_cmd[:3]
| 39.338798
| 161
| 0.54327
|
4a029f16d5c8a7222921374d41efacfa5fc9ad86
| 340
|
py
|
Python
|
app/__init.py
|
Hackit321/flask_ip2
|
6a26e0ace048207671272beb680b3172adf91a77
|
[
"MIT"
] | null | null | null |
app/__init.py
|
Hackit321/flask_ip2
|
6a26e0ace048207671272beb680b3172adf91a77
|
[
"MIT"
] | null | null | null |
app/__init.py
|
Hackit321/flask_ip2
|
6a26e0ace048207671272beb680b3172adf91a77
|
[
"MIT"
] | null | null | null |
from config import config_option
from flask import Flask
from flask_bootstrap import Bootstrap
def create_app(config_name):
    """Application factory: build and configure a Flask app.

    :param config_name: key into ``config_option`` selecting the config class.
    :returns: the configured :class:`flask.Flask` instance.
    """
    app = Flask(__name__)
    app.config.from_object(config_option[config_name])

    # Imported here (not at module level) to avoid a circular import at load time.
    from .news_request import configure_request
    configure_request(app)

    # Bootstrap registers itself on the app; the instance need not be kept
    # (the previous unused `bootstrap` local has been dropped).
    Bootstrap(app)

    return app
| 17.894737
| 54
| 0.752941
|
4a02a1c5b78b913c3e2756ff2921af8673cfa50d
| 3,417
|
py
|
Python
|
bin/utils/tsc/valuewriter.py
|
kurakihx/py-tsz
|
b6aca805e9bb43493649656491416fdeb053f23b
|
[
"BSD-3-Clause"
] | 1
|
2020-11-16T05:52:45.000Z
|
2020-11-16T05:52:45.000Z
|
bin/utils/tsc/valuewriter.py
|
kurakihx/py-tsz
|
b6aca805e9bb43493649656491416fdeb053f23b
|
[
"BSD-3-Clause"
] | null | null | null |
bin/utils/tsc/valuewriter.py
|
kurakihx/py-tsz
|
b6aca805e9bb43493649656491416fdeb053f23b
|
[
"BSD-3-Clause"
] | null | null | null |
import sys, numpy as np
from bin.utils.tsc.bitbuffer import BitBuffer
from bin.utils.tsc.blockinfo import BlockInfo
from bin.utils.tsc.constants import Constants
class ValueWriter(object):
    """Writes a stream of floats to a BitBuffer using Gorilla-style XOR compression.

    :param bitBuffer: destination buffer; must expose ``AddValue(value, bits)``.
    :param bitnum: width of each float in bits, either 32 or 64 (default 64).
    :raises ValueError: if ``bitnum`` is neither 32 nor 64.
    """

    def __init__(self, bitBuffer, bitnum=64):
        self._buffer = bitBuffer
        self._previousBlockInfo = None  # block info of the last written header
        self._previousValue = 0  # raw integer bit pattern of the previous value
        self._bitnum = bitnum
        if bitnum == 32:
            self.numfunc = np.float32
            self.parsefunc = np.uint32
        elif bitnum == 64:
            self.numfunc = np.double
            self.parsefunc = np.uint64
        else:
            # Previously an unsupported width silently left numfunc/parsefunc
            # unset, deferring the failure to the first AppendValue() call.
            raise ValueError('bitnum must be 32 or 64, got %r' % (bitnum,))

    def AppendValue(self, value):
        """Append one float to the buffer.

        Doubles are encoded by XORing them with the previous value.  If the
        XOR is zero (value unchanged) a single 0 bit is stored, otherwise a
        1 bit followed by one of two encodings:

        1) If the block of meaningful bits falls in between the block of the
           previous header (at least as many leading and trailing zeros), a
           1 control bit plus just the XORred block is stored.
        2) Otherwise a 0 control bit, the leading-zero count
           (``LeadingZerosLengthBits`` bits), the block size
           (``BlockSizeLengthBits`` bits) and the XORred block are stored.
        """
        # Reinterpret the float's bits as an unsigned integer of the same width.
        longValue = int(np.frombuffer(self.numfunc(value).tobytes(), dtype=self.parsefunc)[0])
        xorWithPrevious = self._previousValue ^ longValue

        if xorWithPrevious == 0:
            # Same value as before: a single zero bit suffices.
            self._buffer.AddValue(0, 1)
            return
        self._buffer.AddValue(1, 1)

        currentBlockInfo = BlockInfo.CalulcateBlockInfo(xorWithPrevious)
        # Cost of writing a brand-new header for this value.
        expectedSize = Constants.LeadingZerosLengthBits + Constants.BlockSizeLengthBits + currentBlockInfo.BlockSize
        if self._previousBlockInfo is not None and \
                currentBlockInfo.LeadingZeros >= self._previousBlockInfo.LeadingZeros and \
                currentBlockInfo.TrailingZeros >= self._previousBlockInfo.TrailingZeros and \
                self._previousBlockInfo.BlockSize < expectedSize:
            # Control bit saying we should use the previous block information.
            self._buffer.AddValue(1, 1)
            # Write only the parts of the value that changed.
            blockValue = xorWithPrevious >> self._previousBlockInfo.TrailingZeros
            self._buffer.AddValue(blockValue, self._previousBlockInfo.BlockSize)
        else:
            # Control bit saying we need to provide new block information.
            self._buffer.AddValue(0, 1)
            self._buffer.AddValue(currentBlockInfo.LeadingZeros, Constants.LeadingZerosLengthBits)
            self._buffer.AddValue(currentBlockInfo.BlockSize - Constants.BlockSizeAdjustment, Constants.BlockSizeLengthBits)
            # Write the parts of the value that changed.
            blockValue = xorWithPrevious >> currentBlockInfo.TrailingZeros
            self._buffer.AddValue(blockValue, currentBlockInfo.BlockSize)
            # _previousBlockInfo tracks the most recently *written* block
            # header (Gorilla semantics) — confirm against the reference port.
            self._previousBlockInfo = currentBlockInfo

        self._previousValue = longValue
| 43.807692
| 124
| 0.652619
|
4a02a1e87ebc40ac6a55e1151ac46038b860a640
| 1,666
|
py
|
Python
|
lace/shapes.py
|
bodylabs/lace
|
a6ae80787c8c6ba197bd9bad9254b503f4e05c73
|
[
"BSD-2-Clause"
] | 2
|
2020-05-30T10:28:34.000Z
|
2021-02-17T13:47:23.000Z
|
lace/shapes.py
|
lace/lace
|
a6ae80787c8c6ba197bd9bad9254b503f4e05c73
|
[
"BSD-2-Clause"
] | 11
|
2019-08-29T16:53:29.000Z
|
2021-07-01T06:24:37.000Z
|
lace/shapes.py
|
bodylabs/lace
|
a6ae80787c8c6ba197bd9bad9254b503f4e05c73
|
[
"BSD-2-Clause"
] | 5
|
2017-05-09T16:18:16.000Z
|
2018-05-08T16:16:09.000Z
|
from polliwog import shapes
def _shape_as_mesh(shape_factory_fn, *args, **kwargs):
from lace.mesh import Mesh
vertices, faces = shape_factory_fn(*args, ret_unique_vertices_and_faces=True, **kwargs)
return Mesh(v=vertices, f=faces)
def create_rectangular_prism(*args, **kwargs):
'''
Return a Mesh which is an axis-aligned rectangular prism. One vertex is
`origin`; the diametrically opposite vertex is `origin + size`.
size: 3x1 array.
'''
return _shape_as_mesh(shapes.create_rectangular_prism, *args, **kwargs)
def create_cube(*args, **kwargs):
'''
Return a mesh with an axis-aligned cube. One vertex is `origin`; the
diametrically opposite vertex is `size` units along +x, +y, and +z.
size: int or float.
'''
return _shape_as_mesh(shapes.create_cube, *args, **kwargs)
def create_triangular_prism(*args, **kwargs):
'''
Return a Mesh which is a triangular prism whose base is the triangle
p1, p2, p3. If the vertices are oriented in a counterclockwise
direction, the prism extends from behind them.
'''
return _shape_as_mesh(shapes.create_triangular_prism, *args, **kwargs)
def create_rectangle(*args, **kwargs):
'''
Creates a horizontal plane.
'''
return _shape_as_mesh(shapes.create_rectangle, *args, **kwargs)
def _main():
import math
import numpy as np
points = np.array([
[1, 0, 0],
[0, math.sqrt(1.25), 0],
[-1, 0, 0],
])
prism = create_triangular_prism(*points, height=4)
prism.show()
cube = create_cube(np.array([1., 0., 0.]), 4.)
cube.show()
if __name__ == '__main__':
_main()
| 24.865672
| 91
| 0.660864
|
4a02a1ef1bc7df3b1a0b0cd2f4755f445023ca99
| 190
|
py
|
Python
|
riscv_isac/__init__.py
|
edwin7026/riscv-isac
|
bb2e6d12c3d25f344191c89309dd98830aeef336
|
[
"BSD-3-Clause"
] | 5
|
2021-11-04T18:13:32.000Z
|
2022-03-17T23:40:38.000Z
|
riscv_isac/__init__.py
|
edwin7026/riscv-isac
|
bb2e6d12c3d25f344191c89309dd98830aeef336
|
[
"BSD-3-Clause"
] | 9
|
2021-10-20T10:38:42.000Z
|
2022-03-25T16:54:50.000Z
|
riscv_isac/__init__.py
|
edwin7026/riscv-isac
|
bb2e6d12c3d25f344191c89309dd98830aeef336
|
[
"BSD-3-Clause"
] | 10
|
2021-09-27T15:04:05.000Z
|
2022-03-19T14:50:51.000Z
|
# See LICENSE.incore for details
"""Top-level package for RISC-V ISA Coverage."""
__author__ = """InCore Semiconductors Pvt Ltd"""
__email__ = 'info@incoresemi.com'
__version__ = '0.13.2'
| 23.75
| 48
| 0.721053
|
4a02a3a08bb04beaa41367215d427605820cd26f
| 6,251
|
py
|
Python
|
data_pre-processing/xml2json.py
|
AlongRide/CenterNet_anchor_free
|
836b5a3666756ef38f34ed4c021e541d0b5c1fa2
|
[
"MIT"
] | null | null | null |
data_pre-processing/xml2json.py
|
AlongRide/CenterNet_anchor_free
|
836b5a3666756ef38f34ed4c021e541d0b5c1fa2
|
[
"MIT"
] | null | null | null |
data_pre-processing/xml2json.py
|
AlongRide/CenterNet_anchor_free
|
836b5a3666756ef38f34ed4c021e541d0b5c1fa2
|
[
"MIT"
] | null | null | null |
import xml.etree.ElementTree as ET
import os
import json
coco = dict()
coco['images'] = []
coco['type'] = 'instances'
coco['annotations'] = []
coco['categories'] = []
category_set = dict()
image_set = set()
category_item_id = 0
image_id = 20180000000
annotation_id = 0
def addCatItem(name):
global category_item_id
category_item = dict()
category_item['supercategory'] = 'none'
category_item_id += 1
category_item['id'] = category_item_id
category_item['name'] = name
coco['categories'].append(category_item)
category_set[name] = category_item_id
return category_item_id
def addImgItem(file_name, size):
global image_id
if file_name is None:
raise Exception('Could not find filename tag in xml file.')
if size['width'] is None:
raise Exception('Could not find width tag in xml file.')
if size['height'] is None:
raise Exception('Could not find height tag in xml file.')
image_id += 1
image_item = dict()
image_item['id'] = image_id
image_item['file_name'] = file_name
image_item['width'] = size['width']
image_item['height'] = size['height']
coco['images'].append(image_item)
image_set.add(file_name)
return image_id
def addAnnoItem(object_name, image_id, category_id, bbox):
global annotation_id
annotation_item = dict()
annotation_item['segmentation'] = []
seg = []
#bbox[] is x,y,w,h
#left_top
seg.append(bbox[0])
seg.append(bbox[1])
#left_bottom
seg.append(bbox[0])
seg.append(bbox[1] + bbox[3])
#right_bottom
seg.append(bbox[0] + bbox[2])
seg.append(bbox[1] + bbox[3])
#right_top
seg.append(bbox[0] + bbox[2])
seg.append(bbox[1])
annotation_item['segmentation'].append(seg)
annotation_item['area'] = bbox[2] * bbox[3]
annotation_item['iscrowd'] = 0
annotation_item['ignore'] = 0
annotation_item['image_id'] = image_id
annotation_item['bbox'] = bbox
annotation_item['category_id'] = category_id
annotation_id += 1
annotation_item['id'] = annotation_id
coco['annotations'].append(annotation_item)
def parseXmlFiles(xml_path):
for f in os.listdir(xml_path):
if not f.endswith('.xml'):
continue
bndbox = dict()
size = dict()
current_image_id = None
current_category_id = None
file_name = None
size['width'] = None
size['height'] = None
size['depth'] = None
xml_file = os.path.join(xml_path, f)
print(xml_file)
tree = ET.parse(xml_file)
root = tree.getroot()
if root.tag != 'annotation':
raise Exception('pascal voc xml root element should be annotation, rather than {}'.format(root.tag))
#elem is <folder>, <filename>, <size>, <object>
for elem in root:
current_parent = elem.tag
current_sub = None
object_name = None
if elem.tag == 'folder':
continue
if elem.tag == 'filename':
file_name = elem.text
if file_name in category_set:
raise Exception('file_name duplicated')
#add img item only after parse <size> tag
elif current_image_id is None and file_name is not None and size['width'] is not None:
if file_name not in image_set:
current_image_id = addImgItem(file_name, size)
print('add image with {} and {}'.format(file_name, size))
else:
raise Exception('duplicated image: {}'.format(file_name))
#subelem is <width>, <height>, <depth>, <name>, <bndbox>
for subelem in elem:
bndbox ['xmin'] = None
bndbox ['xmax'] = None
bndbox ['ymin'] = None
bndbox ['ymax'] = None
current_sub = subelem.tag
if current_parent == 'object' and subelem.tag == 'name':
object_name = subelem.text
if object_name not in category_set:
current_category_id = addCatItem(object_name)
else:
current_category_id = category_set[object_name]
elif current_parent == 'size':
if size[subelem.tag] is not None:
raise Exception('xml structure broken at size tag.')
size[subelem.tag] = int(subelem.text)
#option is <xmin>, <ymin>, <xmax>, <ymax>, when subelem is <bndbox>
for option in subelem:
if current_sub == 'bndbox':
if bndbox[option.tag] is not None:
raise Exception('xml structure corrupted at bndbox tag.')
bndbox[option.tag] = int(option.text)
#only after parse the <object> tag
if bndbox['xmin'] is not None:
if object_name is None:
raise Exception('xml structure broken at bndbox tag')
if current_image_id is None:
raise Exception('xml structure broken at bndbox tag')
if current_category_id is None:
raise Exception('xml structure broken at bndbox tag')
bbox = []
#x
bbox.append(bndbox['xmin'])
#y
bbox.append(bndbox['ymin'])
#w
bbox.append(bndbox['xmax'] - bndbox['xmin'])
#h
bbox.append(bndbox['ymax'] - bndbox['ymin'])
print('add annotation with {},{},{},{}'.format(object_name, current_image_id, current_category_id, bbox))
addAnnoItem(object_name, current_image_id, current_category_id, bbox )
if __name__ == '__main__':
xml_path = '/home/ting/tensorflow/Driving/xml_train/'
json_file = '/home/ting/tensorflow/Driving/json_train/test.json'
parseXmlFiles(xml_path)
json.dump(coco, open(json_file, 'w'))
| 36.132948
| 125
| 0.557671
|
4a02a3aac81b347968518ebf37d603b4ff06b168
| 1,585
|
py
|
Python
|
pysamples/pytictoc/txc7.py
|
ranarashadmahmood/OMNETPY
|
13ab49106a3ac700aa633a8eb37acdad5e3157ab
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 31
|
2020-06-23T13:53:47.000Z
|
2022-03-28T08:09:00.000Z
|
pysamples/pytictoc/txc7.py
|
ranarashadmahmood/OMNETPY
|
13ab49106a3ac700aa633a8eb37acdad5e3157ab
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 8
|
2020-11-01T21:35:47.000Z
|
2021-08-29T11:40:50.000Z
|
pysamples/pytictoc/txc7.py
|
ranarashadmahmood/OMNETPY
|
13ab49106a3ac700aa633a8eb37acdad5e3157ab
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 8
|
2021-03-22T15:32:22.000Z
|
2022-02-02T14:57:56.000Z
|
from pyopp import cSimpleModule, cMessage, EV, WATCH, simTime
EV << "---- Loading module {} ----\n".format(__name__)
class PyTxc7(cSimpleModule):
def initialize(self):
self.event = cMessage('event')
self.tictocmsg = None
if self.getName() == 'tic':
EV << "Scheduling first send to t=5.0s\n";
self.tictocmsg = cMessage("tictocmsg")
self.scheduleAt(5.0, self.event)
def handleMessage(self, msg):
if msg is self.event:
# The self-message arrived, so we can send out tictocmsg and nullptr out
# its pointer so that it doesn't confuse us later.
EV << "Wait period is over, sending back message\n"
self.send(self.tictocmsg, "out")
self.tictocmsg = None
else:
# "Lose" the message with 0.1 probability:
if self.uniform(0, 1) < 0.1:
EV << '"Losing" message\n';
self.delete(msg)
else:
# The "delayTime" module parameter can be set to values like
# "exponential(5)" (tictoc7.ned, omnetpp.ini), and then here
# we'll get a different delay every time.
delay = self.par("delayTime").doubleValue()
EV << "Message arrived, starting to wait " << delay << " secs...\n";
self.tictocmsg = msg;
self.scheduleAt(simTime() + delay, self.event);
def __del__(self):
self.cancelAndDelete(self.event)
if self.tictocmsg:
self.delete(self.tictocmsg)
| 37.738095
| 84
| 0.555205
|
4a02a3b724ef1b727ed5e3c0ff491f1f55f429bf
| 3,504
|
py
|
Python
|
core/polyaxon/proxies/schemas/gateway/services.py
|
Ohtar10/polyaxon
|
1e41804e4ae6466b6928d06bc6ee6d2d9c7b8931
|
[
"Apache-2.0"
] | null | null | null |
core/polyaxon/proxies/schemas/gateway/services.py
|
Ohtar10/polyaxon
|
1e41804e4ae6466b6928d06bc6ee6d2d9c7b8931
|
[
"Apache-2.0"
] | 51
|
2021-04-06T07:59:21.000Z
|
2022-03-29T01:08:22.000Z
|
core/polyaxon/proxies/schemas/gateway/services.py
|
Ohtar10/polyaxon
|
1e41804e4ae6466b6928d06bc6ee6d2d9c7b8931
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from polyaxon import settings
from polyaxon.proxies.schemas.base import get_config
PLUGIN_OPTIONS = r"""
location ~ /{plugin_name}/proxy/([-_.:\w]+)/(.*) {{
{auth}
{resolver}
rewrite_log on;
rewrite ^/{plugin_name}/proxy/([-_.:\w]+)/(.*) /{plugin_name}/proxy/$1/$2 break;
proxy_pass http://$1:{port};
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_buffering off;
}}
""" # noqa
def get_plugin_location_config(name: str, port: int, resolver: str, auth: str):
return get_config(
options=PLUGIN_OPTIONS,
indent=0,
plugin_name=name,
port=port,
resolver=resolver,
auth=auth,
)
def get_plugins_location_config(resolver: str, auth: str, proxy_services=None):
plugins = []
if proxy_services:
for plugin, config in proxy_services.items():
plugins.append(
get_plugin_location_config(
name=plugin, port=config["port"], resolver=resolver, auth=auth
)
)
return plugins
SERVICES_OPTIONS = r"""
location ~ /services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {{
{auth}
{resolver}
proxy_pass http://plx-operation-$4.$1.svc.{dns_cluster_with_port};
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}}
""" # noqa
SERVICES_REWRITE_OPTIONS = r"""
location ~ /rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {{
{auth}
{resolver}
rewrite_log on;
rewrite ^/rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
proxy_pass http://plx-operation-$4.$1.svc.{dns_cluster_with_port};
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}}
""" # noqa
def get_services_location_config(resolver: str, auth: str, rewrite: bool = False):
dns_cluster_with_port = settings.PROXIES_CONFIG.dns_custom_cluster
if settings.PROXIES_CONFIG.services_port != 80:
dns_cluster_with_port = "{}:{}".format(
dns_cluster_with_port, settings.PROXIES_CONFIG.services_port
)
return get_config(
options=SERVICES_REWRITE_OPTIONS if rewrite else SERVICES_OPTIONS,
resolver=resolver,
auth=auth,
dns_cluster_with_port=dns_cluster_with_port,
)
| 31.854545
| 102
| 0.665525
|
4a02a3bda0f7ac1a5477869f014b7a41b87e406d
| 14,880
|
py
|
Python
|
frappe/website/doctype/web_form/web_form.py
|
mohamedesmail235/frappe
|
b175ac407ff01092b5fdec6299f5fc0757f3b53e
|
[
"MIT"
] | null | null | null |
frappe/website/doctype/web_form/web_form.py
|
mohamedesmail235/frappe
|
b175ac407ff01092b5fdec6299f5fc0757f3b53e
|
[
"MIT"
] | 89
|
2017-09-19T15:17:44.000Z
|
2022-03-31T00:52:42.000Z
|
frappe/website/doctype/web_form/web_form.py
|
mohamedesmail235/frappe
|
b175ac407ff01092b5fdec6299f5fc0757f3b53e
|
[
"MIT"
] | 1
|
2018-02-08T01:14:48.000Z
|
2018-02-08T01:14:48.000Z
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, json, os
from frappe.website.website_generator import WebsiteGenerator
from frappe import _, scrub
from frappe.utils import cstr
from frappe.utils.file_manager import save_file, remove_file_by_url
from frappe.website.utils import get_comment_list
from frappe.custom.doctype.customize_form.customize_form import docfield_properties
from frappe.utils.file_manager import get_max_file_size
from frappe.modules.utils import export_module_json, get_doc_module
from six.moves.urllib.parse import urlencode
from frappe.integrations.utils import get_payment_gateway_controller
from six import iteritems
class WebForm(WebsiteGenerator):
website = frappe._dict(
no_cache = 1
)
def onload(self):
super(WebForm, self).onload()
if self.is_standard and not frappe.conf.developer_mode:
self.use_meta_fields()
def validate(self):
super(WebForm, self).validate()
if not self.module:
self.module = frappe.db.get_value('DocType', self.doc_type, 'module')
if (not (frappe.flags.in_install or frappe.flags.in_patch or frappe.flags.in_test or frappe.flags.in_fixtures)
and self.is_standard and not frappe.conf.developer_mode):
frappe.throw(_("You need to be in developer mode to edit a Standard Web Form"))
if not frappe.flags.in_import:
self.validate_fields()
if self.accept_payment:
self.validate_payment_amount()
def validate_fields(self):
'''Validate all fields are present'''
from frappe.model import no_value_fields
missing = []
meta = frappe.get_meta(self.doc_type)
for df in self.web_form_fields:
if df.fieldname and (df.fieldtype not in no_value_fields and not meta.has_field(df.fieldname)):
missing.append(df.fieldname)
if missing:
frappe.throw(_('Following fields are missing:') + '<br>' + '<br>'.join(missing))
def validate_payment_amount(self):
if self.amount_based_on_field and not self.amount_field:
frappe.throw(_("Please select a Amount Field."))
elif not self.amount_based_on_field and not self.amount > 0:
frappe.throw(_("Amount must be greater than 0."))
def reset_field_parent(self):
'''Convert link fields to select with names as options'''
for df in self.web_form_fields:
df.parent = self.doc_type
def use_meta_fields(self):
'''Override default properties for standard web forms'''
meta = frappe.get_meta(self.doc_type)
for df in self.web_form_fields:
meta_df = meta.get_field(df.fieldname)
if not meta_df:
continue
for prop in docfield_properties:
if df.fieldtype==meta_df.fieldtype and prop not in ("idx",
"reqd", "default", "description", "default", "options",
"hidden", "read_only", "label"):
df.set(prop, meta_df.get(prop))
# TODO translate options of Select fields like Country
# export
def on_update(self):
"""
Writes the .txt for this page and if write_content is checked,
it will write out a .html file
"""
path = export_module_json(self, self.is_standard, self.module)
if path:
# js
if not os.path.exists(path + '.js'):
with open(path + '.js', 'w') as f:
f.write("""frappe.ready(function() {
// bind events here
})""")
# py
if not os.path.exists(path + '.py'):
with open(path + '.py', 'w') as f:
f.write("""from __future__ import unicode_literals
import frappe
def get_context(context):
# do your magic here
pass
""")
def get_context(self, context):
'''Build context to render the `web_form.html` template'''
self.set_web_form_module()
context._login_required = False
if self.login_required and frappe.session.user == "Guest":
context._login_required = True
doc, delimeter = make_route_string(frappe.form_dict)
context.doc = doc
context.delimeter = delimeter
# check permissions
if frappe.session.user == "Guest" and frappe.form_dict.name:
frappe.throw(_("You need to be logged in to access this {0}.").format(self.doc_type), frappe.PermissionError)
if frappe.form_dict.name and not has_web_form_permission(self.doc_type, frappe.form_dict.name):
frappe.throw(_("You don't have the permissions to access this document"), frappe.PermissionError)
self.reset_field_parent()
if self.is_standard:
self.use_meta_fields()
if not context._login_required:
if self.allow_edit:
if self.allow_multiple:
if not frappe.form_dict.name and not frappe.form_dict.new:
self.build_as_list(context)
else:
if frappe.session.user != 'Guest' and not frappe.form_dict.name:
frappe.form_dict.name = frappe.db.get_value(self.doc_type, {"owner": frappe.session.user}, "name")
if not frappe.form_dict.name:
# only a single doc allowed and no existing doc, hence new
frappe.form_dict.new = 1
# always render new form if login is not required or doesn't allow editing existing ones
if not self.login_required or not self.allow_edit:
frappe.form_dict.new = 1
self.load_document(context)
context.parents = self.get_parents(context)
if self.breadcrumbs:
context.parents = frappe.safe_eval(self.breadcrumbs, { "_": _ })
context.has_header = ((frappe.form_dict.name or frappe.form_dict.new)
and (frappe.session.user!="Guest" or not self.login_required))
if context.success_message:
context.success_message = frappe.db.escape(context.success_message.replace("\n",
"<br>"))
self.add_custom_context_and_script(context)
if not context.max_attachment_size:
context.max_attachment_size = get_max_file_size() / 1024 / 1024
def load_document(self, context):
'''Load document `doc` and `layout` properties for template'''
if frappe.form_dict.name or frappe.form_dict.new:
context.layout = self.get_layout()
context.parents = [{"route": self.route, "label": _(self.title) }]
if frappe.form_dict.name:
context.doc = frappe.get_doc(self.doc_type, frappe.form_dict.name)
context.title = context.doc.get(context.doc.meta.get_title_field())
context.doc.add_seen()
context.reference_doctype = context.doc.doctype
context.reference_name = context.doc.name
if self.allow_comments:
context.comment_list = get_comment_list(context.doc.doctype,
context.doc.name)
def build_as_list(self, context):
'''Web form is a list, show render as list.html'''
from frappe.www.list import get_context as get_list_context
# set some flags to make list.py/list.html happy
frappe.form_dict.web_form_name = self.name
frappe.form_dict.doctype = self.doc_type
frappe.flags.web_form = self
self.update_params_from_form_dict(context)
self.update_list_context(context)
get_list_context(context)
context.is_list = True
def update_params_from_form_dict(self, context):
'''Copy params from list view to new view'''
context.params_from_form_dict = ''
params = {}
for key, value in iteritems(frappe.form_dict):
if frappe.get_meta(self.doc_type).get_field(key):
params[key] = value
if params:
context.params_from_form_dict = '&' + urlencode(params)
def update_list_context(self, context):
'''update list context for stanard modules'''
if hasattr(self, 'web_form_module') and hasattr(self.web_form_module, 'get_list_context'):
self.web_form_module.get_list_context(context)
def get_payment_gateway_url(self, doc):
if self.accept_payment:
controller = get_payment_gateway_controller(self.payment_gateway)
title = "Payment for {0} {1}".format(doc.doctype, doc.name)
amount = self.amount
if self.amount_based_on_field:
amount = doc.get(self.amount_field)
payment_details = {
"amount": amount,
"title": title,
"description": title,
"reference_doctype": doc.doctype,
"reference_docname": doc.name,
"payer_email": frappe.session.user,
"payer_name": frappe.utils.get_fullname(frappe.session.user),
"order_id": doc.name,
"currency": self.currency,
"redirect_to": frappe.utils.get_url(self.route)
}
# Redirect the user to this url
return controller.get_payment_url(**payment_details)
def add_custom_context_and_script(self, context):
'''Update context from module if standard and append script'''
if self.web_form_module:
new_context = self.web_form_module.get_context(context)
if new_context:
context.update(new_context)
js_path = os.path.join(os.path.dirname(self.web_form_module.__file__), scrub(self.name) + '.js')
if os.path.exists(js_path):
context.script = frappe.render_template(open(js_path, 'r').read(), context)
css_path = os.path.join(os.path.dirname(self.web_form_module.__file__), scrub(self.name) + '.css')
if os.path.exists(css_path):
context.style = open(css_path, 'r').read()
def get_layout(self):
layout = []
def add_page(df=None):
new_page = {'sections': []}
layout.append(new_page)
if df and df.fieldtype=='Page Break':
new_page.update(df.as_dict())
return new_page
def add_section(df=None):
new_section = {'columns': []}
layout[-1]['sections'].append(new_section)
if df and df.fieldtype=='Section Break':
new_section.update(df.as_dict())
return new_section
def add_column(df=None):
new_col = []
layout[-1]['sections'][-1]['columns'].append(new_col)
return new_col
page, section, column = None, None, None
for df in self.web_form_fields:
# breaks
if df.fieldtype=='Page Break':
page = add_page(df)
section, column = None, None
if df.fieldtype=='Section Break':
section = add_section(df)
column = None
if df.fieldtype=='Column Break':
column = add_column(df)
# input
if df.fieldtype not in ('Section Break', 'Column Break', 'Page Break'):
if not page:
page = add_page()
section, column = None, None
if not section:
section = add_section()
column = None
if column==None:
column = add_column()
column.append(df)
return layout
def get_parents(self, context):
parents = None
if context.is_list and not context.parents:
parents = [{"title": _("My Account"), "name": "me"}]
elif context.parents:
parents = context.parents
return parents
def set_web_form_module(self):
'''Get custom web form module if exists'''
if self.is_standard:
self.web_form_module = get_doc_module(self.module, self.doctype, self.name)
else:
self.web_form_module = None
def validate_mandatory(self, doc):
'''Validate mandatory web form fields'''
missing = []
for f in self.web_form_fields:
if f.reqd and doc.get(f.fieldname) in (None, [], ''):
missing.append(f)
if missing:
frappe.throw(_('Mandatory Information missing:') + '<br><br>'
+ '<br>'.join(['{0} ({1})'.format(d.label, d.fieldtype) for d in missing]))
@frappe.whitelist(allow_guest=True)
def accept(web_form, data, for_payment=False):
'''Save the web form'''
data = frappe._dict(json.loads(data))
files = []
files_to_delete = []
web_form = frappe.get_doc("Web Form", web_form)
if data.doctype != web_form.doc_type:
frappe.throw(_("Invalid Request"))
elif data.name and not web_form.allow_edit:
frappe.throw(_("You are not allowed to update this Web Form Document"))
frappe.flags.in_web_form = True
if data.name:
# update
doc = frappe.get_doc(data.doctype, data.name)
else:
# insert
doc = frappe.new_doc(data.doctype)
# set values
for fieldname, value in iteritems(data):
if value and isinstance(value, dict):
try:
if "__file_attachment" in value:
files.append((fieldname, value))
continue
if '__no_attachment' in value:
files_to_delete.append(doc.get(fieldname))
value = ''
except ValueError:
pass
doc.set(fieldname, value)
if for_payment:
web_form.validate_mandatory(doc)
doc.run_method('validate_payment')
if doc.name:
if has_web_form_permission(doc.doctype, doc.name, "write"):
doc.save(ignore_permissions=True)
else:
# only if permissions are present
doc.save()
else:
# insert
if web_form.login_required and frappe.session.user=="Guest":
frappe.throw(_("You must login to submit this form"))
doc.insert(ignore_permissions = True)
# add files
if files:
for f in files:
fieldname, filedata = f
# remove earlier attached file (if exists)
if doc.get(fieldname):
remove_file_by_url(doc.get(fieldname), doc.doctype, doc.name)
# save new file
filedoc = save_file(filedata["filename"], filedata["dataurl"],
doc.doctype, doc.name, decode=True)
# update values
doc.set(fieldname, filedoc.file_url)
doc.save()
if files_to_delete:
for f in files_to_delete:
if f:
remove_file_by_url(f, doc.doctype, doc.name)
frappe.flags.web_form_doc = doc
if for_payment:
return web_form.get_payment_gateway_url(doc)
else:
return doc.name
@frappe.whitelist()
def delete(web_form, name):
web_form = frappe.get_doc("Web Form", web_form)
owner = frappe.db.get_value(web_form.doc_type, name, "owner")
if frappe.session.user == owner and web_form.allow_delete:
frappe.delete_doc(web_form.doc_type, name, ignore_permissions=True)
else:
raise frappe.PermissionError("Not Allowed")
def has_web_form_permission(doctype, name, ptype='read'):
if frappe.session.user=="Guest":
return False
# owner matches
elif frappe.db.get_value(doctype, name, "owner")==frappe.session.user:
return True
elif frappe.has_website_permission(name, ptype=ptype, doctype=doctype):
return True
elif check_webform_perm(doctype, name):
return True
else:
return False
def check_webform_perm(doctype, name):
doc = frappe.get_doc(doctype, name)
if hasattr(doc, "has_webform_permission"):
if doc.has_webform_permission():
return True
def get_web_form_list(doctype, txt, filters, limit_start, limit_page_length=20, order_by=None):
from frappe.www.list import get_list
if not filters:
filters = {}
filters["owner"] = frappe.session.user
return get_list(doctype, txt, filters, limit_start, limit_page_length, order_by=order_by,
ignore_permissions=True)
def make_route_string(parameters):
route_string = ""
delimeter = '?'
if isinstance(parameters, dict):
for key in parameters:
if key != "web_form_name":
route_string += route_string + delimeter + key + "=" + cstr(parameters[key])
delimeter = '&'
return (route_string, delimeter)
@frappe.whitelist(allow_guest=True)
def get_form_data(doctype, docname, web_form_name):
out = frappe._dict()
if docname:
doc = frappe.get_doc(doctype, docname)
if doc.has_permission("read"):
out.doc = doc
else:
frappe.throw(_("Not permitted"), frappe.PermissionError)
webform = frappe.get_doc('Web Form', web_form_name)
fields = webform.get('web_form_fields') or []
links = {}
for f in fields:
if f.fieldtype == "Link":
links[f.fieldname] = [d.name for d in frappe.get_all(f.options, fields=["name"])]
out.web_form = webform
out.links = links
return out
| 29.119374
| 112
| 0.720632
|
4a02a3bdafdfbd993b6e316d6ce07a9a6eef50cd
| 4,044
|
py
|
Python
|
indico/modules/attachments/models/legacy_mapping.py
|
uxmaster/indico
|
ecd19f17ef6fdc9f5584f59c87ec647319ce5d31
|
[
"MIT"
] | 1
|
2019-11-03T11:34:16.000Z
|
2019-11-03T11:34:16.000Z
|
indico/modules/attachments/models/legacy_mapping.py
|
NP-compete/indico
|
80db7ca0ef9d1f3240a16b9ff2d84bf0bf26c549
|
[
"MIT"
] | null | null | null |
indico/modules/attachments/models/legacy_mapping.py
|
NP-compete/indico
|
80db7ca0ef9d1f3240a16b9ff2d84bf0bf26c549
|
[
"MIT"
] | null | null | null |
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from sqlalchemy.ext.declarative import declared_attr
from indico.core.db import db
from indico.core.db.sqlalchemy.util.models import auto_table_args
from indico.util.string import return_ascii
class _LegacyLinkMixin(object):
events_backref_name = None
@declared_attr
def event_id(cls):
return db.Column(
db.Integer,
db.ForeignKey('events.events.id'),
nullable=False,
index=True
)
@declared_attr
def session_id(cls):
return db.Column(
db.String,
nullable=True
)
@declared_attr
def contribution_id(cls):
return db.Column(
db.String,
nullable=True
)
@declared_attr
def subcontribution_id(cls):
return db.Column(
db.String,
nullable=True
)
@declared_attr
def event(cls):
return db.relationship(
'Event',
lazy=True,
backref=db.backref(
cls.events_backref_name,
lazy='dynamic'
)
)
@property
def link_repr(self):
"""A kwargs-style string suitable for the object's repr"""
_all_columns = {'event_id', 'contribution_id', 'subcontribution_id', 'session_id'}
info = [(key, getattr(self, key)) for key in _all_columns if getattr(self, key) is not None]
return ', '.join('{}={}'.format(key, value) for key, value in info)
class LegacyAttachmentFolderMapping(_LegacyLinkMixin, db.Model):
"""Legacy attachmentfolder id mapping
Legacy folders ("materials") had ids unique only within their
linked object. This table maps those ids for a specific object
to the new globally unique folder id.
"""
__tablename__ = 'legacy_folder_id_map'
events_backref_name = 'all_legacy_attachment_folder_mappings'
@declared_attr
def __table_args__(cls):
return auto_table_args(cls, schema='attachments')
material_id = db.Column(
db.String,
nullable=False
)
folder_id = db.Column(
db.Integer,
db.ForeignKey('attachments.folders.id'),
primary_key=True,
autoincrement=False
)
folder = db.relationship(
'AttachmentFolder',
lazy=False,
backref=db.backref('legacy_mapping', uselist=False, lazy=True)
)
@return_ascii
def __repr__(self):
return '<LegacyAttachmentFolderMapping({}, material_id={}, {})>'.format(
self.folder, self.material_id, self.link_repr
)
class LegacyAttachmentMapping(_LegacyLinkMixin, db.Model):
"""Legacy attachment id mapping
Legacy attachments ("resources") had ids unique only within their
folder and its linked object. This table maps those ids for a
specific object to the new globally unique attachment id.
"""
__tablename__ = 'legacy_attachment_id_map'
events_backref_name = 'all_legacy_attachment_mappings'
@declared_attr
def __table_args__(cls):
return auto_table_args(cls, schema='attachments')
material_id = db.Column(
db.String,
nullable=False
)
resource_id = db.Column(
db.String,
nullable=False
)
attachment_id = db.Column(
db.Integer,
db.ForeignKey('attachments.attachments.id'),
primary_key=True,
autoincrement=False
)
attachment = db.relationship(
'Attachment',
lazy=False,
backref=db.backref('legacy_mapping', uselist=False, lazy=True)
)
@return_ascii
def __repr__(self):
return '<LegacyAttachmentMapping({}, material_id={}, resource_id={}, {})>'.format(
self.attachment, self.material_id, self.resource_id, self.link_repr
)
| 27.510204
| 100
| 0.636499
|
4a02a5e3baad80bffa5465e99f3e8bdef69ec287
| 1,125
|
py
|
Python
|
app/core/models.py
|
anuwarr/recipe-app-api
|
9ea342c7c2e627dd00d47e3db1c526bd0b44b60e
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
anuwarr/recipe-app-api
|
9ea342c7c2e627dd00d47e3db1c526bd0b44b60e
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
anuwarr/recipe-app-api
|
9ea342c7c2e627dd00d47e3db1c526bd0b44b60e
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
"""Creates and saves a new User"""
if not email:
raise ValueError('all users must have a email')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self,email,password):
user= self.create_user(email,password)
user.is_staff=True
user.is_superuser =True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom user model that supports using email instead of username"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
| 31.25
| 76
| 0.691556
|
4a02a5e741a59bcc11535537d11bb313a8d68d93
| 414
|
py
|
Python
|
qiskit_addon_sympy/__init__.py
|
ajavadia/qiskit-addon-sympy
|
eac15d3a20dbe4754415ac0f3678cf0015169cbe
|
[
"Apache-2.0"
] | null | null | null |
qiskit_addon_sympy/__init__.py
|
ajavadia/qiskit-addon-sympy
|
eac15d3a20dbe4754415ac0f3678cf0015169cbe
|
[
"Apache-2.0"
] | null | null | null |
qiskit_addon_sympy/__init__.py
|
ajavadia/qiskit-addon-sympy
|
eac15d3a20dbe4754415ac0f3678cf0015169cbe
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""Local Sympy Backends."""
from .sympy_statevector_simulator import SympyStatevectorSimulator
from .sympy_unitary_simulator import SympyUnitarySimulator
from .sympyprovider import SympyProvider
__version__ = '0.1.0'
| 27.6
| 77
| 0.777778
|
4a02a60d71771d715cbcadb1d0b2064c53f2b363
| 17,731
|
py
|
Python
|
eegpy/ui/event_editor.py
|
thorstenkranz/eegpy
|
0f9461456999874abbb774896ca832eb27740a9d
|
[
"BSD-2-Clause-FreeBSD"
] | 10
|
2015-05-12T10:42:51.000Z
|
2021-07-20T02:08:03.000Z
|
eegpy/ui/event_editor.py
|
thorstenkranz/eegpy
|
0f9461456999874abbb774896ca832eb27740a9d
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2015-11-19T11:36:30.000Z
|
2018-03-21T05:00:09.000Z
|
eegpy/ui/event_editor.py
|
thorstenkranz/eegpy
|
0f9461456999874abbb774896ca832eb27740a9d
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2016-09-21T22:41:34.000Z
|
2019-01-28T13:55:19.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#################
# Module-Import #
#################
#eegpy-modules
try:
import eegpy
from eegpy.events import EventTable
from eegpy.misc import FATALERROR
from eegpy.ui.widgets.windowwidgets import EegpyBaseWin
from eegpy.ui.icon import image_from_eegpy_stock, eegpy_logo
except ImportError:
raise FATALERROR('Your installation of EegPy seems to be incomplete.\nMaybe you need to set the PYTHONPATH environment-variable adequatly.')
#from eegpy.filter.filt_misc import filterRecursively
#Third-party
try:
import numpy
from scipy.signal import lfilter, butter
except ImportError:
raise FATALERROR('SciPy or NumPy not found!\nPlease visit www.scipy.org or numeric.scipy.org for more information.')
try:
import pygtk
pygtk.require('2.0')
import gobject
import gtk
except ImportError:
raise FATALERROR('GTK cannot be imported.')
#try:
# from matplotlib.axes import Subplot
# # uncomment to select /GTK/GTKAgg/GTKCairo
# from matplotlib.backends.backend_gtk import FigureCanvasGTK as FigureCanvas
# from matplotlib.backends.backend_gtk import NavigationToolbar2GTK as NavigationToolbar
# import matplotlib
# #from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg, NavigationToolbar
# from matplotlib.figure import Figure, SubplotParams
# from matplotlib.axis import Axis
# import matplotlib.cm
#except ImportError:
# raise FATALERROR('Error while importing matplotib. Please visit http://matplotlib.sf.net for more information.')
#native python
import sys
import os
import pickle
class EventManager(gtk.Frame):
_et = None
_fn = None
_keylist = None
def __init__(self, label=""):
gtk.Frame.__init__(self,label)
self.vbox=gtk.VBox()
self.tb_box = gtk.HBox()
self.add(self.vbox)
self.vbox.pack_start(self.tb_box,expand=False)
self.tb = gtk.Toolbar()
self.tooltips = gtk.Tooltips()
self.tb.set_style(gtk.TOOLBAR_ICONS)
self.add_toolbutton_from_stock(gtk.STOCK_OPEN, 'Load', 'Load an EventTable from a file', 'Private', self.load_et)
self.add_toolbutton_from_stock(gtk.STOCK_SAVE, 'Save', 'Save the EventTable back to the original file', 'Private', self.save_et, False)
self.add_toolbutton_from_stock(gtk.STOCK_SAVE_AS, 'Save to', 'Save the EventTable to a file, choose new file', 'Private', self.save_et, True)
self.tb.insert(gtk.SeparatorToolItem(),-1)
self.add_toolbutton_eegpy("add_trigger_type", "Add type", "Add a new trigger type", 'Private', self.cb_add_trigger_type, None)
self.add_toolbutton_eegpy("add_trigger", "Add trigger", "Add a new trigger", 'Private', self.cb_add_trigger, None)
self.tb_box.pack_start(self.tb,expand=True)
self.lb_fn = gtk.Label("New EventTable...")
self.lb_fn.set_max_width_chars(50)
self.lb_fn.set_justify(gtk.JUSTIFY_RIGHT)
self.tb_box.pack_end(self.lb_fn, expand=False)
#HBox für _keylist/triggerlist
self.pane_kl = gtk.HPaned()
self.vbox.pack_end(self.pane_kl)
self.setup_trees()
self._et = EventTable()
def setup_trees(self):
#First: Keys
self.tvsw_keys = gtk.ScrolledWindow()
self.tvsw_keys.set_policy(gtk.POLICY_AUTOMATIC,gtk.POLICY_AUTOMATIC)
self.tree_keys = gtk.TreeStore(gobject.TYPE_STRING)
#self.treeS = gtk.TreeModelSort(self.tree)
self.tv_keys = gtk.TreeView(self.tree_keys)
self.tv_keys.get_selection().connect("changed",self.key_selected)
#self.tv_keys.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
#renderer = gtk.CellRendererText()
#self.col1 = gtk.TreeViewColumn("File ...", renderer,text=0)
self.tv_keys.append_column(gtk.TreeViewColumn("Key", gtk.CellRendererText(),text=0))
#self.tv_keys.show()
self.tvsw_keys.add(self.tv_keys)
self.pane_kl.add1(self.tvsw_keys)
#Second: Triggers
self.tvsw_tr = gtk.ScrolledWindow()
self.tvsw_tr.set_policy(gtk.POLICY_AUTOMATIC,gtk.POLICY_AUTOMATIC)
self.tree_tr = gtk.TreeStore(gobject.TYPE_INT)
#self.treeS = gtk.TreeModelSort(self.tree)
self.tv_tr = gtk.TreeView(self.tree_tr)
self.tv_tr.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
#renderer = gtk.CellRendererText()
#self.col1 = gtk.TreeViewColumn("File ...", renderer,text=0)
self.tv_tr.append_column(gtk.TreeViewColumn("Timepoint", gtk.CellRendererText(),text=0))
#self.tv_keys.show()
#Setting up drag'n'drop
self.tv_tr.enable_model_drag_source( gtk.gdk.BUTTON1_MASK,
[('INT',0,0)],
gtk.gdk.ACTION_DEFAULT|
gtk.gdk.ACTION_MOVE)
self.tv_tr.enable_model_drag_dest([('INT',0,0)],
gtk.gdk.ACTION_DEFAULT)
self.tv_tr.connect("drag_data_get", self.tr_drag_get)
self.tv_tr.connect("drag_data_received", self.tr_drag_received)
self.tv_keys.connect("key_press_event", self.cb_key_pressed)
self.tv_tr.connect("key_press_event", self.cb_key_pressed)
self.tvsw_tr.add(self.tv_tr)
self.pane_kl.add2(self.tvsw_tr)
def add_toolbutton_eegpy(self, icon_name, text, tip_text, tip_private, clicked_function, clicked_param1=None):
iconSize = gtk.ICON_SIZE_SMALL_TOOLBAR
iconw = eegpy.ui.icon.image_from_eegpy_stock(icon_name)
toolitem = gtk.ToolButton(iconw, text)
#toolitem = gtk.ToolButton(iconw)
toolitem.set_icon_widget(iconw)
toolitem.show_all()
toolitem.set_tooltip(self.tooltips, tip_text, tip_private)
toolitem.connect("clicked", clicked_function, clicked_param1)
#toolitem.connect("scroll_event", clicked_function)
self.tb.insert(toolitem, -1)
def add_toolbutton_from_stock(self, icon_name, text, tip_text, tip_private, clicked_function, clicked_param1=None):
iconSize = gtk.ICON_SIZE_SMALL_TOOLBAR
iconw = gtk.Image()
iconw.set_from_stock(icon_name, iconSize)
toolitem = gtk.ToolButton(iconw, text)
#toolitem = gtk.ToolButton(iconw)
toolitem.set_icon_widget(iconw)
toolitem.show_all()
toolitem.set_tooltip(self.tooltips, tip_text, tip_private)
toolitem.connect("clicked", clicked_function, clicked_param1)
#toolitem.connect("scroll_event", clicked_function)
self.tb.insert(toolitem, -1)
def load_et(self,event,data):
dialog = gtk.FileChooserDialog("Open EventTable from file..", None, gtk.FILE_CHOOSER_ACTION_OPEN, (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
dialog.set_default_response(gtk.RESPONSE_OK)
filter = gtk.FileFilter()
filter.set_name("eegpy EventTable or similar")
filter.add_pattern("*.evt")
filter.add_pattern("*.vmrk")
dialog.add_filter(filter)
filter = gtk.FileFilter()
filter.set_name("All files")
filter.add_pattern("*")
dialog.add_filter(filter)
response = dialog.run()
if response == gtk.RESPONSE_OK:
self.set_filename(dialog.get_filename())
#print dialog.get_filename(), 'selected'
elif response == gtk.RESPONSE_CANCEL:
print 'Closed, no files selected'
dialog.destroy()
def save_et(self, event, do_save_as = True):
if do_save_as == False:
self._et.save(self._fn)
else:
dialog = gtk.FileChooserDialog("Save EventTable to file...", None, gtk.FILE_CHOOSER_ACTION_SAVE, (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_SAVE, gtk.RESPONSE_OK))
dialog.set_default_response(gtk.RESPONSE_OK)
filter = gtk.FileFilter()
filter.set_name("eegpy EventTable")
filter.add_pattern("*.evt")
dialog.add_filter(filter)
filter = gtk.FileFilter()
filter.set_name("All files")
filter.add_pattern("*")
dialog.add_filter(filter)
response = dialog.run()
if response == gtk.RESPONSE_OK:
fn = dialog.get_filename()
print fn, 'selected'
dialog.destroy()
self._fn = fn
#Now save...
self._et.save(self._fn)
lbtext = ""
if len(fn)>40:
lbtext = "..."+fn[-37:]
self.lb_fn.set_text(lbtext)
#fh.close()
else:# response == gtk.RESPONSE_CANCEL:
dialog.destroy()
print 'Closed, no files selected'
pass
def set_filename(self,fn):
print fn, "selected for opening"
#success = False
try:
if not os.path.exists(fn):
raise ValueError("File doesn't exist")
self._et = EventTable(fn)
if len(self._et.keys())==0:
print self._et.keys()
raise ValueError("EventTable empty!")
self._fn = fn
except ValueError, e:
print "Error opening EventTable", e
self._et=None
self._fn=None
return False
lbtext = ""
if len(fn)>40:
lbtext = "..."+fn[-37:]
self.lb_fn.set_text(lbtext)
self.setup_keylist()
def setup_keylist(self):
#if self._tv!=None:
# try:
# self._keylist.hide()
# self._keylist.destroy()
# except Exception,e:
# print "Cannot destroy keylist"
#TODO: Real functionalityself.tvsw_keys = gtk.ScrolledWindow()
keys = self._et.keys()
keys.sort()
self.tree_keys.clear()
for k in keys:
iter = self.tree_keys.append(None)
self.tree_keys.set(iter, 0, k)
self.tree_keys.set_sort_column_id(0,gtk.SORT_ASCENDING)
self.show_all()
def setup_triggerlist(self, key):
self.tree_tr.clear()
for tr in self._et[key]:
#print tr
iter = self.tree_tr.append(None)
self.tree_tr.set(iter, 0, int(tr))
self.tree_tr.set_sort_column_id(0,gtk.SORT_ASCENDING)
def key_selected(self,treeselection,*args):
#print tv, path, col, args, self.tree_keys.get(self.tree_keys.get_iter(path),0)[0]
self.tv_tr.get_selection().unselect_all()
#self.tree_tr.clear()
paths = treeselection.get_selected_rows()[1]
if len(paths)>0:
iter = self.tree_keys.get_iter(paths[0])
key = self.tree_keys.get(iter,0)[0]
self.setup_triggerlist(key)
def cb_add_trigger_type(self,event,data):
dialog_label = gtk.Dialog("Choose name...", None, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, (gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT, gtk.STOCK_OK, gtk.RESPONSE_OK))
entry1 = gtk.Entry()
entry1.set_text("Trigger")
dialog_label.vbox.pack_start(entry1)
entry1.show()
response = dialog_label.run()
print response
if response == gtk.RESPONSE_OK:
trig_name = entry1.get_text()
print trig_name
else:
print "Adding trigger-type aborted by user."
dialog_label.destroy()
return False
dialog_label.destroy()
self.add_trigger_type(trig_name, [])
def cb_add_trigger(self,event,data):
dialog_label = gtk.Dialog("Add trigger...", None, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, (gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT, gtk.STOCK_OK, gtk.RESPONSE_OK))
dialog_label.vbox.pack_start(gtk.Label("Timepoint:"))
sb_time = gtk.SpinButton(gtk.Adjustment(0,0,100000000,1,1000))
dialog_label.vbox.pack_start(sb_time)
dialog_label.vbox.show_all()
response = dialog_label.run()
print response
if response == gtk.RESPONSE_OK:
time = sb_time.get_value()
print time
else:
print "Adding trigger aborted by user."
dialog_label.destroy()
return False
dialog_label.destroy()
self.add_trigger(time)
def add_trigger_type(self,key,ts=[]):
if not self._et.has_key(key):
self._et.add_trigger_type(key, ts)
self.setup_keylist()
self.tree_tr.clear()
def add_trigger(self,time):
#find out key
path = self.tv_keys.get_selection().get_selected_rows()[1][0]
iter = self.tree_keys.get_iter(path)
k = self.tree_keys.get(iter,0)[0]
if self._et.has_key(k):
self._et.add_trigger(k, time)
self.setup_triggerlist(k)
def tr_drag_get(self, treeview, context, selection, target_id, etime):
pathlist = treeview.get_selection().get_selected_rows()[1]
model = treeview.get_model()
iterlist = [model.get_iter(row) for row in pathlist]
datalist = [model.get(iter,0)[0] for iter in iterlist]
#print datalist
selection.set(selection.target,8,pickle.dumps(datalist))
#print "Drag_get: ", treeview, context, selection, target_id, etime
def tr_drag_received(self, treeview, context, x, y, selection, info, etime):
#print pickle.loads(selection.data)
datalist = pickle.loads(selection.data)
self.add_trigger(datalist[0])
#print "Drag_received:", treeview, context, x, y, selection, info, etime
def cb_key_pressed(self, widget, event, data=None):
keyname = gtk.gdk.keyval_name(event.keyval)
#print "Key %s (%d) was pressed in widget %s" % (keyname, event.keyval, str(widget))
if keyname == "Delete":
#find out key
path = self.tv_keys.get_selection().get_selected_rows()[1][0]
iter = self.tree_keys.get_iter(path)
k = self.tree_keys.get(iter,0)[0]
if widget==self.tv_keys:
self._et.remove(k)
self.setup_keylist()
self.tv_keys.get_selection().unselect_all()
self.tree_tr.clear()
if widget==self.tv_tr:
pathlist = self.tv_tr.get_selection().get_selected_rows()[1]
iterlist = [self.tree_tr.get_iter(row) for row in pathlist]
datalist = [self.tree_tr.get(iter,0)[0] for iter in iterlist]
for tr in datalist:
self._et.remove(k,tr)
self.setup_triggerlist(k)
class EventTableEditorWin(EegpyBaseWin):
programName = "eegpy: Frequency-Filtering"
# Konstruktor
def __init__(self):
EegpyBaseWin.__init__(self)
self.inner_pane.set_position(300)
self.em1 = EventManager("EventTable 1")
self.em1.tv_tr.get_selection().connect("changed",self.cb_plot_marks)#, "blue")
self.em2 = EventManager("EventTable 2")
self.em2.tv_tr.get_selection().connect("changed",self.cb_plot_marks)#, "red")
self.pane_edit = gtk.HPaned()
self.upper_hbox.pack_start(self.pane_edit)
self.pane_edit.add1(self.em1)
self.pane_edit.pack2(self.em2,False)
self.pane_edit.set_position(self.get_size()[0]/2)
#self.setupOptions()
self.show_all()
#self.setupGUI()
def setupGUI(self):
EegpyBaseWin.setupGUI(self)
def cb_plot_marks(self, treeselection, *args):
#print "Color", color
self.a.cla()
pathlist = self.em1.tv_tr.get_selection().get_selected_rows()[1]
iterlist = [self.em1.tree_tr.get_iter(row) for row in pathlist]
datalist1 = [self.em1.tree_tr.get(iter,0)[0] for iter in iterlist]
pathlist = self.em2.tv_tr.get_selection().get_selected_rows()[1]
iterlist = [self.em2.tree_tr.get_iter(row) for row in pathlist]
datalist2 = [self.em2.tree_tr.get(iter,0)[0] for iter in iterlist]
#print datalist1, datalist2
for i in datalist1:
# print i,
self.a.axvline(i, lw=1, color="blue", ymin=0.5, ymax=1)
#self.a.plot(datalist1,numpy.zeros(len(datalist1)),"bD")
#self.a.plot(datalist2,numpy.ones(len(datalist2)),"rD")
#print ""
for i in datalist2:
# print i,
self.a.axvline(i, lw=1, color="red", ymin=0, ymax=0.5)
#print ""
# if len(datalist1) == 1:
# self.a.set_xlim(datalist1[0]-1000,datalist1[0]+1000)
# elif len(datalist2)==1:
# self.a.set_xlim(datalist2[0]-1000,datalist2[0]+1000)
# else:
# self.a.autoscale_view()
# elif:
# xlim0 = max(min(datalist1),min(datalist2))-500
# xlim1 = min(max(datalist1),max(datalist2))+500
# if xlim1<xlim0:
# xlim0 = min(min(datalist1),min(datalist2))-500
# xlim1 = max(max(datalist1),max(datalist2))+500
# self.a.set_xlim(xlim0,xlim1)
#self.a.set_xlim(numpy.array(datalist1+datalist2).min()-1000,numpy.array(datalist1+datalist2).max()+1000)
self.a.set_ylim(0,1)
self.a.set_yticks([])
self.canvas.draw()
def main():
gtk.main()
return 0
if __name__ == "__main__":
etew = EventTableEditorWin()
main()
| 41.043981
| 184
| 0.61525
|
4a02a6437b00502b8d9a396c6abc17a465aa253b
| 4,366
|
py
|
Python
|
flask_app/match_score.py
|
crazynayan/ipl2020
|
b7fcbc326452807b88d5cad6706d886661c178b1
|
[
"MIT"
] | 1
|
2022-03-04T19:44:36.000Z
|
2022-03-04T19:44:36.000Z
|
flask_app/match_score.py
|
crazynayan/ipl2020
|
b7fcbc326452807b88d5cad6706d886661c178b1
|
[
"MIT"
] | 4
|
2020-09-14T12:59:04.000Z
|
2022-03-17T09:15:41.000Z
|
flask_app/match_score.py
|
crazynayan/ipl2020
|
b7fcbc326452807b88d5cad6706d886661c178b1
|
[
"MIT"
] | null | null | null |
from typing import Dict, List, Tuple
from firestore_ci import FirestoreDocument
from config import Config
class ScoreData:
def __init__(self, score_data: Dict):
self.score_data = score_data
def get_value(self, pid: str, key: str, score_type: str) -> float:
player_record = next((player for team in self.score_data[score_type] for player in team["scores"]
if player["pid"] == pid), dict())
if not player_record or key not in player_record:
return 0.0
try:
value = float(player_record[key])
except ValueError:
value = 0.0
return value
def get_runs(self, pid: str) -> int:
return int(self.get_value(pid, "R", "batting"))
def get_fours(self, pid: str) -> int:
return int(self.get_value(pid, "4s", "batting"))
def get_sixes(self, pid: str) -> int:
return int(self.get_value(pid, "6s", "batting"))
def get_overs(self, pid: str) -> float:
return self.get_value(pid, "O", "bowling")
def get_wickets(self, pid: str) -> int:
return int(self.get_value(pid, "W", "bowling"))
def get_economy_rate(self, pid: str) -> float:
return self.get_value(pid, "Econ", "bowling")
def get_man_of_the_match(self, pid: str) -> bool:
if "man-of-the-match" not in self.score_data:
return False
if "pid" not in self.score_data["man-of-the-match"]:
return False
if pid != self.score_data["man-of-the-match"]["pid"]:
return False
return True
def get_playing_xi(self) -> List[Tuple[str, str]]:
return [(player["pid"], player["name"]) for team in self.score_data["team"] for player in team["players"]]
class MatchPlayer(FirestoreDocument):
def __init__(self):
super().__init__()
self.player_id: str = str()
self.player_name: str = str()
self.team: str = str()
self.match_id: str = str()
self.owner: str = str()
self.gameweek: int = int()
self.type: str = Config.NORMAL
self.runs: int = int()
self.fours: int = int()
self.sixes: int = int()
self.overs: float = float()
self.wickets: int = int()
self.economy_rate: float = float()
self.man_of_the_match: bool = bool()
@property
def batting_points(self) -> int:
score = self.runs + self.fours * 2 + self.sixes * 3
if self.runs >= 50:
score += 10
if self.runs >= 100:
score += 20
return score
@property
def bowling_points(self) -> int:
score = self.wickets * 20
if not self.overs:
return score
if self.economy_rate < 2.0:
score += 50
elif self.economy_rate < 4.0:
score += 40
elif self.economy_rate < 6.0:
score += 30
elif self.economy_rate < 7.0:
score += 20
return score
@property
def man_of_the_match_points(self) -> int:
return 50 if self.man_of_the_match else 0
@property
def total_points(self):
return self.batting_points + self.bowling_points + self.man_of_the_match_points
@property
def adjusted_points(self) -> float:
if self.type == Config.CAPTAIN:
return float(self.total_points * 2)
elif self.type == Config.SUB:
return self.total_points / 2
return float(self.total_points)
@property
def display_class(self) -> str:
if self.type == Config.CAPTAIN:
return "table-success"
elif self.type == Config.SUB:
return "table-danger"
return str()
@property
def owner_full_name(self) -> str:
return Config.USER_LIST.get(self.owner.upper(), str())
def update_scores(self, score_data: ScoreData):
self.runs = score_data.get_runs(self.player_id)
self.fours = score_data.get_fours(self.player_id)
self.sixes = score_data.get_sixes(self.player_id)
self.wickets = score_data.get_wickets(self.player_id)
self.economy_rate = score_data.get_economy_rate(self.player_id)
self.overs = score_data.get_overs(self.player_id)
self.man_of_the_match = score_data.get_man_of_the_match(self.player_id)
MatchPlayer.init("match_players")
| 31.868613
| 114
| 0.599404
|
4a02a71d5be52aeb4c37e90002079a1efd8889ab
| 410
|
py
|
Python
|
utils/make_hash.py
|
AndroidModLoader/gta3sc
|
07504a7334eb67cfac14e1f788331d1ba2b9343a
|
[
"MIT"
] | 54
|
2016-06-22T22:26:58.000Z
|
2022-02-23T09:25:59.000Z
|
utils/make_hash.py
|
GTAResources/gta3sc
|
a4f3f16574c4e0461ff3c14f8a2839cf3040d952
|
[
"MIT"
] | 112
|
2016-06-21T22:52:17.000Z
|
2022-02-08T14:15:13.000Z
|
utils/make_hash.py
|
thelink2012/gta3sc
|
07504a7334eb67cfac14e1f788331d1ba2b9343a
|
[
"MIT"
] | 9
|
2016-06-24T22:27:55.000Z
|
2021-01-11T16:37:36.000Z
|
#!/usr/bin/env python2
"""
"""
import gta3sc
from gta3sc.config import one_at_a_time
import sys
def main(xmlfile):
config = gta3sc.read_config(xmlfile)
for cmd in config.commands:
cmd.hash = one_at_a_time(cmd.name)
config.save_config(xmlfile)
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Usage: make_hash.py <xmlfile>")
sys.exit(1)
main(sys.argv[1])
| 18.636364
| 46
| 0.653659
|
4a02a734f9b78e198293d4fdaff66e39655b57c7
| 213
|
py
|
Python
|
app/config/settings.py
|
emanuelaguna/rakm
|
2d24f32611c866ab00ca2f521b5a1cc0ec473492
|
[
"Ruby"
] | 1
|
2020-10-31T16:10:34.000Z
|
2020-10-31T16:10:34.000Z
|
app/config/settings.py
|
emanuelaguna/rakm
|
2d24f32611c866ab00ca2f521b5a1cc0ec473492
|
[
"Ruby"
] | null | null | null |
app/config/settings.py
|
emanuelaguna/rakm
|
2d24f32611c866ab00ca2f521b5a1cc0ec473492
|
[
"Ruby"
] | null | null | null |
"""app.config.settings.py"""
import os
# Load enviroment variables from .env file.
from dotenv import load_dotenv
load_dotenv()
# The port to serve the app application on.
PORT = int(os.getenv("PORT", "5000"))
| 19.363636
| 43
| 0.7277
|
4a02a7d5d12abc263b57f4f73bbe6cafad2c56e6
| 9,527
|
py
|
Python
|
pair-finder/pair_finder/pipeline/steps/get_jobs_from_travis_api.py
|
lxylxy123456/bugswarm
|
522ea76ede28811463efda7551509f07b3951961
|
[
"BSD-3-Clause"
] | 18
|
2019-12-27T06:53:39.000Z
|
2022-03-03T03:05:35.000Z
|
pair-finder/pair_finder/pipeline/steps/get_jobs_from_travis_api.py
|
lxylxy123456/bugswarm
|
522ea76ede28811463efda7551509f07b3951961
|
[
"BSD-3-Clause"
] | 13
|
2020-01-10T17:11:38.000Z
|
2021-12-13T20:34:38.000Z
|
pair-finder/pair_finder/pipeline/steps/get_jobs_from_travis_api.py
|
lxylxy123456/bugswarm
|
522ea76ede28811463efda7551509f07b3951961
|
[
"BSD-3-Clause"
] | 10
|
2020-01-10T17:36:57.000Z
|
2021-09-13T19:51:43.000Z
|
"""
Download metadata via the Travis API for all jobs for a repository.
"""
import os
import time
from threading import Lock
from typing import Any
from typing import Optional
from typing import Tuple
from requests.exceptions import RequestException
from bugswarm.common import log
from bugswarm.common.json import read_json
from bugswarm.common.json import write_json
from bugswarm.common.travis_wrapper import TravisWrapper
from bugswarm.common.rest_api.database_api import DatabaseAPI
from bugswarm.common.credentials import DATABASE_PIPELINE_TOKEN
from .step import Step
from .step import StepException
from ...utils import Utils
class GetJobsFromTravisAPI(Step):
def process(self, data: Any, context: dict) -> Optional[Any]:
repo = context['repo']
mined_build_exists = False
lock = Lock()
with lock:
travis = TravisWrapper()
last_mined_build_number = 0
if context['original_mined_project_metrics']['last_build_mined']['build_number']:
last_mined_build_number = context['original_mined_project_metrics']['last_build_mined']['build_number']
mined_build_exists = True
builds_json_file = Utils.get_repo_builds_api_result_file(repo)
builds_info_json_file = Utils.get_repo_builds_info_api_result_file(repo)
if os.path.isfile(builds_json_file):
build_list = read_json(builds_json_file)
else:
log.info('Getting the list of builds...')
start_time = time.time()
try:
if not mined_build_exists:
# gets all builds for project
builds = travis.get_builds_for_repo(repo)
else:
# gets the latest builds and stops mining after reaching our last mined build number
builds = travis.get_builds_for_repo(repo, last_mined_build_number)
except RequestException:
error_message = 'Encountered an error while downloading builds for repository {}.'.format(repo)
raise StepException(error_message)
build_list = list(builds)
write_json(builds_json_file, build_list)
log.info('Got the list of builds in', time.time() - start_time, 'seconds.')
if not build_list:
msg = 'Did not get any new builds for {}.'.format(repo)
raise StepException(msg)
if os.path.isfile(builds_info_json_file):
build_list = read_json(builds_info_json_file)
else:
log.info('Downloading build info for',
len(build_list),
'builds... This step may take several minutes for large repositories.')
start_time = time.time()
for idx, build in enumerate(build_list):
build_id = build['id']
try:
build_info = travis.get_build_info(build_id)
except RequestException:
error_message = 'Encountered an error while downloading build info for build {}.'.format(build_id)
raise StepException(error_message)
build['build_info'] = build_info
if (idx + 1) % 500 == 0:
log.info('Downloaded build info for', idx + 1, 'builds so far...')
write_json(builds_info_json_file, build_list)
log.info('Downloaded build info in', time.time() - start_time, 'seconds.')
# Now that we have data from the Travis API, restructure it so it appears as if it came from the database using
# the following query:
# SELECT j.job_id, j.job_number, j.config, j.result,
# b.build_id, b.number, b.finished_at, b.commit, b.branch, b.event_type, b.language,
# c.committed_at, c.compare_at, c.committer_name, c.message
# FROM jobs j
# LEFT JOIN builds b on b.build_id = j.build_id
# LEFT JOIN commits c on b.commit = c.sha
# WHERE j.repo_id = "<repo_id>"
jobs = []
leftover_build_list = []
highest_build_number = 0
highest_build_number_id = 0
# The 'build_list' will return at minimum 25 builds due to the response gathered from Travis API being a page.
# We will always set the 'highest_build_number/id' and skip builds that we have mined previously by checking if
# the 'build_number <= last_mined_build_number'
for build in build_list:
build_id = build['id']
build_number = int(build['number'])
if build_number > highest_build_number:
highest_build_number_id = build_id
highest_build_number = build_number
if build_number <= last_mined_build_number:
continue
for job in build['build_info']['matrix']:
j = {
'job_id': job['id'],
'job_number': job['number'],
'config': job['config'],
'result': job['result'],
'build_id': build['id'],
'number': build['number'],
'finished_at': job['finished_at'],
'commit': build['commit'],
'message': build['message'],
'branch': build['branch'],
'event_type': build['build_info']['event_type'],
'committed_at': build['build_info']['committed_at'],
'compare_at': build['build_info']['compare_url'],
'committer_name': build['build_info']['committer_name'],
}
if 'language' in job['config']:
language = job['config']['language']
else:
log.debug('Language not found in config, defaulting to ruby for job ID {}.'.format(job['id']))
language = 'ruby'
j['language'] = language
jobs.append(j)
leftover_build_list.append(build)
if not jobs:
msg = 'Did not get any jobs for {}.'.format(repo)
# Set the build_number & build_id metric to the latest build info we've received if no jobs are found.
bugswarmapi = DatabaseAPI(DATABASE_PIPELINE_TOKEN)
bugswarmapi.set_latest_build_info_metric(repo, highest_build_number, highest_build_number_id)
raise StepException(msg)
# Expose mining progression metrics via the context. Other pipeline steps must not change these values.
# Do not raise a StepException before the context is populated.
failed_builds, failed_pr_builds = GetJobsFromTravisAPI._count_failed_builds(leftover_build_list)
failed_jobs, failed_pr_jobs = GetJobsFromTravisAPI._count_failed_jobs(leftover_build_list)
context['mined_project_builder'].builds = len(leftover_build_list) + \
context['original_mined_project_metrics']['progression_metrics']['builds']
context['mined_project_builder'].jobs = len(jobs) + \
context['original_mined_project_metrics']['progression_metrics']['jobs']
context['mined_project_builder'].failed_builds = failed_builds + \
context['original_mined_project_metrics']['progression_metrics']['failed_builds']
context['mined_project_builder'].failed_jobs = failed_jobs + \
context['original_mined_project_metrics']['progression_metrics']['failed_jobs']
context['mined_project_builder'].failed_pr_builds = failed_pr_builds + \
context['original_mined_project_metrics']['progression_metrics']['failed_pr_builds']
context['mined_project_builder'].failed_pr_jobs = failed_pr_jobs + \
context['original_mined_project_metrics']['progression_metrics']['failed_pr_jobs']
context['mined_project_builder'].last_build_mined['build_id'] = highest_build_number_id
context['mined_project_builder'].last_build_mined['build_number'] = highest_build_number
return jobs
@staticmethod
def _count_failed_builds(build_list) -> Tuple[int, int]:
failed_builds = 0
failed_pr_builds = 0
for b in build_list:
if b['build_info']['result'] == 0:
# The build succeeded, so don't count it.
continue
is_pr = b['event_type'] == 'pull_request'
if is_pr:
failed_pr_builds += 1
else:
failed_builds += 1
return failed_builds, failed_pr_builds
@staticmethod
def _count_failed_jobs(build_list) -> Tuple[int, int]:
failed_jobs = 0
failed_pr_jobs = 0
for b in build_list:
is_pr = b['event_type'] == 'pull_request'
for j in b['build_info']['matrix']:
# This condition accounts for when the Travis API returns a null job result. In those cases, assume the
# build did not succeed.
# A brief investigation suggests that the result is null when the job errored. See an example at
# https://api.travis-ci.org/jobs/49217775. The corresponding Travis page with a GUI is at
# https://travis-ci.org/gwtbootstrap3/gwtbootstrap3/jobs/49217775.
if j.get('result') != 0:
if is_pr:
failed_pr_jobs += 1
else:
failed_jobs += 1
return failed_jobs, failed_pr_jobs
| 47.874372
| 119
| 0.614254
|
4a02a7f106ecc78c47b3bf2ec8949d5a991458bd
| 24,289
|
py
|
Python
|
petgem/preprocessing.py
|
ocastilloreyes/petgem
|
3b6ab012874d2e1787c7644acab889d9d4d748c7
|
[
"BSD-3-Clause"
] | 20
|
2018-11-08T19:04:59.000Z
|
2022-03-22T22:49:54.000Z
|
petgem/preprocessing.py
|
ocastilloreyes/petgem
|
3b6ab012874d2e1787c7644acab889d9d4d748c7
|
[
"BSD-3-Clause"
] | 1
|
2019-08-17T08:18:16.000Z
|
2019-08-17T11:46:22.000Z
|
petgem/preprocessing.py
|
ocastilloreyes/petgem
|
3b6ab012874d2e1787c7644acab889d9d4d748c7
|
[
"BSD-3-Clause"
] | 9
|
2018-07-18T14:59:27.000Z
|
2021-10-15T08:58:13.000Z
|
#!/usr/bin/env python3
# Author: Octavio Castillo Reyes
# Contact: octavio.castillo@bsc.es
"""Define data preprocessing operations for **PETGEM**."""
# ---------------------------------------------------------------
# Load python modules
# ---------------------------------------------------------------
import numpy as np
import h5py
import meshio
from scipy.spatial import Delaunay
from petsc4py import PETSc
# ---------------------------------------------------------------
# Load petgem modules (BSC)
# ---------------------------------------------------------------
from .common import Print, Timers, measure_all_class_methods
from .parallel import MPIEnvironment, createSequentialDenseMatrixWithArray
from .parallel import writeParallelDenseMatrix, createSequentialVectorWithArray
from .parallel import writePetscVector
from .mesh import computeEdges, computeBoundaryEdges, computeFacesEdges
from .mesh import computeFaces, computeBoundaryFaces
from .mesh import computeBoundaryElements, computeBoundaries, computeFacePlane
from .hvfem import computeConnectivityDOFS
# ###############################################################
# ################ CLASSES DEFINITION ##################
# ###############################################################
@measure_all_class_methods
class Preprocessing():
"""Class for preprocessing."""
def __init__(self):
"""Initialization of a preprocessing class."""
return
def run(self, inputSetup):
"""Run a preprocessing task.
:param obj inputSetup: inputSetup object.
:return: None
"""
# ---------------------------------------------------------------
# Obtain the MPI environment
# ---------------------------------------------------------------
parEnv = MPIEnvironment()
# Start timer
Timers()["Preprocessing"].start()
# ---------------------------------------------------------------
# Preprocessing (sequential task)
# ---------------------------------------------------------------
if( parEnv.rank == 0 ):
# Parameters shortcut (for code legibility)
model = inputSetup.model
run = inputSetup.run
output = inputSetup.output
out_dir = output.get('directory_scratch')
# Compute number of dofs per element
basis_order = run.get('nord')
num_dof_in_element = np.int(basis_order*(basis_order+2)*(basis_order+3)/2)
if (model.get('mode') == 'csem'):
mode = 'csem'
elif (model.get('mode') == 'mt'):
mode = 'mt'
# Get data model
data_model = model.get(mode)
# ---------------------------------------------------------------
# Import mesh file
# ---------------------------------------------------------------
mesh_file = model.get('mesh')
# Import mesh
mesh = meshio.read(mesh_file)
# Number of elements
size = mesh.cells[0][1][:].shape
nElems = size[0]
# ---------------------------------------------------------------
# Preprocessing nodal coordinates
# ---------------------------------------------------------------
Print.master(' Nodal coordinates')
# Build coordinates in PETGEM format where each row
# represent the xyz coordinates of the 4 tetrahedral element
num_dimensions = 3
num_nodes_per_element = 4
data = mesh.points[mesh.cells[0][1][:], :]
data = data.reshape(nElems, num_dimensions*num_nodes_per_element)
# Get matrix dimensions
size = data.shape
# Build PETSc structures
matrix = createSequentialDenseMatrixWithArray(size[0], size[1], data)
# Build path to save the file
out_path = out_dir + '/nodes.dat'
# Write PETGEM nodes in PETSc format
writeParallelDenseMatrix(out_path, matrix, communicator=PETSc.COMM_SELF)
# Remove temporal matrix
del matrix
# ---------------------------------------------------------------
# Preprocessing mesh connectivity
# ---------------------------------------------------------------
Print.master(' Mesh connectivity')
# Get matrix dimensions
size = mesh.cells[0][1][:].shape
# Build PETSc structures
matrix = createSequentialDenseMatrixWithArray(size[0], size[1], mesh.cells[0][1][:])
# Build path to save the file
out_path = out_dir + '/meshConnectivity.dat'
# Write PETGEM connectivity in PETSc format
writeParallelDenseMatrix(out_path, matrix, communicator=PETSc.COMM_SELF)
# Remove temporal matrix
del matrix
# ---------------------------------------------------------------
# Preprocessing edges connectivity
# ---------------------------------------------------------------
Print.master(' Edges connectivity')
# Compute edges
elemsE, edgesNodes = computeEdges(mesh.cells[0][1][:], nElems)
nEdges = edgesNodes.shape[0]
# Get matrix dimensions
size = elemsE.shape
# Build PETSc structures
matrix = createSequentialDenseMatrixWithArray(size[0], size[1], elemsE)
# Build path to save the file
out_path = out_dir + '/edges.dat'
# Write PETGEM edges in PETSc format
writeParallelDenseMatrix(out_path, matrix, communicator=PETSc.COMM_SELF)
# Remove temporal matrix
del matrix
# Reshape edgesNodes and save
num_nodes_per_edge = 2
num_edges_per_element = 6
data = np.array((edgesNodes[elemsE[:], :]), dtype=np.float)
data = data.reshape(nElems, num_nodes_per_edge*num_edges_per_element)
# Get matrix dimensions
size = data.shape
# Build PETSc structures
matrix = createSequentialDenseMatrixWithArray(size[0], size[1], data)
# Build path to save the file
out_path = out_dir + '/edgesNodes.dat'
# Write PETGEM edgesNodes in PETSc format
writeParallelDenseMatrix(out_path, matrix, communicator=PETSc.COMM_SELF)
# Remove temporal matrix
del matrix
# ---------------------------------------------------------------
# Preprocessing faces connectivity
# ---------------------------------------------------------------
Print.master(' Faces connectivity')
# Compute faces
elemsF, facesN = computeFaces(mesh.cells[0][1][:], nElems)
nFaces = facesN.shape[0]
# Get matrix dimensions
size = elemsF.shape
# Build PETSc structures
matrix = createSequentialDenseMatrixWithArray(size[0], size[1], elemsF)
# Build path to save the file
out_path = out_dir + '/faces.dat'
# Write PETGEM edges in PETSc format
writeParallelDenseMatrix(out_path, matrix, communicator=PETSc.COMM_SELF)
# Remove temporal matrix
del matrix
# ---------------------------------------------------------------
# Preprocessing faces-edges connectivity
# ---------------------------------------------------------------
Print.master(' Faces-edges connectivity')
facesE = computeFacesEdges(elemsF, elemsE, nFaces, nElems)
num_faces_per_element = 4
num_edges_per_face = 3
data = np.array((facesE[elemsF[:], :]), dtype=np.float)
data = data.reshape(nElems, num_faces_per_element*num_edges_per_face)
# Get matrix dimensions
size = data.shape
# Build PETSc structures
matrix = createSequentialDenseMatrixWithArray(size[0], size[1], data)
# Build path to save the file
out_path = out_dir + '/facesEdges.dat'
# Write PETGEM edges in PETSc format
writeParallelDenseMatrix(out_path, matrix, communicator=PETSc.COMM_SELF)
del matrix
# ---------------------------------------------------------------
# Preprocessing dofs connectivity
# ---------------------------------------------------------------
Print.master(' DOFs connectivity')
# Compute degrees of freedom connectivity
basis_order = run.get('nord')
dofs, dof_edges, dof_faces, _, total_num_dofs = computeConnectivityDOFS(elemsE,elemsF,basis_order)
# Get matrix dimensions
size = dofs.shape
# Build PETSc structures
matrix = createSequentialDenseMatrixWithArray(size[0], size[1], dofs)
# Build path to save the file
out_path = out_dir + '/dofs.dat'
# Write PETGEM edges in PETSc format
writeParallelDenseMatrix(out_path, matrix, communicator=PETSc.COMM_SELF)
del matrix
# ---------------------------------------------------------------
# Preprocessing sigma model
# ---------------------------------------------------------------
Print.master(' Conductivity model')
i_model = data_model.get('sigma')
if (run.get('conductivity_from_file')):
# Open sigma file
sigma_file = i_model.get('file')
fileID = h5py.File(sigma_file, 'r')
# Read sigma file
conductivityModel = fileID.get('data')[()]
else:
# Get physical groups
elemsS = mesh.cell_data['gmsh:physical'][0]
elemsS -= np.int(1) # 0-based indexing
# Get horizontal sigma
horizontal_sigma = i_model.get('horizontal')
vertical_sigma = i_model.get('vertical')
# Allocate conductivity array
conductivityModel = np.zeros((nElems, 2), dtype=np.float)
for i in np.arange(nElems):
# Set horizontal sigma
conductivityModel[i, 0] = horizontal_sigma[np.int(elemsS[i])]
# Set vertical sigma
conductivityModel[i, 1] = vertical_sigma[np.int(elemsS[i])]
# Get matrix dimensions
size = conductivityModel.shape
# Build PETSc structures
matrix = createSequentialDenseMatrixWithArray(size[0], size[1], conductivityModel)
# Build path to save the file
out_path = out_dir + '/conductivityModel.dat'
# Write PETGEM edges in PETSc format
writeParallelDenseMatrix(out_path, matrix, communicator=PETSc.COMM_SELF)
del matrix
# ---------------------------------------------------------------
# Preprocessing boundaries
# ---------------------------------------------------------------
Print.master(' Boundaries')
# Compute boundary faces
bFacesN, bFaces, nbFaces = computeBoundaryFaces(elemsF, facesN)
# Build array with boundary dofs for csem mode (dirichlet BC)
if (mode == 'csem'):
# Compute boundary edges
bEdges = computeBoundaryEdges(edgesNodes, bFacesN)
# Compute dofs on boundaries
_, indx_boundary_dofs = computeBoundaries(dofs, dof_edges, dof_faces, bEdges, bFaces, basis_order);
# Build PETSc structures
vector = createSequentialVectorWithArray(indx_boundary_dofs)
# Build path to save the file
out_path = out_dir + '/boundaries.dat'
# Write PETGEM nodes in PETSc format
writePetscVector(out_path, vector, communicator=PETSc.COMM_SELF)
del vector
elif (mode == 'mt'):
# Compute to what plane the boundary face belongs
planeFace = computeFacePlane(mesh.points, bFaces, bFacesN)
# Compute boundary elements
bElems, numbElems = computeBoundaryElements(elemsF, bFaces, nFaces)
if (nbFaces != numbElems):
Print.master(' Number of boundary faces is not consistent.')
exit(-1)
# Allocate
data_boundaries = np.zeros((nbFaces, 53+num_dof_in_element), dtype=np.float)
# Fill tmp matrix with data for boundary faces
for i in np.arange(nbFaces):
# Get index of tetrahedral element (boundary element)
iEle = bElems[i]
# Get dofs of element container
dofsElement = dofs[iEle, :]
# Get indexes of nodes for i-boundary element and insert
nodesBoundaryElement = mesh.cells[0][1][iEle,:]
data_boundaries[i, 0:4] = nodesBoundaryElement
# Get nodes coordinates for i-boundary element and insert
coordEle = mesh.points[nodesBoundaryElement, :]
coordEle = coordEle.flatten()
data_boundaries[i, 4:16] = coordEle
# Get indexes of faces for i-boundary element and insert
facesBoundaryElement = elemsF[iEle, :]
data_boundaries[i, 16:20] = facesBoundaryElement
# Get edges indexes for faces in i-boundary element and insert
edgesBoundaryFace = facesE[facesBoundaryElement, :]
edgesBoundaryFace = edgesBoundaryFace.flatten()
data_boundaries[i, 20:32] = edgesBoundaryFace
# Get indexes of edges for i-boundary and insert
edgesBoundaryElement = elemsE[iEle, :]
data_boundaries[i, 32:38] = edgesBoundaryElement
# Get node indexes for edges in i-boundary and insert
edgesNodesBoundaryElement = edgesNodes[edgesBoundaryElement, :]
edgesNodesBoundaryElement = edgesNodesBoundaryElement.flatten()
data_boundaries[i, 38:50] = edgesNodesBoundaryElement
# Get plane face
ifacetype = planeFace[i]
data_boundaries[i, 50] = ifacetype
# Get global face index
localFaceIndex = bFaces[i]
data_boundaries[i, 51] = localFaceIndex
# Get sigma value
sigmaEle = conductivityModel[iEle, 0]
data_boundaries[i, 52] = sigmaEle
# Get dofs for boundary element and insert
dofsBoundaryElement = dofsElement
data_boundaries[i, 53::] = dofsBoundaryElement
# Get matrix dimensions
size = data_boundaries.shape
# Build PETSc structures
matrix = createSequentialDenseMatrixWithArray(size[0], size[1], data_boundaries)
# Build path to save the file
out_path = out_dir + '/boundaryElements.dat'
# Write PETGEM receivers in PETSc format
writeParallelDenseMatrix(out_path, matrix, communicator=PETSc.COMM_SELF)
del matrix
del data_boundaries
# ---------------------------------------------------------------
# Preprocessing receivers
# ---------------------------------------------------------------
Print.master(' Receivers')
# Open receivers_file
receivers_file = model.get('receivers')
fileID = h5py.File(receivers_file, 'r')
# Read receivers
receivers = fileID.get('data')[()]
# Number of receivers
if receivers.ndim == 1:
nReceivers = 1
else:
dim = receivers.shape
nReceivers = dim[0]
# Find out which tetrahedral element source point is in (only for csem mode)
if (mode == 'csem'):
# Allocate vector to save source data
data_source = np.zeros(50+num_dof_in_element, dtype=np.float)
i_model = data_model.get('source')
# Get source position
i_source_position = np.asarray(i_model.get('position'), dtype=np.float)
# Build Delaunay triangulation with nodes
tri = Delaunay(mesh.points)
# Overwrite Delaunay structure with mesh_file connectivity and points
tri.simplices = mesh.cells[0][1][:].astype(np.int32)
tri.vertices = mesh.cells[0][1][:].astype(np.int32)
srcElem = tri.find_simplex(i_source_position, bruteforce=True, tol=1.e-12)
# If srcElem=-1, source not located
if srcElem < 0:
Print.master(' Source no located in the computational domain. Please, verify source position or improve the mesh quality.')
exit(-1)
# Build data for source insertion
# Get indexes of nodes for srcElem and insert
nodesSource = mesh.cells[0][1][srcElem,:]
data_source[0:4] = nodesSource
# Get nodes coordinates for srcElem and insert
coordSource = mesh.points[nodesSource, :]
coordSource = coordSource.flatten()
data_source[4:16] = coordSource
# Get indexes of faces for srcElem and insert
facesSource = elemsF[srcElem, :]
data_source[16:20] = facesSource
# Get edges indexes for faces in srcElem and insert
edgesFace = facesE[facesSource, :]
edgesFace = edgesFace.flatten()
data_source[20:32] = edgesFace
# Get indexes of edges for srcElem and insert
edgesSource = elemsE[srcElem, :]
data_source[32:38] = edgesSource
# Get node indexes for edges in srcElem and insert
edgesNodesSource = edgesNodes[edgesSource, :]
edgesNodesSource = edgesNodesSource.flatten()
data_source[38:50] = edgesNodesSource
# Get dofs for srcElem and insert
dofsSource = dofs[srcElem,:]
data_source[50::] = dofsSource
# Get matrix dimensions
size = data_source.shape
# Build PETSc structures
vector = createSequentialVectorWithArray(data_source)
# Build path to save the file
out_path = out_dir + '/source.dat'
# Write PETGEM nodes in PETSc format
writePetscVector(out_path, vector, communicator=PETSc.COMM_SELF)
del vector
# ---------------------------------------------------------------
# Sparsity pattern
# ---------------------------------------------------------------
# Setup valence for each basis order (adding a small percentage to keep safe)
valence = np.array([50, 200, 400, 800, 1400, 2500])
# Build nnz pattern for each row
nnz = np.full((total_num_dofs), valence[basis_order-1], dtype=np.int)
# Build PETSc structures
vector = createSequentialVectorWithArray(nnz)
# Build path to save the file
out_path = out_dir + '/nnz.dat'
# Write PETGEM nodes in PETSc format
writePetscVector(out_path, vector, communicator=PETSc.COMM_SELF)
# ---------------------------------------------------------------
# Print mesh statistics
# ---------------------------------------------------------------
Print.master(' ')
Print.master(' Mesh statistics')
Print.master(' Mesh file: {0:12}'.format(str(model.get('mesh'))))
Print.master(' Number of elements: {0:12}'.format(str(nElems)))
Print.master(' Number of faces: {0:12}'.format(str(nFaces)))
Print.master(' Number of edges: {0:12}'.format(str(nEdges)))
Print.master(' Number of dofs: {0:12}'.format(str(total_num_dofs)))
if (mode == 'csem'):
Print.master(' Number of boundaries: {0:12}'.format(str(len(indx_boundary_dofs))))
# ---------------------------------------------------------------
# Print data model
# ---------------------------------------------------------------
Print.master(' ')
Print.master(' Model data')
Print.master(' Modeling mode: {0:12}'.format(str(mode)))
i_sigma = data_model.get('sigma')
if (run.get('conductivity_from_file')):
Print.master(' Conductivity file: {0:12}'.format(i_sigma.get('file')))
else:
Print.master(' Horizontal conductivity: {0:12}'.format(str(i_sigma.get('horizontal'))))
Print.master(' Vertical conductivity: {0:12}'.format(str(i_sigma.get('vertical'))))
if (mode == 'csem'):
i_source = data_model.get('source')
Print.master(' Source:')
Print.master(' - Frequency (Hz): {0:12}'.format(str(i_source.get('frequency'))))
Print.master(' - Position (xyz): {0:12}'.format(str(i_source.get('position'))))
Print.master(' - Azimuth: {0:12}'.format(str(i_source.get('azimuth'))))
Print.master(' - Dip: {0:12}'.format(str(i_source.get('dip'))))
Print.master(' - Current: {0:12}'.format(str(i_source.get('current'))))
Print.master(' - Length: {0:12}'.format(str(i_source.get('length'))))
else:
Print.master(' Frequency (Hz): {0:12}'.format(str(data_model.get('frequency'))))
Print.master(' Polarization: {0:12}'.format(str(data_model.get('polarization'))))
Print.master(' Vector basis order: {0:12}'.format(str(basis_order)))
Print.master(' Receivers file: {0:12}'.format(str(model.get('receivers'))))
Print.master(' Number of receivers: {0:12}'.format(str(nReceivers)))
Print.master(' VTK output: {0:12}'.format(str(output.get('vtk'))))
Print.master(' Cuda support: {0:12}'.format(str(run.get('cuda'))))
Print.master(' Output directory: {0:12}'.format(str(output.get('directory'))))
Print.master(' Scratch directory: {0:12}'.format(str(output.get('directory_scratch'))))
# Stop timer
Timers()["Preprocessing"].stop()
# Apply barrier for MPI tasks alignement
parEnv.comm.barrier()
return
# ###############################################################
# ################ FUNCTIONS DEFINITION #################
# ###############################################################
def unitary_test():
"""Unitary test for preprocessing.py script."""
# ###############################################################
# ################ MAIN #################
# ###############################################################
if __name__ == '__main__':
# ---------------------------------------------------------------
# Run unitary test
# ---------------------------------------------------------------
unitary_test()
| 43.763964
| 150
| 0.489728
|
4a02a8252c9608966f3f125316c0f2fda689c4e0
| 2,685
|
py
|
Python
|
cli/polyaxon/polypod/custom_resources/job.py
|
polyaxon/cli
|
3543c0220a8a7c06fc9573cd2a740f8ae4930641
|
[
"Apache-2.0"
] | null | null | null |
cli/polyaxon/polypod/custom_resources/job.py
|
polyaxon/cli
|
3543c0220a8a7c06fc9573cd2a740f8ae4930641
|
[
"Apache-2.0"
] | 1
|
2022-01-24T11:26:47.000Z
|
2022-03-18T23:17:58.000Z
|
cli/polyaxon/polypod/custom_resources/job.py
|
polyaxon/cli
|
3543c0220a8a7c06fc9573cd2a740f8ae4930641
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2018-2022 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional
from polyaxon.k8s import k8s_schemas
from polyaxon.k8s.custom_resources.operation import get_operation_custom_object
from polyaxon.polyflow import V1Environment, V1Notification, V1Termination
from polyaxon.polypod.common.setter import (
set_collect_logs,
set_notify,
set_sync_statuses,
set_termination,
)
from polyaxon.polypod.pod.spec import get_pod_spec, get_pod_template_spec
def get_job_custom_resource(
resource_name: str,
namespace: str,
main_container: k8s_schemas.V1Container,
sidecar_containers: Optional[List[k8s_schemas.V1Container]],
init_containers: Optional[List[k8s_schemas.V1Container]],
volumes: List[k8s_schemas.V1Volume],
termination: V1Termination,
collect_logs: bool,
sync_statuses: bool,
notifications: List[V1Notification],
environment: V1Environment,
labels: Dict[str, str],
annotations: Dict[str, str],
) -> Dict:
metadata, pod_spec = get_pod_spec(
namespace=namespace,
main_container=main_container,
sidecar_containers=sidecar_containers,
init_containers=init_containers,
resource_name=resource_name,
volumes=volumes,
environment=environment,
labels=labels,
annotations=annotations,
)
template_spec = {
"template": get_pod_template_spec(metadata=metadata, pod_spec=pod_spec)
}
custom_object = {"batchJobSpec": template_spec}
custom_object = set_termination(
custom_object=custom_object, termination=termination
)
custom_object = set_collect_logs(
custom_object=custom_object, collect_logs=collect_logs
)
custom_object = set_sync_statuses(
custom_object=custom_object, sync_statuses=sync_statuses
)
custom_object = set_notify(custom_object=custom_object, notifications=notifications)
return get_operation_custom_object(
namespace=namespace,
resource_name=resource_name,
labels=labels,
annotations=annotations,
custom_object=custom_object,
)
| 33.148148
| 88
| 0.741155
|
4a02a82726b13e3547e1da248566fd6f69aa89b4
| 337
|
py
|
Python
|
src/entities/__init__.py
|
Truta446/cardapio-digital-python-printer
|
5e69e445e5fb1b5a73837f27ef9e7f88c2c4efa9
|
[
"MIT"
] | null | null | null |
src/entities/__init__.py
|
Truta446/cardapio-digital-python-printer
|
5e69e445e5fb1b5a73837f27ef9e7f88c2c4efa9
|
[
"MIT"
] | null | null | null |
src/entities/__init__.py
|
Truta446/cardapio-digital-python-printer
|
5e69e445e5fb1b5a73837f27ef9e7f88c2c4efa9
|
[
"MIT"
] | null | null | null |
from ._Category import Category
from ._Complement import Complement
from ._Customer import Customer
from ._Item import Item
from ._Printer import Printer
from ._Product import Product
from ._Restaurant import Restaurant
from ._Order import Order
from ._Table import Table
from ._Combo import Combo
from ._Caster import Caster
| 28.083333
| 36
| 0.804154
|
4a02a88084d30b030b8c0f5e5ac28dd92d407375
| 80,286
|
py
|
Python
|
tests/unit/gapic/dialogflow_v2beta1/test_contexts.py
|
martini9393/python-dialogflow
|
69bf02c733c7116840b15992f505cc298ed55b86
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/gapic/dialogflow_v2beta1/test_contexts.py
|
martini9393/python-dialogflow
|
69bf02c733c7116840b15992f505cc298ed55b86
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/gapic/dialogflow_v2beta1/test_contexts.py
|
martini9393/python-dialogflow
|
69bf02c733c7116840b15992f505cc298ed55b86
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google import auth
from google.api_core import client_options
from google.api_core import exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.auth import credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dialogflow_v2beta1.services.contexts import ContextsAsyncClient
from google.cloud.dialogflow_v2beta1.services.contexts import ContextsClient
from google.cloud.dialogflow_v2beta1.services.contexts import pagers
from google.cloud.dialogflow_v2beta1.services.contexts import transports
from google.cloud.dialogflow_v2beta1.types import context
from google.cloud.dialogflow_v2beta1.types import context as gcd_context
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 as field_mask # type: ignore
from google.protobuf import struct_pb2 as struct # type: ignore
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert ContextsClient._get_default_mtls_endpoint(None) is None
assert ContextsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert (
ContextsClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
ContextsClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
ContextsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert ContextsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [ContextsClient, ContextsAsyncClient])
def test_contexts_client_from_service_account_file(client_class):
creds = credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_contexts_client_get_transport_class():
transport = ContextsClient.get_transport_class()
assert transport == transports.ContextsGrpcTransport
transport = ContextsClient.get_transport_class("grpc")
assert transport == transports.ContextsGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(ContextsClient, transports.ContextsGrpcTransport, "grpc"),
(ContextsAsyncClient, transports.ContextsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
@mock.patch.object(
ContextsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ContextsClient)
)
@mock.patch.object(
ContextsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ContextsAsyncClient),
)
def test_contexts_client_client_options(client_class, transport_class, transport_name):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(ContextsClient, "get_transport_class") as gtc:
transport = transport_class(credentials=credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(ContextsClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
ssl_channel_credentials=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
ssl_channel_credentials=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
ssl_channel_credentials=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
ssl_channel_credentials=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(ContextsClient, transports.ContextsGrpcTransport, "grpc", "true"),
(
ContextsAsyncClient,
transports.ContextsGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(ContextsClient, transports.ContextsGrpcTransport, "grpc", "false"),
(
ContextsAsyncClient,
transports.ContextsGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
ContextsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ContextsClient)
)
@mock.patch.object(
ContextsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ContextsAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_contexts_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
ssl_channel_creds = mock.Mock()
with mock.patch(
"grpc.ssl_channel_credentials", return_value=ssl_channel_creds
):
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_ssl_channel_creds = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_ssl_channel_creds = ssl_channel_creds
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
ssl_channel_credentials=expected_ssl_channel_creds,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.grpc.SslCredentials.__init__", return_value=None
):
with mock.patch(
"google.auth.transport.grpc.SslCredentials.is_mtls",
new_callable=mock.PropertyMock,
) as is_mtls_mock:
with mock.patch(
"google.auth.transport.grpc.SslCredentials.ssl_credentials",
new_callable=mock.PropertyMock,
) as ssl_credentials_mock:
if use_client_cert_env == "false":
is_mtls_mock.return_value = False
ssl_credentials_mock.return_value = None
expected_host = client.DEFAULT_ENDPOINT
expected_ssl_channel_creds = None
else:
is_mtls_mock.return_value = True
ssl_credentials_mock.return_value = mock.Mock()
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_ssl_channel_creds = (
ssl_credentials_mock.return_value
)
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
ssl_channel_credentials=expected_ssl_channel_creds,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.grpc.SslCredentials.__init__", return_value=None
):
with mock.patch(
"google.auth.transport.grpc.SslCredentials.is_mtls",
new_callable=mock.PropertyMock,
) as is_mtls_mock:
is_mtls_mock.return_value = False
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
ssl_channel_credentials=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (ContextsClient, transports.ContextsGrpcTransport, "grpc"),
        (ContextsAsyncClient, transports.ContextsGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_contexts_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes supplied via ``client_options`` must be forwarded verbatim to the transport."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        # The transport constructor is the only observable sink for the
        # options, so assert on its exact keyword arguments.
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            ssl_channel_credentials=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (ContextsClient, transports.ContextsGrpcTransport, "grpc"),
        (ContextsAsyncClient, transports.ContextsGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_contexts_client_client_options_credentials_file(
    client_class, transport_class, transport_name
):
    """A credentials file named in ``client_options`` must reach the transport unchanged."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            ssl_channel_credentials=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
def test_contexts_client_client_options_from_dict():
    """``client_options`` may be a plain dict; its api_endpoint becomes the transport host."""
    with mock.patch(
        "google.cloud.dialogflow_v2beta1.services.contexts.transports.ContextsGrpcTransport.__init__"
    ) as grpc_transport:
        grpc_transport.return_value = None
        client = ContextsClient(client_options={"api_endpoint": "squid.clam.whelk"})
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            ssl_channel_credentials=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
def test_list_contexts(
    transport: str = "grpc", request_type=context.ListContextsRequest
):
    """Happy path for list_contexts over a mocked gRPC stub: request forwarded, pager returned."""
    client = ContextsClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_contexts), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = context.ListContextsResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_contexts(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == context.ListContextsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListContextsPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_contexts_from_dict():
    """Re-run the list_contexts happy path with a plain dict as the request type."""
    test_list_contexts(request_type=dict)
@pytest.mark.asyncio
async def test_list_contexts_async(
    transport: str = "grpc_asyncio", request_type=context.ListContextsRequest
):
    """Async happy path for list_contexts: request forwarded, async pager returned."""
    client = ContextsAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_contexts), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            context.ListContextsResponse(next_page_token="next_page_token_value",)
        )
        response = await client.list_contexts(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == context.ListContextsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListContextsAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_contexts_async_from_dict():
    """Re-run the async list_contexts happy path with a plain dict request."""
    await test_list_contexts_async(request_type=dict)
def test_list_contexts_field_headers():
    """The request's ``parent`` must be echoed into x-goog-request-params routing metadata."""
    client = ContextsClient(credentials=credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = context.ListContextsRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_contexts), "__call__") as call:
        call.return_value = context.ListContextsResponse()
        client.list_contexts(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_contexts_field_headers_async():
    """Async variant: ``parent`` must be echoed into x-goog-request-params metadata."""
    client = ContextsAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = context.ListContextsRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_contexts), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            context.ListContextsResponse()
        )
        await client.list_contexts(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_contexts_flattened():
    """The flattened ``parent`` kwarg must be copied onto the request message."""
    client = ContextsClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_contexts), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = context.ListContextsResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.list_contexts(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
def test_list_contexts_flattened_error():
    """Mixing a request object with flattened keyword fields must raise ValueError."""
    client = ContextsClient(credentials=credentials.AnonymousCredentials(),)
    # Supplying both a request message and flattened fields is ambiguous,
    # so the client is required to reject the call outright.
    request = context.ListContextsRequest()
    with pytest.raises(ValueError):
        client.list_contexts(request, parent="parent_value")
@pytest.mark.asyncio
async def test_list_contexts_flattened_async():
    """Async flattened form: the ``parent`` kwarg must land on the request message."""
    client = ContextsAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_contexts), "__call__") as call:
        # Designate an awaitable return value for the call.  (A dead plain
        # ``call.return_value`` assignment that was immediately overwritten
        # has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            context.ListContextsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_contexts(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_contexts_flattened_error_async():
    """Async client: mixing a request object with flattened fields must raise ValueError."""
    client = ContextsAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Supplying both a request message and flattened fields is ambiguous,
    # so the client is required to reject the call outright.
    request = context.ListContextsRequest()
    with pytest.raises(ValueError):
        await client.list_contexts(request, parent="parent_value")
def test_list_contexts_pager():
    """Iterating the pager must yield every Context across all fake pages (3+0+1+2)."""
    # NOTE: the generated code passed the AnonymousCredentials *class*;
    # instantiate it for consistency with every other test in this file.
    client = ContextsClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_contexts), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            context.ListContextsResponse(
                contexts=[context.Context(), context.Context(), context.Context(),],
                next_page_token="abc",
            ),
            context.ListContextsResponse(contexts=[], next_page_token="def",),
            context.ListContextsResponse(
                contexts=[context.Context(),], next_page_token="ghi",
            ),
            context.ListContextsResponse(
                contexts=[context.Context(), context.Context(),],
            ),
            RuntimeError,
        )
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_contexts(request={})
        assert pager._metadata == metadata
        # Exhaust the pager; it should transparently walk all four pages.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, context.Context) for i in results)
def test_list_contexts_pages():
    """The pager's ``pages`` view must expose each raw page with its next_page_token."""
    # NOTE: the generated code passed the AnonymousCredentials *class*;
    # instantiate it for consistency with every other test in this file.
    client = ContextsClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_contexts), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            context.ListContextsResponse(
                contexts=[context.Context(), context.Context(), context.Context(),],
                next_page_token="abc",
            ),
            context.ListContextsResponse(contexts=[], next_page_token="def",),
            context.ListContextsResponse(
                contexts=[context.Context(),], next_page_token="ghi",
            ),
            context.ListContextsResponse(
                contexts=[context.Context(), context.Context(),],
            ),
            RuntimeError,
        )
        pages = list(client.list_contexts(request={}).pages)
        # The final page carries no token, hence the trailing "".
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_contexts_async_pager():
    """Async-iterating the pager must yield every Context across all fake pages."""
    # NOTE: the generated code passed the AnonymousCredentials *class*;
    # instantiate it for consistency with every other test in this file.
    client = ContextsAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_contexts), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            context.ListContextsResponse(
                contexts=[context.Context(), context.Context(), context.Context(),],
                next_page_token="abc",
            ),
            context.ListContextsResponse(contexts=[], next_page_token="def",),
            context.ListContextsResponse(
                contexts=[context.Context(),], next_page_token="ghi",
            ),
            context.ListContextsResponse(
                contexts=[context.Context(), context.Context(),],
            ),
            RuntimeError,
        )
        async_pager = await client.list_contexts(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        assert len(responses) == 6
        assert all(isinstance(i, context.Context) for i in responses)
@pytest.mark.asyncio
async def test_list_contexts_async_pages():
    """The async pager's ``pages`` view must expose each raw page with its token."""
    # NOTE: the generated code passed the AnonymousCredentials *class*;
    # instantiate it for consistency with every other test in this file.
    client = ContextsAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_contexts), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            context.ListContextsResponse(
                contexts=[context.Context(), context.Context(), context.Context(),],
                next_page_token="abc",
            ),
            context.ListContextsResponse(contexts=[], next_page_token="def",),
            context.ListContextsResponse(
                contexts=[context.Context(),], next_page_token="ghi",
            ),
            context.ListContextsResponse(
                contexts=[context.Context(), context.Context(),],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_contexts(request={})).pages:
            pages.append(page_)
        # The final page carries no token, hence the trailing "".
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
def test_get_context(transport: str = "grpc", request_type=context.GetContextRequest):
    """Happy path for get_context: request forwarded, Context fields surfaced on the response."""
    client = ContextsClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_context), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = context.Context(name="name_value", lifespan_count=1498,)
        response = client.get_context(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == context.GetContextRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, context.Context)
    assert response.name == "name_value"
    assert response.lifespan_count == 1498
def test_get_context_from_dict():
    """Re-run the get_context happy path with a plain dict as the request type."""
    test_get_context(request_type=dict)
@pytest.mark.asyncio
async def test_get_context_async(
    transport: str = "grpc_asyncio", request_type=context.GetContextRequest
):
    """Async happy path for get_context: request forwarded, Context fields surfaced."""
    client = ContextsAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_context), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            context.Context(name="name_value", lifespan_count=1498,)
        )
        response = await client.get_context(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == context.GetContextRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, context.Context)
    assert response.name == "name_value"
    assert response.lifespan_count == 1498
@pytest.mark.asyncio
async def test_get_context_async_from_dict():
    """Re-run the async get_context happy path with a plain dict request."""
    await test_get_context_async(request_type=dict)
def test_get_context_field_headers():
    """The request's ``name`` must be echoed into x-goog-request-params routing metadata."""
    client = ContextsClient(credentials=credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = context.GetContextRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_context), "__call__") as call:
        call.return_value = context.Context()
        client.get_context(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_context_field_headers_async():
    """Async variant: ``name`` must be echoed into x-goog-request-params metadata."""
    client = ContextsAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = context.GetContextRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_context), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(context.Context())
        await client.get_context(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_context_flattened():
    """The flattened ``name`` kwarg must be copied onto the request message."""
    client = ContextsClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_context), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = context.Context()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_context(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
def test_get_context_flattened_error():
    """Mixing a request object with flattened keyword fields must raise ValueError."""
    client = ContextsClient(credentials=credentials.AnonymousCredentials(),)
    # Supplying both a request message and flattened fields is ambiguous,
    # so the client is required to reject the call outright.
    request = context.GetContextRequest()
    with pytest.raises(ValueError):
        client.get_context(request, name="name_value")
@pytest.mark.asyncio
async def test_get_context_flattened_async():
    """Async flattened form: the ``name`` kwarg must land on the request message."""
    client = ContextsAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_context), "__call__") as call:
        # Designate an awaitable return value for the call.  (A dead plain
        # ``call.return_value`` assignment that was immediately overwritten
        # has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(context.Context())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_context(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_context_flattened_error_async():
    """Async client: mixing a request object with flattened fields must raise ValueError."""
    client = ContextsAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Supplying both a request message and flattened fields is ambiguous,
    # so the client is required to reject the call outright.
    request = context.GetContextRequest()
    with pytest.raises(ValueError):
        await client.get_context(request, name="name_value")
def test_create_context(
    transport: str = "grpc", request_type=gcd_context.CreateContextRequest
):
    """Happy path for create_context: request forwarded, created Context fields surfaced."""
    client = ContextsClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_context), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcd_context.Context(name="name_value", lifespan_count=1498,)
        response = client.create_context(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcd_context.CreateContextRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gcd_context.Context)
    assert response.name == "name_value"
    assert response.lifespan_count == 1498
def test_create_context_from_dict():
    """Re-run the create_context happy path with a plain dict as the request type."""
    test_create_context(request_type=dict)
@pytest.mark.asyncio
async def test_create_context_async(
    transport: str = "grpc_asyncio", request_type=gcd_context.CreateContextRequest
):
    """Async happy path for create_context: request forwarded, Context fields surfaced."""
    client = ContextsAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_context), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcd_context.Context(name="name_value", lifespan_count=1498,)
        )
        response = await client.create_context(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcd_context.CreateContextRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gcd_context.Context)
    assert response.name == "name_value"
    assert response.lifespan_count == 1498
@pytest.mark.asyncio
async def test_create_context_async_from_dict():
    """Re-run the async create_context happy path with a plain dict request."""
    await test_create_context_async(request_type=dict)
def test_create_context_field_headers():
    """The request's ``parent`` must be echoed into x-goog-request-params routing metadata."""
    client = ContextsClient(credentials=credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = gcd_context.CreateContextRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_context), "__call__") as call:
        call.return_value = gcd_context.Context()
        client.create_context(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_context_field_headers_async():
    """Async variant: ``parent`` must be echoed into x-goog-request-params metadata."""
    client = ContextsAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = gcd_context.CreateContextRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_context), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_context.Context())
        await client.create_context(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_context_flattened():
    """The flattened ``parent`` and ``context`` kwargs must be copied onto the request."""
    client = ContextsClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_context), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcd_context.Context()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_context(
            parent="parent_value", context=gcd_context.Context(name="name_value"),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
        assert args[0].context == gcd_context.Context(name="name_value")
def test_create_context_flattened_error():
    """Mixing a request object with flattened keyword fields must raise ValueError."""
    client = ContextsClient(credentials=credentials.AnonymousCredentials(),)
    # Supplying both a request message and flattened fields is ambiguous,
    # so the client is required to reject the call outright.
    request = gcd_context.CreateContextRequest()
    with pytest.raises(ValueError):
        client.create_context(
            request,
            parent="parent_value",
            context=gcd_context.Context(name="name_value"),
        )
@pytest.mark.asyncio
async def test_create_context_flattened_async():
    """Async flattened form: ``parent`` and ``context`` kwargs must land on the request."""
    client = ContextsAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_context), "__call__") as call:
        # Designate an awaitable return value for the call.  (A dead plain
        # ``call.return_value`` assignment that was immediately overwritten
        # has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_context.Context())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_context(
            parent="parent_value", context=gcd_context.Context(name="name_value"),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
        assert args[0].context == gcd_context.Context(name="name_value")
@pytest.mark.asyncio
async def test_create_context_flattened_error_async():
    """Async client: mixing a request object with flattened fields must raise ValueError."""
    client = ContextsAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Supplying both a request message and flattened fields is ambiguous,
    # so the client is required to reject the call outright.
    request = gcd_context.CreateContextRequest()
    with pytest.raises(ValueError):
        await client.create_context(
            request,
            parent="parent_value",
            context=gcd_context.Context(name="name_value"),
        )
def test_update_context(
    transport: str = "grpc", request_type=gcd_context.UpdateContextRequest
):
    """Happy path for update_context: request forwarded, updated Context fields surfaced."""
    client = ContextsClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_context), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcd_context.Context(name="name_value", lifespan_count=1498,)
        response = client.update_context(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcd_context.UpdateContextRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gcd_context.Context)
    assert response.name == "name_value"
    assert response.lifespan_count == 1498
def test_update_context_from_dict():
    """Re-run the update_context happy path with a plain dict as the request type."""
    test_update_context(request_type=dict)
@pytest.mark.asyncio
async def test_update_context_async(
    transport: str = "grpc_asyncio", request_type=gcd_context.UpdateContextRequest
):
    """Async happy path for update_context: request forwarded, Context fields surfaced."""
    client = ContextsAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_context), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcd_context.Context(name="name_value", lifespan_count=1498,)
        )
        response = await client.update_context(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcd_context.UpdateContextRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gcd_context.Context)
    assert response.name == "name_value"
    assert response.lifespan_count == 1498
@pytest.mark.asyncio
async def test_update_context_async_from_dict():
    """Re-run the async update_context happy path with a plain dict request."""
    await test_update_context_async(request_type=dict)
def test_update_context_field_headers():
    """The nested ``context.name`` must be echoed into x-goog-request-params metadata."""
    client = ContextsClient(credentials=credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = gcd_context.UpdateContextRequest()
    request.context.name = "context.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_context), "__call__") as call:
        call.return_value = gcd_context.Context()
        client.update_context(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "context.name=context.name/value",) in kw[
        "metadata"
    ]
@pytest.mark.asyncio
async def test_update_context_field_headers_async():
    """Async variant: ``context.name`` must be echoed into x-goog-request-params metadata."""
    client = ContextsAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = gcd_context.UpdateContextRequest()
    request.context.name = "context.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_context), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_context.Context())
        await client.update_context(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "context.name=context.name/value",) in kw[
        "metadata"
    ]
def test_update_context_flattened():
    """The flattened ``context`` and ``update_mask`` kwargs must be copied onto the request."""
    client = ContextsClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_context), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcd_context.Context()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.update_context(
            context=gcd_context.Context(name="name_value"),
            update_mask=field_mask.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].context == gcd_context.Context(name="name_value")
        assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"])
def test_update_context_flattened_error():
    """Mixing a request object with flattened keyword fields must raise ValueError."""
    client = ContextsClient(credentials=credentials.AnonymousCredentials(),)
    # Supplying both a request message and flattened fields is ambiguous,
    # so the client is required to reject the call outright.
    request = gcd_context.UpdateContextRequest()
    with pytest.raises(ValueError):
        client.update_context(
            request,
            context=gcd_context.Context(name="name_value"),
            update_mask=field_mask.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_update_context_flattened_async():
    """Async flattened form: ``context`` and ``update_mask`` kwargs must land on the request."""
    client = ContextsAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_context), "__call__") as call:
        # Designate an awaitable return value for the call.  (A dead plain
        # ``call.return_value`` assignment that was immediately overwritten
        # has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_context.Context())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_context(
            context=gcd_context.Context(name="name_value"),
            update_mask=field_mask.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].context == gcd_context.Context(name="name_value")
        assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"])
@pytest.mark.asyncio
async def test_update_context_flattened_error_async():
    """Async client: mixing a request object with flattened fields must raise ValueError."""
    client = ContextsAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Supplying both a request message and flattened fields is ambiguous,
    # so the client is required to reject the call outright.
    request = gcd_context.UpdateContextRequest()
    with pytest.raises(ValueError):
        await client.update_context(
            request,
            context=gcd_context.Context(name="name_value"),
            update_mask=field_mask.FieldMask(paths=["paths_value"]),
        )
def test_delete_context(
    transport: str = "grpc", request_type=context.DeleteContextRequest
):
    """delete_context should send the request unchanged and surface the mocked None."""
    client = ContextsClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_context), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        response = client.delete_context(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == context.DeleteContextRequest()
    # Establish that the response is the type that we expect
    # (the stub was mocked to return None above).
    assert response is None
def test_delete_context_from_dict():
test_delete_context(request_type=dict)
@pytest.mark.asyncio
async def test_delete_context_async(
transport: str = "grpc_asyncio", request_type=context.DeleteContextRequest
):
client = ContextsAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_context), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_context(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == context.DeleteContextRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_context_async_from_dict():
await test_delete_context_async(request_type=dict)
def test_delete_context_field_headers():
client = ContextsClient(credentials=credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = context.DeleteContextRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_context), "__call__") as call:
call.return_value = None
client.delete_context(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_context_field_headers_async():
client = ContextsAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = context.DeleteContextRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_context), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_context(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_context_flattened():
client = ContextsClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_context), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_context(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
def test_delete_context_flattened_error():
client = ContextsClient(credentials=credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_context(
context.DeleteContextRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_context_flattened_async():
    """Flattened async delete_context should populate the request's name field."""
    client = ContextsAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_context), "__call__") as call:
        # Designate an appropriate return value for the call.
        # (Removed a dead ``call.return_value = None`` that was immediately
        # overwritten by the assignment below.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_context(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_delete_context_flattened_error_async():
client = ContextsAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_context(
context.DeleteContextRequest(), name="name_value",
)
def test_delete_all_contexts(
transport: str = "grpc", request_type=context.DeleteAllContextsRequest
):
client = ContextsClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_all_contexts), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_all_contexts(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == context.DeleteAllContextsRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_all_contexts_from_dict():
test_delete_all_contexts(request_type=dict)
@pytest.mark.asyncio
async def test_delete_all_contexts_async(
transport: str = "grpc_asyncio", request_type=context.DeleteAllContextsRequest
):
client = ContextsAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_all_contexts), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_all_contexts(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == context.DeleteAllContextsRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_all_contexts_async_from_dict():
await test_delete_all_contexts_async(request_type=dict)
def test_delete_all_contexts_field_headers():
client = ContextsClient(credentials=credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = context.DeleteAllContextsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_all_contexts), "__call__"
) as call:
call.return_value = None
client.delete_all_contexts(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_all_contexts_field_headers_async():
client = ContextsAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = context.DeleteAllContextsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_all_contexts), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_all_contexts(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_delete_all_contexts_flattened():
client = ContextsClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_all_contexts), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_all_contexts(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
def test_delete_all_contexts_flattened_error():
client = ContextsClient(credentials=credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_all_contexts(
context.DeleteAllContextsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_delete_all_contexts_flattened_async():
    """Flattened async delete_all_contexts should populate the request's parent field."""
    client = ContextsAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_all_contexts), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # (Removed a dead ``call.return_value = None`` that was immediately
        # overwritten by the assignment below.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_all_contexts(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_delete_all_contexts_flattened_error_async():
client = ContextsAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_all_contexts(
context.DeleteAllContextsRequest(), parent="parent_value",
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.ContextsGrpcTransport(
credentials=credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ContextsClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.ContextsGrpcTransport(
credentials=credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ContextsClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.ContextsGrpcTransport(
credentials=credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ContextsClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.ContextsGrpcTransport(
credentials=credentials.AnonymousCredentials(),
)
client = ContextsClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.ContextsGrpcTransport(
credentials=credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.ContextsGrpcAsyncIOTransport(
credentials=credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[transports.ContextsGrpcTransport, transports.ContextsGrpcAsyncIOTransport],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(auth, "default") as adc:
adc.return_value = (credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = ContextsClient(credentials=credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.ContextsGrpcTransport,)
def test_contexts_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(exceptions.DuplicateCredentialArgs):
transport = transports.ContextsTransport(
credentials=credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_contexts_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.dialogflow_v2beta1.services.contexts.transports.ContextsTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.ContextsTransport(
credentials=credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"list_contexts",
"get_context",
"create_context",
"update_context",
"delete_context",
"delete_all_contexts",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
def test_contexts_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
auth, "load_credentials_from_file"
) as load_creds, mock.patch(
"google.cloud.dialogflow_v2beta1.services.contexts.transports.ContextsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (credentials.AnonymousCredentials(), None)
transport = transports.ContextsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
def test_contexts_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(auth, "default") as adc, mock.patch(
"google.cloud.dialogflow_v2beta1.services.contexts.transports.ContextsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (credentials.AnonymousCredentials(), None)
transport = transports.ContextsTransport()
adc.assert_called_once()
def test_contexts_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(auth, "default") as adc:
adc.return_value = (credentials.AnonymousCredentials(), None)
ContextsClient()
adc.assert_called_once_with(
scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id=None,
)
def test_contexts_transport_auth_adc():
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(auth, "default") as adc:
adc.return_value = (credentials.AnonymousCredentials(), None)
transports.ContextsGrpcTransport(
host="squid.clam.whelk", quota_project_id="octopus"
)
adc.assert_called_once_with(
scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
def test_contexts_host_no_port():
client = ContextsClient(
credentials=credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com"
),
)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_contexts_host_with_port():
client = ContextsClient(
credentials=credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com:8000"
),
)
assert client.transport._host == "dialogflow.googleapis.com:8000"
def test_contexts_grpc_transport_channel():
    """An explicitly supplied channel should be used as-is by the gRPC transport."""
    channel = grpc.insecure_channel("http://localhost/")
    # Check that channel is used if provided.
    transport = transports.ContextsGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # PEP 8: compare to None with ``is``, not ``==`` (fixes E711).
    assert transport._ssl_channel_credentials is None
def test_contexts_grpc_asyncio_transport_channel():
    """An explicitly supplied aio channel should be used as-is by the async transport."""
    channel = aio.insecure_channel("http://localhost/")
    # Check that channel is used if provided.
    transport = transports.ContextsGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # PEP 8: compare to None with ``is``, not ``==`` (fixes E711).
    assert transport._ssl_channel_credentials is None
@pytest.mark.parametrize(
"transport_class",
[transports.ContextsGrpcTransport, transports.ContextsGrpcAsyncIOTransport],
)
def test_contexts_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel", autospec=True
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
@pytest.mark.parametrize(
"transport_class",
[transports.ContextsGrpcTransport, transports.ContextsGrpcAsyncIOTransport],
)
def test_contexts_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel", autospec=True
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_context_path():
    """context_path should build the canonical context resource name."""
    project, session, context = "squid", "clam", "whelk"
    expected = f"projects/{project}/agent/sessions/{session}/contexts/{context}"
    actual = ContextsClient.context_path(project, session, context)
    assert expected == actual
def test_parse_context_path():
    """parse_context_path should invert context_path."""
    expected = {"project": "octopus", "session": "oyster", "context": "nudibranch"}
    path = ContextsClient.context_path(**expected)
    # Check that the path construction is reversible.
    assert ContextsClient.parse_context_path(path) == expected
def test_common_billing_account_path():
    """common_billing_account_path should format the billing-account name."""
    billing_account = "cuttlefish"
    expected = f"billingAccounts/{billing_account}"
    actual = ContextsClient.common_billing_account_path(billing_account)
    assert expected == actual
def test_parse_common_billing_account_path():
    """parse_common_billing_account_path should invert the path builder."""
    expected = {"billing_account": "mussel"}
    path = ContextsClient.common_billing_account_path(**expected)
    # Check that the path construction is reversible.
    assert ContextsClient.parse_common_billing_account_path(path) == expected
def test_common_folder_path():
    """common_folder_path should format the folder resource name."""
    folder = "winkle"
    assert ContextsClient.common_folder_path(folder) == f"folders/{folder}"
def test_parse_common_folder_path():
    """parse_common_folder_path should invert the path builder."""
    expected = {"folder": "nautilus"}
    path = ContextsClient.common_folder_path(**expected)
    # Check that the path construction is reversible.
    assert ContextsClient.parse_common_folder_path(path) == expected
def test_common_organization_path():
    """common_organization_path should format the organization resource name."""
    organization = "scallop"
    expected = f"organizations/{organization}"
    assert ContextsClient.common_organization_path(organization) == expected
def test_parse_common_organization_path():
    """parse_common_organization_path should invert the path builder."""
    expected = {"organization": "abalone"}
    path = ContextsClient.common_organization_path(**expected)
    # Check that the path construction is reversible.
    assert ContextsClient.parse_common_organization_path(path) == expected
def test_common_project_path():
    """common_project_path should format the project resource name."""
    project = "squid"
    assert ContextsClient.common_project_path(project) == f"projects/{project}"
def test_parse_common_project_path():
    """parse_common_project_path should invert the path builder."""
    expected = {"project": "clam"}
    path = ContextsClient.common_project_path(**expected)
    # Check that the path construction is reversible.
    assert ContextsClient.parse_common_project_path(path) == expected
def test_common_location_path():
    """common_location_path should format the project/location name."""
    project, location = "whelk", "octopus"
    expected = f"projects/{project}/locations/{location}"
    assert ContextsClient.common_location_path(project, location) == expected
def test_parse_common_location_path():
    """parse_common_location_path should invert the path builder."""
    expected = {"project": "oyster", "location": "nudibranch"}
    path = ContextsClient.common_location_path(**expected)
    # Check that the path construction is reversible.
    assert ContextsClient.parse_common_location_path(path) == expected
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.ContextsTransport, "_prep_wrapped_messages"
) as prep:
client = ContextsClient(
credentials=credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.ContextsTransport, "_prep_wrapped_messages"
) as prep:
transport_class = ContextsClient.get_transport_class()
transport = transport_class(
credentials=credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
| 37.22114
| 111
| 0.68464
|
4a02a97ac177359c8e97d263ded4f6d61b8ae022
| 1,709
|
py
|
Python
|
src/distributed/rffl/rffl_clientmanager.py
|
Koukyosyumei/NAIST-Experiments
|
2795f6d7f59e7881ba4fe08a37881b8c2b7b4498
|
[
"Apache-2.0"
] | 4
|
2021-08-10T03:16:38.000Z
|
2021-08-17T13:26:49.000Z
|
src/distributed/rffl/rffl_clientmanager.py
|
Koukyosyumei/NAIST-Experiments
|
2795f6d7f59e7881ba4fe08a37881b8c2b7b4498
|
[
"Apache-2.0"
] | null | null | null |
src/distributed/rffl/rffl_clientmanager.py
|
Koukyosyumei/NAIST-Experiments
|
2795f6d7f59e7881ba4fe08a37881b8c2b7b4498
|
[
"Apache-2.0"
] | 2
|
2021-11-04T03:56:01.000Z
|
2022-02-14T06:22:12.000Z
|
import logging
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "../../")))
from core.utils import transform_list_to_grad
from distributed.inflator.inflator_client_manager import FedAVGInflatorClientManager
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "../../../FedML/")))
from fedml_api.distributed.fedavg.FedAvgClientManager import FedAVGClientManager
from fedml_api.distributed.fedavg.message_define import MyMessage
from fedml_api.distributed.fedavg.utils import post_complete_message_to_sweep_process
class RFFLClientManager(FedAVGInflatorClientManager):
    """FedAVG client manager variant that trains with an inflated sample count."""

    def handle_message_receive_model_from_server(self, msg_params):
        """Apply the server's gradients, advance the round, and train locally."""
        logging.info("handle_message_receive_model_from_server.")
        gradients = msg_params.get(MyMessage.MSG_ARG_KEY_MODEL_PARAMS)
        client_index = msg_params.get(MyMessage.MSG_ARG_KEY_CLIENT_INDEX)
        # Mobile clients serialize gradients as plain lists; rebuild them first.
        if self.args.is_mobile == 1:
            gradients = transform_list_to_grad(gradients)
        self.trainer.update_model_with_gradients(gradients)
        self.trainer.update_dataset(int(client_index))
        self.round_idx += 1
        self.__train_with_inflation()
        if self.round_idx == self.num_rounds - 1:
            post_complete_message_to_sweep_process(self.args)
            self.finish()

    def __train_with_inflation(self):
        """Run one local training round and report an inflated sample count."""
        logging.info(
            "#######training with inflation########### round_id = %d" % self.round_idx
        )
        weights, sample_num = self.trainer.train(self.round_idx)
        # Multiply the reported number of samples by the configured factor.
        sample_num = int(sample_num * self.water_powered_magnification)
        self.send_model_to_server(0, weights, sample_num)
| 43.820513
| 86
| 0.74488
|
4a02a9d1212371427c06e4fd1e313e75bbb594f3
| 420
|
py
|
Python
|
venv/Scripts/pip-script.py
|
frozzi18/Assignment
|
dfa6a463bf6b24ed0a346737be722ad00857309e
|
[
"MIT"
] | null | null | null |
venv/Scripts/pip-script.py
|
frozzi18/Assignment
|
dfa6a463bf6b24ed0a346737be722ad00857309e
|
[
"MIT"
] | null | null | null |
venv/Scripts/pip-script.py
|
frozzi18/Assignment
|
dfa6a463bf6b24ed0a346737be722ad00857309e
|
[
"MIT"
] | null | null | null |
#!C:\Users\ROZZI\PycharmProjects\Power\Assignment\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
"""Setuptools-generated launcher for the ``pip`` console script."""
__requires__ = 'pip==19.0.3'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the "-script.py(w)" / ".exe" suffix so argv[0] reads as plain "pip".
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    entry = load_entry_point('pip==19.0.3', 'console_scripts', 'pip')
    sys.exit(entry())
| 32.307692
| 73
| 0.67381
|
4a02a9e7a20278042b3eedf76488900072794733
| 1,075
|
py
|
Python
|
abstractFactory.py
|
findman/python-patterns
|
6fe37cd2df31ffc22369bed9fb592f34c67cd36e
|
[
"MIT"
] | null | null | null |
abstractFactory.py
|
findman/python-patterns
|
6fe37cd2df31ffc22369bed9fb592f34c67cd36e
|
[
"MIT"
] | null | null | null |
abstractFactory.py
|
findman/python-patterns
|
6fe37cd2df31ffc22369bed9fb592f34c67cd36e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
from abc import ABCMeta
class StandardFactory(object):
    """The abstract factory: picks a concrete pet factory by name."""

    @staticmethod
    def get_factory(factory):
        """Return the concrete factory registered for *factory* ('cat' or 'dog').

        Raises:
            TypeError: if *factory* names no known factory.
        """
        if factory == 'cat':
            return CatFactory()
        elif factory == 'dog':
            return DogFactory()
        raise TypeError('Unknown Factory')  # fixed typo: was 'Unknow Factory'
class DogFactory(object):
    """Concrete factory that produces Dog instances."""
    def get_pet(self):
        """Create and return a new Dog."""
        return Dog()
class CatFactory(object):
    """Concrete factory that produces Cat instances."""
    def get_pet(self):
        """Create and return a new Cat."""
        return Cat()
# Dogs and cats are both kinds of animals, so they can share a base class.
class Pet(metaclass=ABCMeta):
    """Abstract base class for pets (see https://docs.python.org/3/library/abc.html).

    Bug fix: the original wrote ``__metaclass__ = ABCMeta``, which is the
    Python 2 spelling and is silently ignored under Python 3 (the file's
    shebang targets python3); the ``metaclass=`` keyword is the 3.x form.
    """
    def eat(self):
        """Return this pet's food; concrete pets override this."""
        pass
# Whatever a Dog should do goes here.
class Dog(Pet):
    """A concrete pet: a dog."""
    def eat(self):
        """Return the dog's food."""
        return 'Dog food...'
class Cat(Pet):
    """A concrete pet: a cat."""
    def eat(self):
        """Return the cat's food."""
        return 'Cat food...'
if __name__ == "__main__":
factory = StandardFactory.get_factory('cat')
pet = factory.get_pet()
print(pet.eat())
factory = StandardFactory.get_factory('dog')
pet = factory.get_pet()
print(pet.eat())
| 18.859649
| 80
| 0.614884
|
4a02aa672e70a26beb535f5058d4dd7f65357c58
| 1,460
|
py
|
Python
|
aliyun-python-sdk-imm/aliyunsdkimm/request/v20170906/DeleteTagJobRequest.py
|
sdk-team/aliyun-openapi-python-sdk
|
384730d707e6720d1676ccb8f552e6a7b330ec86
|
[
"Apache-2.0"
] | 1
|
2019-12-23T12:36:43.000Z
|
2019-12-23T12:36:43.000Z
|
aliyun-python-sdk-imm/aliyunsdkimm/request/v20170906/DeleteTagJobRequest.py
|
sdk-team/aliyun-openapi-python-sdk
|
384730d707e6720d1676ccb8f552e6a7b330ec86
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-imm/aliyunsdkimm/request/v20170906/DeleteTagJobRequest.py
|
sdk-team/aliyun-openapi-python-sdk
|
384730d707e6720d1676ccb8f552e6a7b330ec86
|
[
"Apache-2.0"
] | 1
|
2021-02-23T11:27:54.000Z
|
2021-02-23T11:27:54.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DeleteTagJobRequest(RpcRequest):
    """RPC request wrapper for the IMM ``DeleteTagJob`` API (version 2017-09-06)."""
    def __init__(self):
        RpcRequest.__init__(self, 'imm', '2017-09-06', 'DeleteTagJob','imm')
    def get_JobId(self):
        """Return the ``JobId`` query parameter, if set."""
        return self.get_query_params().get('JobId')
    def set_JobId(self,JobId):
        """Set the ``JobId`` query parameter (identifies the job to delete)."""
        self.add_query_param('JobId',JobId)
    def get_Project(self):
        """Return the ``Project`` query parameter, if set."""
        return self.get_query_params().get('Project')
    def set_Project(self,Project):
        """Set the ``Project`` query parameter."""
        self.add_query_param('Project',Project)
    def get_ClearIndexData(self):
        """Return the ``ClearIndexData`` query parameter, if set."""
        return self.get_query_params().get('ClearIndexData')
    def set_ClearIndexData(self,ClearIndexData):
        """Set the ``ClearIndexData`` query parameter."""
        self.add_query_param('ClearIndexData',ClearIndexData)
| 34.761905
| 71
| 0.760274
|
4a02ac4afa27275e48ae710d8792ba594c64d1d9
| 2,136
|
py
|
Python
|
server.py
|
DarknessRisesFromBelow/server-Updated-App
|
f218335e9cc82c0eb27b6309db7bc1745fc9a8be
|
[
"Unlicense"
] | null | null | null |
server.py
|
DarknessRisesFromBelow/server-Updated-App
|
f218335e9cc82c0eb27b6309db7bc1745fc9a8be
|
[
"Unlicense"
] | null | null | null |
server.py
|
DarknessRisesFromBelow/server-Updated-App
|
f218335e9cc82c0eb27b6309db7bc1745fc9a8be
|
[
"Unlicense"
] | null | null | null |
import socketserver
import os
from http.server import BaseHTTPRequestHandler, HTTPServer
version = 0  # monotonically increasing version counter, bumped on each detected change
versionIncrementation = 1  # step added to `version` per change
strings = []  # directory listing snapshot from the previous check
previousStringsContents = []  # file contents from the check before last
stringsContent = []  # file contents from the most recent check
# NOTE(review): placeholder path — must be replaced with the real served directory.
os.chdir("Your projects files path here.")
def GetLatestVersion():
    """Return the new version number if the served directory changed, else 0.

    Compares the current directory listing and the file-content snapshots
    (refreshed by GetAllFilesContents) against the previous check; on any
    difference the listing snapshot is updated and the global version counter
    is advanced by versionIncrementation.
    """
    global strings
    global version
    GetAllFilesContents()  # refreshes stringsContent / previousStringsContents
    unchanged = GetAllFiles() == strings and stringsContent == previousStringsContents
    if unchanged:
        return 0
    strings = GetAllFiles()
    version += versionIncrementation
    return version
def GetAllFiles():
    """Return the names of all entries in the current working directory."""
    return os.listdir(os.curdir)
def GetAllFilesContents():
    """Refresh and return the global snapshot of every file's content.

    Moves the current snapshot into previousStringsContents, then reads the
    content of each entry in the working directory into stringsContent.
    """
    global stringsContent
    global previousStringsContents
    previousStringsContents = stringsContent
    # List the directory once instead of calling os.listdir on every loop
    # iteration (the original re-listed per index, which was O(n^2) in
    # directory scans and raced against concurrent file creation/deletion).
    stringsContent = [GetFile(name) for name in GetAllFiles()]
    return stringsContent
def GetFile(name):
    """Return the text content of *name*, or the sentinel "1" for skipped names.

    Hidden files (leading '.') and names without an extension (no '.') are not
    read and yield "1", matching the protocol the original code implemented.
    """
    if name.startswith('.') or "." not in name:
        return "1"
    # 'with' guarantees the handle is closed; the original leaked it.
    with open(name, "r") as f:
        return f.read()
class MyHandler(BaseHTTPRequestHandler):
    """HTTP handler: '/update<version>' answers update queries; any other path
    is served as a file from the working directory via GetFile."""

    def do_GET(self):
        if '/update' in self.path:
            # Everything after '/update' is the client's current version number.
            b = self.path.replace("/update", "")
            a = GetLatestVersion()
            if(float(a) > float(b)):
                # Server is newer: reply with the current file listing.
                a = GetAllFiles()
                self.send_response(200, message = None)
                self.send_header("Content-type", "text/html")
                self.end_headers()
                self.wfile.write(bytes("{}".format(a).encode("utf-8")))
            else:
                self.send_response(200, message = None)
                self.send_header("Content-type", "text/html")
                self.end_headers()
                self.wfile.write(bytes("you dont need to update!".encode("utf-8")))
        else:
            # Treat the path as a file name: undo URL-encoded spaces, strip
            # slashes, and send the file's content (or "1" for skipped names).
            self.send_response(200, message = None)
            self.send_header("Content-type", "text/html")
            self.end_headers()
            self.wfile.write(bytes("{}".format(GetFile(self.path.replace("%20", " ").replace("/", ""))).encode("utf-8")))
# Listen on all interfaces, port 8080, and serve requests forever (blocking).
httpd = socketserver.TCPServer(("", 8080), MyHandler)
httpd.serve_forever()
| 30.514286
| 121
| 0.603464
|
4a02ac6172c0a1e19a95d60a260e7f0d589aec47
| 814
|
bzl
|
Python
|
gitops/defs.bzl
|
apesternikov/rules_gitops
|
591e101db2c7fdfb0b38b8b6703a4b5e349e36e0
|
[
"Apache-2.0"
] | 94
|
2020-03-06T02:48:39.000Z
|
2022-03-31T06:25:20.000Z
|
gitops/defs.bzl
|
apesternikov/rules_gitops
|
591e101db2c7fdfb0b38b8b6703a4b5e349e36e0
|
[
"Apache-2.0"
] | 30
|
2020-03-06T21:46:08.000Z
|
2022-02-08T20:12:03.000Z
|
gitops/defs.bzl
|
apesternikov/rules_gitops
|
591e101db2c7fdfb0b38b8b6703a4b5e349e36e0
|
[
"Apache-2.0"
] | 32
|
2020-03-06T19:55:41.000Z
|
2022-03-03T04:15:14.000Z
|
# Copyright 2020 Adobe. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
GitOps rules public interface
"""
load("@com_adobe_rules_gitops//skylib:k8s.bzl", _k8s_deploy = "k8s_deploy", _k8s_test_setup = "k8s_test_setup")
k8s_deploy = _k8s_deploy
k8s_test_setup = _k8s_test_setup
| 42.842105
| 111
| 0.780098
|
4a02acb99b70b26b958a94173c4e86d900531a55
| 28,653
|
py
|
Python
|
Core/Python/invoke_refresh_inventory.py
|
Anon-Artist/OpenManage-Enterprise
|
9d807204a6bacf060d72f5ac4f88ca4abdb5e972
|
[
"Apache-2.0"
] | null | null | null |
Core/Python/invoke_refresh_inventory.py
|
Anon-Artist/OpenManage-Enterprise
|
9d807204a6bacf060d72f5ac4f88ca4abdb5e972
|
[
"Apache-2.0"
] | null | null | null |
Core/Python/invoke_refresh_inventory.py
|
Anon-Artist/OpenManage-Enterprise
|
9d807204a6bacf060d72f5ac4f88ca4abdb5e972
|
[
"Apache-2.0"
] | null | null | null |
#
# _author_ = Grant Curell <grant_curell@dell.com>
#
# Copyright (c) 2021 Dell EMC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
#### Synopsis
Refreshes the inventory on a set of target devices. This includes the configuration inventory tab.
#### Description
This script uses the OME REST API to refresh the inventory of a targeted server. It performs X-Auth
with basic authentication. Note: Credentials are not stored on disk.
#### Python Example
`python invoke_refresh_inventory.py -i 192.168.1.93 -u admin -p somepass --idrac-ips 192.168.1.63,192.168.1.45`
"""
import argparse
import json
import sys
import time
from argparse import RawTextHelpFormatter
from pprint import pprint
from urllib.parse import urlparse
from getpass import getpass
try:
import urllib3
import requests
except ModuleNotFoundError:
print("This program requires urllib3 and requests. To install them on most systems run `pip install requests"
"urllib3`")
sys.exit(0)
def authenticate(ome_ip_address: str, ome_username: str, ome_password: str) -> dict:
    """Create an OME API session and return headers carrying its auth token.

    Args:
        ome_ip_address: IP address of the OME server
        ome_username: Username for OME
        ome_password: OME password

    Returns: A dictionary of HTTP headers including the X-Auth-Token

    Raises:
        Exception: if OME rejects the supplied credentials.
    """
    headers = {'content-type': 'application/json'}
    payload = json.dumps({'UserName': ome_username,
                          'Password': ome_password,
                          'SessionType': 'API'})
    url = 'https://%s/api/SessionService/Sessions' % ome_ip_address
    try:
        response = requests.post(url, verify=False, data=payload, headers=headers)
    except requests.exceptions.ConnectionError:
        print("Failed to connect to OME. This typically indicates a network connectivity problem. Can you ping OME?")
        sys.exit(0)
    if response.status_code == 201:
        # 201 Created: the session exists; propagate its token on all headers.
        headers['X-Auth-Token'] = response.headers['X-Auth-Token']
        return headers
    message = ("There was a problem authenticating with OME. Are you sure you have the right username, "
               "password, and IP?")
    print(message)
    raise Exception(message)
def get_group_id_by_name(ome_ip_address: str, group_name: str, authenticated_headers: dict) -> int:
    """Resolve *group_name* to its OME group ID.

    Args:
        ome_ip_address: The IP address of the OME server
        group_name: The name of the group whose ID you want to resolve.
        authenticated_headers: Headers used for authentication to the OME server

    Returns: The group's ID as an integer, or -1 if it couldn't be resolved.
    """
    print("Searching for the requested group.")
    url = "https://%s/api/GroupService/Groups?$filter=Name eq '%s'" % (ome_ip_address, group_name)
    response = requests.get(url, headers=authenticated_headers, verify=False)
    if response.status_code != 200:
        print("Unable to retrieve groups. Exiting.")
        return -1
    body = json.loads(response.content)
    match_count = body['@odata.count']
    if match_count > 1:
        print("WARNING: We found more than one name that matched the group name: " + group_name +
              ". We are picking the first entry.")
    if match_count < 1:
        print("Error: We could not find the group " + group_name + ". Exiting.")
        return -1
    group_id = body['value'][0]['Id']
    if not isinstance(group_id, int):
        print("The server did not return an integer ID. Something went wrong.")
        return -1
    return group_id
def get_data(authenticated_headers: dict, url: str, odata_filter: str = None, max_pages: int = None) -> dict:
    """
    This function retrieves data from a specified URL. Get requests from OME return paginated data. The code below
    handles pagination. This is the equivalent in the UI of a list of results that require you to go to different
    pages to get a complete listing.

    Args:
        authenticated_headers: A dictionary of HTTP headers generated from an authenticated session with OME
        url: The API url against which you would like to make a request
        odata_filter: An optional parameter for providing an odata filter to run against the API endpoint.
        max_pages: The maximum number of pages you would like to return

    Returns: Returns a dictionary of data received from OME
    """
    next_link_url = None
    if odata_filter:
        count_data = requests.get(url + '?$filter=' + odata_filter, headers=authenticated_headers, verify=False)
        # 400 here means OME rejected the filter expression itself.
        if count_data.status_code == 400:
            print("Received an error while retrieving data from %s:" % url + '?$filter=' + odata_filter)
            pprint(count_data.json()['error'])
            return {}
        count_data = count_data.json()
        if count_data['@odata.count'] <= 0:
            print("No results found!")
            return {}
    else:
        count_data = requests.get(url, headers=authenticated_headers, verify=False).json()
    # Collection endpoints wrap results in 'value'; single-entity endpoints
    # return the object itself, so fall back to the whole body.
    if 'value' in count_data:
        data = count_data['value']
    else:
        data = count_data
    if '@odata.nextLink' in count_data:
        # Grab the base URI
        next_link_url = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(url)) + count_data['@odata.nextLink']
    # Follow @odata.nextLink pages until exhausted (or max_pages reached),
    # appending each page's 'value' entries onto `data`.
    i = 1
    while next_link_url is not None:
        # Break if we have reached the maximum number of pages to be returned
        if max_pages:
            if i >= max_pages:
                break
            else:
                i = i + 1
        response = requests.get(next_link_url, headers=authenticated_headers, verify=False)
        next_link_url = None
        if response.status_code == 200:
            requested_data = response.json()
            if requested_data['@odata.count'] <= 0:
                print("No results found!")
                return {}
            # The @odata.nextLink key is only present in data if there are additional pages. We check for it and if it
            # is present we get a link to the page with the next set of results.
            if '@odata.nextLink' in requested_data:
                next_link_url = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(url)) + \
                                requested_data['@odata.nextLink']
            if 'value' in requested_data:
                data += requested_data['value']
            else:
                data += requested_data
        else:
            print("Unknown error occurred. Received HTTP response code: " + str(response.status_code) +
                  " with error: " + response.text)
            raise Exception("Unknown error occurred. Received HTTP response code: " + str(response.status_code)
                            + " with error: " + response.text)
    return data
def track_job_to_completion(ome_ip_address: str,
                            authenticated_headers: dict,
                            tracked_job_id,
                            max_retries: int = 20,
                            sleep_interval: int = 30) -> bool:
    """
    Tracks a job to either completion or a failure within the job.

    Args:
        ome_ip_address: The IP address of the OME server
        authenticated_headers: A dictionary of HTTP headers generated from an authenticated session with OME
        tracked_job_id: The ID of the job which you would like to track
        max_retries: The maximum number of times the function should contact the server to see if the job has completed
        sleep_interval: The frequency with which the function should check the server for job completion

    Returns: True if the job completed successfully or completed with errors. Returns false if the job failed.
    """
    job_status_map = {
        "2020": "Scheduled",
        "2030": "Queued",
        "2040": "Starting",
        "2050": "Running",
        "2060": "Completed",
        "2070": "Failed",
        "2090": "Warning",
        "2080": "New",
        "2100": "Aborted",
        "2101": "Paused",
        "2102": "Stopped",
        "2103": "Canceled"
    }
    failed_job_status = [2070, 2090, 2100, 2101, 2102, 2103]
    job_url = 'https://%s/api/JobService/Jobs(%s)' % (ome_ip_address, tracked_job_id)
    loop_ctr = 0
    job_incomplete = True
    print("Polling %s to completion ..." % tracked_job_id)
    while loop_ctr < max_retries:
        loop_ctr += 1
        time.sleep(sleep_interval)
        # get_data returns the decoded JSON body as a dict (or {} on failure).
        # The original treated it as a requests.Response (.status_code/.json()),
        # which raised AttributeError on every iteration; it also issued a
        # redundant, unused requests.get against the same URL.
        job_resp = get_data(authenticated_headers, job_url)
        if not job_resp:
            print("Unable to poll status of %s - Iteration %s " % (tracked_job_id, loop_ctr))
            continue
        try:
            job_status = str(job_resp['LastRunStatus']['Id'])
        except (KeyError, TypeError):
            print("There was a problem getting the job info during the wait. Full error details:")
            pprint(job_resp)
            return False
        job_status_str = job_status_map.get(job_status, "Unknown")
        print("Iteration %s: Status of %s is %s" % (loop_ctr, tracked_job_id, job_status_str))
        if int(job_status) == 2060:
            job_incomplete = False
            print("Job completed successfully!")
            break
        elif int(job_status) in failed_job_status:
            job_incomplete = True
            if job_status_str == "Warning":
                print("Completed with errors")
            else:
                print("Error: Job failed.")
            # Dump the job's execution history to help diagnose the failure.
            job_hist_url = str(job_url) + "/ExecutionHistories"
            job_hist_resp = requests.get(job_hist_url, headers=authenticated_headers, verify=False)
            if job_hist_resp.status_code == 200:
                # Get the job's execution details
                job_history_id = str((job_hist_resp.json())['value'][0]['Id'])
                execution_hist_detail = "(" + job_history_id + ")/ExecutionHistoryDetails"
                job_hist_det_url = str(job_hist_url) + execution_hist_detail
                job_hist_det_resp = requests.get(job_hist_det_url,
                                                 headers=authenticated_headers,
                                                 verify=False)
                if job_hist_det_resp.status_code == 200:
                    pprint(job_hist_det_resp.json()['value'])
                else:
                    print("Unable to parse job execution history... exiting")
            break
    if job_incomplete:
        print("Job %s incomplete after polling %s times...Check status" % (tracked_job_id, max_retries))
        return False
    return True
def get_device_id(authenticated_headers: dict,
                  ome_ip_address: str,
                  service_tag: str = None,
                  device_idrac_ip: str = None,
                  device_name: str = None) -> int:
    """
    Resolves a service tag, idrac IP or device name to a device ID

    Args:
        authenticated_headers: A dictionary of HTTP headers generated from an authenticated session with OME
        ome_ip_address: IP address of the OME server
        service_tag: (optional) The service tag of a host
        device_idrac_ip: (optional) The idrac IP of a host
        device_name: (optional): The name of a host

    Returns: Returns the device ID or -1 if it couldn't be found
    """
    if not service_tag and not device_idrac_ip and not device_name:
        print("No argument provided to get_device_id. Must provide service tag, device idrac IP or device name.")
        return -1
    # If the user passed a device name, resolve that name to a device ID
    if device_name:
        device_id = get_data(authenticated_headers, "https://%s/api/DeviceService/Devices" % ome_ip_address,
                             "DeviceName eq \'%s\'" % device_name)
        if len(device_id) == 0:
            print("Error: We were unable to find device name " + device_name + " on this OME server. Exiting.")
            return -1
        device_id = device_id[0]['Id']
    elif service_tag:
        device_id = get_data(authenticated_headers, "https://%s/api/DeviceService/Devices" % ome_ip_address,
                             "DeviceServiceTag eq \'%s\'" % service_tag)
        if len(device_id) == 0:
            print("Error: We were unable to find service tag " + service_tag + " on this OME server. Exiting.")
            return -1
        device_id = device_id[0]['Id']
    elif device_idrac_ip:
        device_id = -1
        candidates = get_data(authenticated_headers, "https://%s/api/DeviceService/Devices" % ome_ip_address,
                              "DeviceManagement/any(d:d/NetworkAddress eq '%s')" % device_idrac_ip)
        if len(candidates) == 0:
            print("Error: We were unable to find idrac IP " + device_idrac_ip + " on this OME server. Exiting.")
            return -1
        # The OData filter can match multiple devices
        # (https://github.com/dell/OpenManage-Enterprise/issues/87), so pick the
        # record whose NetworkAddress is an exact match. The original reused
        # `device_id` as the loop variable, so a non-matching record iterated
        # after a match clobbered the resolved ID (and could return a dict).
        for candidate in candidates:
            if candidate['DeviceManagement'][0]['NetworkAddress'] == device_idrac_ip:
                device_id = candidate['Id']
                break
        if device_id == -1:
            print("Error: We were unable to find idrac IP " + device_idrac_ip + " on this OME server. Exiting.")
            return -1
    else:
        device_id = -1
    return device_id
def refresh_device_inventory(authenticated_headers: dict,
                             ome_ip_address: str,
                             group_name: str,
                             skip_config_inventory: bool,
                             device_ids: list = None,
                             service_tags: str = None,
                             device_idrac_ips: str = None,
                             device_names: str = None,
                             ignore_group: bool = False):
    """
    Refresh the inventory of targeted hosts

    Args:
        authenticated_headers: A dictionary of HTTP headers generated from an authenticated session with OME
        ome_ip_address: IP address of the OME server
        group_name: The name of the group which contains the servers whose inventories you want to refresh
        skip_config_inventory: A boolean defining whether you would like to skip gathering the config inventory
        device_ids: (optional) The device IDs of hosts whose inventory you want to refresh
        service_tags: (optional) Service tags of hosts to refresh; a comma separated string or an already-split list
        device_idrac_ips: (optional) idrac IPs of hosts to refresh; a comma separated string or an already-split list
        device_names: (optional) Names of hosts to refresh; a comma separated string or an already-split list
        ignore_group: (optional): Controls whether you want to ignore using groups or not
    """
    jobs_url = "https://%s/api/JobService/Jobs" % ome_ip_address
    target_ids = []
    # NOTE: the original read the module-level globals `args` and `headers`
    # here, which broke any caller other than the __main__ block; everything
    # below uses the function's own parameters. Identifier arguments are
    # accepted either as comma separated strings (per the docstring) or as
    # already-split lists (as __main__ passes them) — the original called
    # .split(',') on lists and raised AttributeError.
    if service_tags:
        if isinstance(service_tags, str):
            service_tags = service_tags.split(',')
        for service_tag in service_tags:
            target = get_device_id(authenticated_headers, ome_ip_address, service_tag=service_tag)
            if target != -1:
                target_ids.append(target)
            else:
                print("Could not resolve ID for: " + service_tag)
    if device_idrac_ips:
        if isinstance(device_idrac_ips, str):
            device_idrac_ips = device_idrac_ips.split(',')
        for device_idrac_ip in device_idrac_ips:
            target = get_device_id(authenticated_headers, ome_ip_address, device_idrac_ip=device_idrac_ip)
            if target != -1:
                target_ids.append(target)
            else:
                print("Could not resolve ID for: " + device_idrac_ip)
    if device_names:
        if isinstance(device_names, str):
            device_names = device_names.split(',')
        for device_name in device_names:
            target = get_device_id(authenticated_headers, ome_ip_address, device_name=device_name)
            if target != -1:
                target_ids.append(target)
            else:
                print("Could not resolve ID for: " + device_name)
    if device_ids:
        for device_id in device_ids:
            target_ids.append(device_id)
    group_id = None
    if not skip_config_inventory or not ignore_group:
        # The group ID is needed both for the config-inventory jobs and for
        # expanding the group into refresh targets. (The original resolved it
        # only when the config inventory ran, so --skip-config-inventory
        # without --ignore-group crashed with a NameError below.)
        group_id = get_group_id_by_name(ome_ip_address, group_name, authenticated_headers)
        if group_id == -1:
            print("We were unable to find the ID for group name " + group_name + " ... exiting.")
            sys.exit(0)
    if not ignore_group:
        group_devices = get_data(authenticated_headers,
                                 "https://%s/api/GroupService/Groups(%s)/Devices" % (ome_ip_address, group_id))
        if len(group_devices) < 1:
            print("Error: There was a problem retrieving the devices for group " + group_name + ". Exiting")
            sys.exit(0)
        for device in group_devices:
            target_ids.append(device['Id'])
    # Build the target list for the standard inventory job.
    targets_payload = []
    for id_to_refresh in target_ids:
        targets_payload.append({
            "Id": id_to_refresh,
            "Data": "",
            "TargetType": {
                "Id": 1000,
                "Name": "DEVICE"
            }
        })
    payload = {
        "Id": 0,
        "JobName": "Inventory refresh via the API.",
        "JobDescription": "Refreshes the inventories for targeted hardware.",
        "Schedule": "startnow",
        "State": "Enabled",
        "JobType": {
            "Name": "Inventory_Task"
        },
        "Targets": targets_payload
    }
    print("Beginning standard inventory refresh...")
    create_resp = requests.post(jobs_url, headers=authenticated_headers, verify=False, data=json.dumps(payload))
    if create_resp.status_code == 201:
        job_id_generic_refresh = json.loads(create_resp.content)["Id"]
    else:
        print("Error: Failed to refresh inventory. We aren't sure what went wrong.")
        sys.exit(1)
    if job_id_generic_refresh is None:
        print("Received invalid job ID from OME for standard inventory. Exiting.")
        sys.exit(1)
    # ------------------------------------------------------
    if not skip_config_inventory:
        # Config inventory runs as two sequential jobs against the group:
        # first the collection task, then the inventory task that ingests it.
        payload = {
            "JobDescription": "Run config inventory collection task on selected devices",
            "JobName": "Part 1 - API refresh config inventory",
            "JobType": {"Id": 50, "Name": "Device_Config_Task"},
            "Params": [{"Key": "action", "Value": "CONFIG_INVENTORY"}],
            "Schedule": "startnow",
            "StartTime": "",
            "State": "Enabled",
            "Targets": [{
                "Data": "",
                "Id": group_id,
                "JobId": -1,
                "TargetType": {"Id": 6000, "Name": "GROUP"}
            }]
        }
        print("Beginning part 1 of 2 of the configuration inventory refresh.")
        create_resp = requests.post(jobs_url, headers=authenticated_headers, verify=False, data=json.dumps(payload))
        if create_resp.status_code == 201:
            config_inventory_refresh_job_1 = json.loads(create_resp.content)["Id"]
        else:
            print("Error: Failed to refresh inventory. We aren't sure what went wrong.")
            sys.exit(1)
        if config_inventory_refresh_job_1 is None:
            print("Received invalid job ID from OME for part 1 of configuration inventory refresh... exiting.")
            sys.exit(1)
        print("Waiting for part 1 of configuration inventory refresh to finish. This could take a couple of minutes.")
        if track_job_to_completion(ome_ip_address, authenticated_headers, config_inventory_refresh_job_1):
            print("Part 1 of configuration inventory refresh completed successfully.")
        else:
            print("Something went wrong. See text output above for more details.")
        # ------------------------------------------------------
        payload = {
            "JobDescription": "Create Inventory",
            "JobName": "Part 2 - API refresh config inventory",
            "JobType": {"Id": 8, "Name": "Inventory_Task"},
            "Params": [
                {"Key": "action", "Value": "CONFIG_INVENTORY"},
                {"Key": "isCollectDriverInventory", "Value": "true"}],
            "Schedule": "startnow",
            "StartTime": "",
            "State": "Enabled",
            "Targets": [{
                "Data": "",
                "Id": group_id,
                "JobId": -1,
                "TargetType": {"Id": 6000, "Name": "GROUP"}
            }]
        }
        print("Beginning part 2 of 2 of the configuration inventory refresh")
        create_resp = requests.post(jobs_url, headers=authenticated_headers, verify=False, data=json.dumps(payload))
        if create_resp.status_code == 201:
            config_inventory_refresh_job_2 = json.loads(create_resp.content)["Id"]
        else:
            print("Error: Failed to refresh inventory. We aren't sure what went wrong.")
            sys.exit(1)
        if config_inventory_refresh_job_2 is None:
            print("Received invalid job ID from OME for part 2 of the configuration inventory refresh... exiting.")
            sys.exit(1)
        print("Waiting for part 2 of the configuration inventory refresh to finish. "
              "This could take a couple of minutes.")
        if track_job_to_completion(ome_ip_address, authenticated_headers, config_inventory_refresh_job_2):
            print("Inventory refresh completed successfully.")
        else:
            print("Something went wrong. See text output above for more details.")
    print("Tracking standard inventory to completion.")
    if track_job_to_completion(ome_ip_address, authenticated_headers, job_id_generic_refresh):
        print("Inventory refresh completed successfully.")
    else:
        print("Something went wrong. See text output above for more details.")
    print("Inventory refresh complete!")
if __name__ == '__main__':
    # Suppress the warning urllib3 emits because every request uses verify=False
    # (OME appliances commonly run with self-signed certificates).
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
    parser.add_argument("--ip", "-i", required=True, help="OME Appliance IP")
    parser.add_argument("--user", "-u", required=False,
                        help="Username for the OME Appliance", default="admin")
    parser.add_argument("--password", "-p", required=False,
                        help="Password for the OME Appliance")
    parser.add_argument("--groupname", "-g", required=False, default="All Devices",
                        help="The name of the group containing the devices whose inventory you want to refresh. "
                             "Defaults to all devices. Due to the way the API functions, if you want to refresh the "
                             "configuration inventory, you must have all applicable devices in a group. The "
                             "configuration inventory is specific to the tab called \"Configuration Inventory\" under "
                             "a device's view. You can use the create_static_group and add_device_to_static group "
                             "modules to do this programmatically.")
    parser.add_argument("--device-ids", "-d", help="A comma separated list of device-ids to refresh. Applies to "
                                                   "regular inventory only. This does not impact the configuration "
                                                   "inventory tab. That is controlled by the group name.")
    parser.add_argument("--service-tags", "-s", help="A comma separated list of service tags to refresh. Applies to "
                                                     "regular inventory only. This does not impact the configuration "
                                                     "inventory tab. That is controlled by the group name.")
    parser.add_argument("--idrac-ips", "-r", help="A comma separated list of idrac IPs to refresh. Applies to regular "
                                                  "inventory only. This does not impact the configuration inventory "
                                                  "tab. That is controlled by the group name.")
    parser.add_argument("--device-names", "-n", help="A comma separated list of device names to refresh. Applies to "
                                                     "regular inventory only. This does not impact the configuration "
                                                     "inventory tab. That is controlled by the group name.")
    parser.add_argument("--skip-config-inventory", "-skip", default=False, action='store_true',
                        help="The configuration inventory is the inventory you see specifically under the tab for a"
                             " specific device. In order to obtain a config inventory that server must be part of a"
                             " group or you have to run an inventory update against all devices which can be time "
                             "consuming. A regular inventory run will update things like firmware assuming that the"
                             " version change is reflected in idrac. A config inventory is launched in the GUI by "
                             "clicking \"Run inventory\" on quick links on the devices page. A regular inventory is "
                             "the same as clicking \"Run inventory\" on a specific device\'s page.")
    parser.add_argument("--ignore-group", default=False, action='store_true', help="Used when you only want to run a"
                                                                                   " regular inventory and you do not want to provide a group.")
    args = parser.parse_args()
    if not args.password:
        # Prompt interactively rather than requiring the password on the command line.
        args.password = getpass()
    try:
        headers = authenticate(args.ip, args.user, args.password)
        if not headers:
            sys.exit(0)
        # Normalize each comma separated CLI string into a list (or None).
        if args.device_ids:
            device_ids_arg = args.device_ids.split(',')
        else:
            device_ids_arg = None
        if args.service_tags:
            service_tags_arg = args.service_tags.split(',')
        else:
            service_tags_arg = None
        if args.idrac_ips:
            idrac_ips_arg = args.idrac_ips.split(',')
        else:
            idrac_ips_arg = None
        if args.device_names:
            device_names_arg = args.device_names.split(',')
        else:
            device_names_arg = None
        print("WARNING: To reflect firmware changes you may have to power cycle the server first before running this. "
              "It is situation dependent.")
        if args.groupname == 'All Devices':
            print("WARNING: No argument was provided for groupname. Defaulting to \'All Devices\' for the "
                  "inventory refresh. See help for details. This will also display if the argument was manually set "
                  "to \'All Devices\' and can be safely ignored. If you do not want to use a group AND you do not want"
                  " to update the configuration inventory tab, use the --skip-config-inventory and --ignore-group"
                  " switches together. If you want to use a group to update regular inventories only and not the"
                  " configuration inventory tab use the --skip-config-inventory switch by itself.")
        refresh_device_inventory(headers, args.ip, args.groupname, args.skip_config_inventory, device_ids_arg,
                                 service_tags_arg, idrac_ips_arg, device_names_arg, args.ignore_group)
    except Exception as error:
        print("Unexpected error:", str(error))
| 44.770313
| 120
| 0.606638
|
4a02ae3711656c86f400ae0c0aaf42541f075662
| 154
|
py
|
Python
|
python-codes/m2_curso_em_video_estruturas_de_controle/ex071.5.py
|
lucasportella/learning_repo
|
a9449dffd489e7e1f1619e3acef86bc2c64f0f14
|
[
"MIT"
] | null | null | null |
python-codes/m2_curso_em_video_estruturas_de_controle/ex071.5.py
|
lucasportella/learning_repo
|
a9449dffd489e7e1f1619e3acef86bc2c64f0f14
|
[
"MIT"
] | null | null | null |
python-codes/m2_curso_em_video_estruturas_de_controle/ex071.5.py
|
lucasportella/learning_repo
|
a9449dffd489e7e1f1619e3acef86bc2c64f0f14
|
[
"MIT"
] | null | null | null |
# Read a withdrawal amount and break it into banknotes, largest denomination first.
saque = int(input('Valor do saque: '))
for valor_nota in (50, 20, 10, 1):
    quantidade, saque = divmod(saque, valor_nota)
    print(f'{quantidade} notas de {valor_nota}')
| 25.666667
| 38
| 0.584416
|
4a02ae5121fd2b0f3d5b79c4eae90ffd96ee15ca
| 21,283
|
py
|
Python
|
sdk/tables/azure-data-tables/tests/test_table_cosmos.py
|
aiven/azure-sdk-for-python
|
8764dc07423beca46ed0b51212d81289d9e52c60
|
[
"MIT"
] | 1
|
2020-03-05T18:10:35.000Z
|
2020-03-05T18:10:35.000Z
|
sdk/tables/azure-data-tables/tests/test_table_cosmos.py
|
aiven/azure-sdk-for-python
|
8764dc07423beca46ed0b51212d81289d9e52c60
|
[
"MIT"
] | 2
|
2020-03-03T23:11:13.000Z
|
2020-03-30T18:50:55.000Z
|
sdk/tables/azure-data-tables/tests/test_table_cosmos.py
|
aiven/azure-sdk-for-python
|
8764dc07423beca46ed0b51212d81289d9e52c60
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
import sys
import locale
import os
from time import sleep
from azure.data.tables import TableServiceClient
from datetime import (
datetime,
timedelta,
)
from azure.data.tables import (
ResourceTypes,
AccountSasPermissions,
TableSasPermissions,
CorsRule,
RetentionPolicy,
UpdateMode,
AccessPolicy,
TableAnalyticsLogging,
Metrics
)
from azure.core.pipeline import Pipeline
from azure.core.pipeline.policies import (
HeadersPolicy,
ContentDecodePolicy,
)
from _shared.testcase import (
TableTestCase,
RERUNS_DELAY,
SLEEP_DELAY
)
from _shared.cosmos_testcase import CachedCosmosAccountPreparer
from devtools_testutils import CachedResourceGroupPreparer
from azure.data.tables._authentication import SharedKeyCredentialPolicy
from azure.data.tables._table_shared_access_signature import generate_account_sas
from azure.core.pipeline.transport import RequestsTransport
from azure.core.exceptions import (
HttpResponseError,
ResourceNotFoundError,
ResourceExistsError
)
from devtools_testutils import CachedResourceGroupPreparer
# ------------------------------------------------------------------------------
TEST_TABLE_PREFIX = 'pytablesync'  # prefix for per-test generated table names
# ------------------------------------------------------------------------------
def _create_pipeline(account, credential, **kwargs):
    # type: (Any, **Any) -> Tuple[Configuration, Pipeline]
    """Build a requests-based pipeline with shared-key auth and UTF-8 decoding."""
    auth_policy = SharedKeyCredentialPolicy(account_name=account.name, account_key=credential)
    return Pipeline(
        RequestsTransport(**kwargs),
        policies=[HeadersPolicy(), auth_policy, ContentDecodePolicy(response_encoding="utf-8")],
    )
class StorageTableTest(TableTestCase):
# --Helpers-----------------------------------------------------------------
def _get_table_reference(self, prefix=TEST_TABLE_PREFIX):
table_name = self.get_resource_name(prefix)
return table_name
def _create_table(self, ts, prefix=TEST_TABLE_PREFIX, table_list=None):
table_name = self._get_table_reference(prefix)
try:
table = ts.create_table(table_name)
if table_list is not None:
table_list.append(table)
except ResourceExistsError:
table = ts.get_table_client(table_name)
return table
def _delete_table(self, ts, table):
if table is None:
return
try:
ts.delete_table(table.table_name)
except ResourceNotFoundError:
pass
def _delete_all_tables(self, ts):
tables = ts.list_tables()
for table in tables:
try:
ts.delete_table(table.table_name)
except ResourceNotFoundError:
pass
# --Test cases for tables --------------------------------------------------
    @pytest.mark.skip("Cosmos Tables does not yet support service properties")
    @CachedResourceGroupPreparer(name_prefix="tablestest")
    @CachedCosmosAccountPreparer(name_prefix="tablestest")
    def test_create_properties(self, resource_group, location, cosmos_account, cosmos_account_key):
        """Create a table, then set/get analytics-logging and minute-metrics service properties."""
        # # Arrange
        ts = TableServiceClient(self.account_url(cosmos_account, "cosmos"), cosmos_account_key)
        table_name = self._get_table_reference()
        # Act
        created = ts.create_table(table_name)
        # Assert
        assert created.table_name == table_name
        # properties = ts.get_service_properties()
        ts.set_service_properties(analytics_logging=TableAnalyticsLogging(write=True))
        p = ts.get_service_properties()
        ts.set_service_properties(minute_metrics= Metrics(enabled=True, include_apis=True,
                                                          retention_policy=RetentionPolicy(enabled=True, days=5)))
        ps = ts.get_service_properties()
        ts.delete_table(table_name)
        # Cosmos throttles rapid table operations; pause between live tests.
        if self.is_live:
            sleep(SLEEP_DELAY)
    @CachedResourceGroupPreparer(name_prefix="tablestest")
    @CachedCosmosAccountPreparer(name_prefix="tablestest")
    def test_create_table(self, resource_group, location, cosmos_account, cosmos_account_key):
        """A newly created table reports the requested name."""
        # # Arrange
        ts = TableServiceClient(self.account_url(cosmos_account, "cosmos"), cosmos_account_key)
        table_name = self._get_table_reference()
        # Act
        created = ts.create_table(table_name)
        # Assert
        assert created.table_name == table_name
        ts.delete_table(table_name)
        # Cosmos throttles rapid table operations; pause between live tests.
        if self.is_live:
            sleep(SLEEP_DELAY)
@CachedResourceGroupPreparer(name_prefix="tablestest")
@CachedCosmosAccountPreparer(name_prefix="tablestest")
def test_create_table_fail_on_exist(self, resource_group, location, cosmos_account, cosmos_account_key):
# Arrange
ts = TableServiceClient(self.account_url(cosmos_account, "cosmos"), cosmos_account_key)
table_name = self._get_table_reference()
# Act
created = ts.create_table(table_name)
with pytest.raises(ResourceExistsError):
ts.create_table(table_name)
# Assert
assert created
ts.delete_table(table_name)
if self.is_live:
sleep(SLEEP_DELAY)
@CachedResourceGroupPreparer(name_prefix="tablestest")
@CachedCosmosAccountPreparer(name_prefix="tablestest")
# Verifies server-side paging: 3 matching tables at 2 per page -> 2 pages.
def test_query_tables_per_page(self, resource_group, location, cosmos_account, cosmos_account_key):
# Arrange
ts = TableServiceClient(self.account_url(cosmos_account, "cosmos"), cosmos_account_key)
table_name = "mytable"
for i in range(5):
ts.create_table(table_name + str(i))
query_filter = "TableName eq 'mytable0' or TableName eq 'mytable1' or TableName eq 'mytable2'"
table_count = 0
page_count = 0
for table_page in ts.query_tables(filter=query_filter, results_per_page=2).by_page():
temp_count = 0
for table in table_page:
temp_count += 1
# No page may exceed the requested page size.
assert temp_count <= 2
page_count += 1
table_count += temp_count
assert page_count == 2
assert table_count == 3
self._delete_all_tables(ts)
if self.is_live:
sleep(SLEEP_DELAY)
@CachedResourceGroupPreparer(name_prefix="tablestest")
@CachedCosmosAccountPreparer(name_prefix="tablestest")
# Table names may not contain underscores; the SDK rejects them client-side.
def test_create_table_invalid_name(self, resource_group, location, cosmos_account, cosmos_account_key):
# Arrange
ts = TableServiceClient(self.account_url(cosmos_account, "cosmos"), cosmos_account_key)
invalid_table_name = "my_table"
with pytest.raises(ValueError) as excinfo:
ts.create_table(table_name=invalid_table_name)
# NOTE: the trailing "" is adjacent-literal concatenation -- effectively a no-op.
assert "Table names must be alphanumeric, cannot begin with a number, and must be between 3-63 characters long.""" in str(
excinfo)
if self.is_live:
sleep(SLEEP_DELAY)
@CachedResourceGroupPreparer(name_prefix="tablestest")
@CachedCosmosAccountPreparer(name_prefix="tablestest")
# NOTE(review): despite the name, this calls create_table, not delete_table -- confirm intent.
def test_delete_table_invalid_name(self, resource_group, location, cosmos_account, cosmos_account_key):
# Arrange
ts = TableServiceClient(self.account_url(cosmos_account, "cosmos"), cosmos_account_key)
invalid_table_name = "my_table"
with pytest.raises(ValueError) as excinfo:
ts.create_table(invalid_table_name)
assert "Table names must be alphanumeric, cannot begin with a number, and must be between 3-63 characters long.""" in str(
excinfo)
if self.is_live:
sleep(SLEEP_DELAY)
@CachedResourceGroupPreparer(name_prefix="tablestest")
@CachedCosmosAccountPreparer(name_prefix="tablestest")
# list_tables returns at least the table we just created.
def test_query_tables(self, resource_group, location, cosmos_account, cosmos_account_key):
# Arrange
ts = TableServiceClient(self.account_url(cosmos_account, "cosmos"), cosmos_account_key)
table = self._create_table(ts)
# Act
tables = list(ts.list_tables())
# Assert
assert tables is not None
assert len(tables) >= 1
assert tables[0] is not None
ts.delete_table(table.table_name)
if self.is_live:
sleep(SLEEP_DELAY)
@CachedResourceGroupPreparer(name_prefix="tablestest")
@CachedCosmosAccountPreparer(name_prefix="tablestest")
# An exact-name OData filter should match exactly one table.
def test_query_tables_with_filter(self, resource_group, location, cosmos_account, cosmos_account_key):
# Arrange
ts = TableServiceClient(self.account_url(cosmos_account, "cosmos"), cosmos_account_key)
table = self._create_table(ts)
# Act
name_filter = "TableName eq '{}'".format(table.table_name)
tables = list(ts.query_tables(filter=name_filter))
# Assert
assert tables is not None
assert len(tables) == 1
ts.delete_table(table.table_name)
self._delete_all_tables(ts)
if self.is_live:
sleep(SLEEP_DELAY)
@CachedResourceGroupPreparer(name_prefix="tablestest")
@CachedCosmosAccountPreparer(name_prefix="tablestest")
# results_per_page caps a single page at 3; an unconstrained page sees all 4+.
def test_query_tables_with_num_results(self, resource_group, location, cosmos_account, cosmos_account_key):
# Arrange
prefix = 'listtable'
ts = TableServiceClient(self.account_url(cosmos_account, "cosmos"), cosmos_account_key)
table_list = []
for i in range(0, 4):
self._create_table(ts, prefix + str(i), table_list)
# Act
small_page = []
big_page = []
for s in next(ts.list_tables(results_per_page=3).by_page()):
small_page.append(s)
assert s.table_name.startswith(prefix)
for t in next(ts.list_tables().by_page()):
big_page.append(t)
assert t.table_name.startswith(prefix)
# Assert
assert len(small_page) == 3
assert len(big_page) >= 4
self._delete_all_tables(ts)
if self.is_live:
sleep(SLEEP_DELAY)
@CachedResourceGroupPreparer(name_prefix="tablestest")
@CachedCosmosAccountPreparer(name_prefix="tablestest")
# Resuming iteration from a continuation token yields a distinct second page.
def test_query_tables_with_marker(self, resource_group, location, cosmos_account, cosmos_account_key):
# Arrange
ts = TableServiceClient(self.account_url(cosmos_account, "cosmos"), cosmos_account_key)
prefix = 'listtable'
table_names = []
for i in range(0, 4):
self._create_table(ts, prefix + str(i), table_names)
# table_names.sort()
# Act
generator1 = ts.list_tables(results_per_page=2).by_page()
next(generator1)
# Start a second pager from where the first one stopped.
generator2 = ts.list_tables(results_per_page=2).by_page(
continuation_token=generator1.continuation_token)
next(generator2)
tables1 = generator1._current_page
tables2 = generator2._current_page
# Assert
assert len(tables1) == 2
assert len(tables2) == 2
assert tables1 != tables2
self._delete_all_tables(ts)
if self.is_live:
sleep(SLEEP_DELAY)
@CachedResourceGroupPreparer(name_prefix="tablestest")
@CachedCosmosAccountPreparer(name_prefix="tablestest")
# After deletion the table must no longer be returned by query_tables.
def test_delete_table_with_existing_table(self, resource_group, location, cosmos_account, cosmos_account_key):
# Arrange
ts = TableServiceClient(self.account_url(cosmos_account, "cosmos"), cosmos_account_key)
table = self._create_table(ts)
# Act
deleted = ts.delete_table(table_name=table.table_name)
# Assert
existing = list(ts.query_tables("TableName eq '{}'".format(table.table_name)))
assert len(existing) == 0
if self.is_live:
sleep(SLEEP_DELAY)
@CachedResourceGroupPreparer(name_prefix="tablestest")
@CachedCosmosAccountPreparer(name_prefix="tablestest")
# Deleting a table that was never created raises an HTTP error.
def test_delete_table_with_non_existing_table_fail_not_exist(self, resource_group, location, cosmos_account,
cosmos_account_key):
# Arrange
ts = TableServiceClient(self.account_url(cosmos_account, "cosmos"), cosmos_account_key)
table_name = self._get_table_reference()
# Act
with pytest.raises(HttpResponseError):
ts.delete_table(table_name)
if self.is_live:
sleep(SLEEP_DELAY)
@pytest.mark.skip("Cosmos does not support table access policy")
@CachedResourceGroupPreparer(name_prefix="tablestest")
@CachedCosmosAccountPreparer(name_prefix="tablestest")
# Non-ASCII table names are rejected by the service.
def test_unicode_create_table_unicode_name(self, resource_group, location, cosmos_account, cosmos_account_key):
# Arrange
url = self.account_url(cosmos_account, "cosmos")
ts = TableServiceClient(url, cosmos_account_key)
table_name = u'啊齄丂狛狜'
# Act
with pytest.raises(HttpResponseError):
ts.create_table(table_name)
if self.is_live:
sleep(SLEEP_DELAY)
@pytest.mark.skip("Cosmos does not support table access policy")
@CachedResourceGroupPreparer(name_prefix="tablestest")
@CachedCosmosAccountPreparer(name_prefix="tablestest")
# A fresh table starts with an empty access-policy list.
def test_get_table_acl(self, resource_group, location, cosmos_account, cosmos_account_key):
# Arrange
url = self.account_url(cosmos_account, "cosmos")
ts = TableServiceClient(self.account_url(cosmos_account, "cosmos"), cosmos_account_key)
table = self._create_table(ts)
try:
# Act
acl = table.get_table_access_policy()
# Assert
assert acl is not None
assert len(acl) == 0
finally:
ts.delete_table(table.table_name)
if self.is_live:
sleep(SLEEP_DELAY)
@pytest.mark.skip("Cosmos does not support table access policy")
@CachedResourceGroupPreparer(name_prefix="tablestest")
@CachedCosmosAccountPreparer(name_prefix="tablestest")
# Setting an empty identifier dict leaves the ACL empty.
def test_set_table_acl_with_empty_signed_identifiers(self, resource_group, location, cosmos_account,
cosmos_account_key):
# Arrange
url = self.account_url(cosmos_account, "cosmos")
ts = TableServiceClient(url, cosmos_account_key)
table = self._create_table(ts)
try:
# Act
table.set_table_access_policy(signed_identifiers={})
# Assert
acl = table.get_table_access_policy()
assert acl is not None
assert len(acl) == 0
finally:
ts.delete_table(table.table_name)
if self.is_live:
sleep(SLEEP_DELAY)
@pytest.mark.skip("Cosmos does not support table access policy")
@CachedResourceGroupPreparer(name_prefix="tablestest")
@CachedCosmosAccountPreparer(name_prefix="tablestest")
# An identifier mapped to None round-trips as a policy with all-None fields.
def test_set_table_acl_with_empty_signed_identifier(self, resource_group, location, cosmos_account,
cosmos_account_key):
# Arrange
url = self.account_url(cosmos_account, "cosmos")
ts = TableServiceClient(url, cosmos_account_key)
table = self._create_table(ts)
try:
# Act
table.set_table_access_policy(signed_identifiers={'empty': None})
# Assert
acl = table.get_table_access_policy()
assert acl is not None
assert len(acl) == 1
assert acl['empty'] is not None
assert acl['empty'].permission is None
assert acl['empty'].expiry is None
assert acl['empty'].start is None
finally:
ts.delete_table(table.table_name)
if self.is_live:
sleep(SLEEP_DELAY)
@pytest.mark.skip("Cosmos does not support table access policy")
@CachedResourceGroupPreparer(name_prefix="tablestest")
@CachedCosmosAccountPreparer(name_prefix="tablestest")
# A real read-only AccessPolicy round-trips under its identifier.
def test_set_table_acl_with_signed_identifiers(self, resource_group, location, cosmos_account,
cosmos_account_key):
# Arrange
url = self.account_url(cosmos_account, "cosmos")
ts = TableServiceClient(url, cosmos_account_key)
table = self._create_table(ts)
client = ts.get_table_client(table_name=table.table_name)
# Act
identifiers = dict()
identifiers['testid'] = AccessPolicy(start=datetime.utcnow() - timedelta(minutes=5),
expiry=datetime.utcnow() + timedelta(hours=1),
permission='r')
try:
client.set_table_access_policy(signed_identifiers=identifiers)
# Assert
acl = client.get_table_access_policy()
assert acl is not None
assert len(acl) == 1
assert 'testid' in acl
finally:
ts.delete_table(table.table_name)
if self.is_live:
sleep(SLEEP_DELAY)
@pytest.mark.skip("Cosmos does not support table access policy")
@CachedResourceGroupPreparer(name_prefix="tablestest")
@CachedCosmosAccountPreparer(name_prefix="tablestest")
# More than 5 stored access policies must be rejected with ValueError.
def test_set_table_acl_too_many_ids(self, resource_group, location, cosmos_account, cosmos_account_key):
# Arrange
url = self.account_url(cosmos_account, "cosmos")
ts = TableServiceClient(url, cosmos_account_key)
table = self._create_table(ts)
try:
# Act
identifiers = dict()
for i in range(0, 6):
identifiers['id{}'.format(i)] = None
# Assert
# NOTE(review): passing table_name= to a table client method looks wrong -- confirm signature.
with pytest.raises(ValueError):
table.set_table_access_policy(table_name=table.table_name, signed_identifiers=identifiers)
finally:
ts.delete_table(table.table_name)
if self.is_live:
sleep(SLEEP_DELAY)
@pytest.mark.skip("Cosmos Tables does not yet support sas")
@pytest.mark.live_test_only
@CachedResourceGroupPreparer(name_prefix="tablestest")
@CachedCosmosAccountPreparer(name_prefix="tablestest")
# A read-only account SAS token can list the entities written with the key.
def test_account_sas(self, resource_group, location, cosmos_account, cosmos_account_key):
# SAS URL is calculated from storage key, so this test runs live only
# Arrange
url = self.account_url(cosmos_account, "cosmos")
tsc = TableServiceClient(url, cosmos_account_key)
table = self._create_table(tsc)
try:
entity = {
'PartitionKey': 'test',
'RowKey': 'test1',
'text': 'hello',
}
table.upsert_entity(mode=UpdateMode.MERGE, entity=entity)
entity['RowKey'] = 'test2'
table.upsert_entity(mode=UpdateMode.MERGE, entity=entity)
token = generate_account_sas(
cosmos_account.name,
cosmos_account_key,
resource_types=ResourceTypes(object=True),
permission=AccountSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1),
start=datetime.utcnow() - timedelta(minutes=1),
)
# Act
service = TableServiceClient(
self.account_url(cosmos_account, "cosmos"),
credential=token,
)
sas_table = service.get_table_client(table.table_name)
entities = list(sas_table.list_entities())
# Assert
assert len(entities) == 2
assert entities[0].text == 'hello'
assert entities[1].text == 'hello'
finally:
self._delete_table(table=table, ts=tsc)
@pytest.mark.skip("Test fails on Linux and in Python2. Throws a locale.Error: unsupported locale setting")
@CachedResourceGroupPreparer(name_prefix="tablestest")
@CachedCosmosAccountPreparer(name_prefix="tablestest")
# Basic table operations must work under a non-English (es_ES) locale.
def test_locale(self, resource_group, location, cosmos_account, cosmos_account_key):
# Arrange
ts = TableServiceClient(self.account_url(cosmos_account, "cosmos"), cosmos_account_key)
table = (self._get_table_reference())
init_locale = locale.getlocale()
# Locale spelling differs per platform.
if os.name == "nt":
culture = "Spanish_Spain"
elif os.name == 'posix':
culture = 'es_ES.UTF-8'
else:
culture = 'es_ES.utf8'
locale.setlocale(locale.LC_ALL, culture)
e = None
# Act
ts.create_table(table)
resp = ts.list_tables()
# NOTE(review): sys.exc_info() outside an except block is always (None, None, None).
e = sys.exc_info()[0]
# Assert
assert e is None
ts.delete_table(table)
# Restore whatever locale was active before the test.
locale.setlocale(locale.LC_ALL, init_locale[0] or 'en_US')
if self.is_live:
sleep(SLEEP_DELAY)
| 36.506003
| 130
| 0.642015
|
4a02ae65b7c0c891325046e4c0d8e7b4230963b9
| 1,318
|
py
|
Python
|
common/src/stack/command/stack/commands/remove/appliance/route/__init__.py
|
shivanshs9/stacki
|
258740748281dfe89b0f566261eaf23102f91aa4
|
[
"BSD-3-Clause"
] | null | null | null |
common/src/stack/command/stack/commands/remove/appliance/route/__init__.py
|
shivanshs9/stacki
|
258740748281dfe89b0f566261eaf23102f91aa4
|
[
"BSD-3-Clause"
] | null | null | null |
common/src/stack/command/stack/commands/remove/appliance/route/__init__.py
|
shivanshs9/stacki
|
258740748281dfe89b0f566261eaf23102f91aa4
|
[
"BSD-3-Clause"
] | null | null | null |
# @copyright@
# Copyright (c) 2006 - 2018 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
#
# @rocks@
# Copyright (c) 2000 - 2010 The Regents of the University of California
# All rights reserved. Rocks(r) v5.4 www.rocksclusters.org
# https://github.com/Teradata/stacki/blob/master/LICENSE-ROCKS.txt
# @rocks@
import stack.commands
from stack.exception import ArgRequired
class Command(stack.commands.remove.appliance.command):
"""
Remove a static route for an appliance type.
<arg type='string' name='appliance'>
Appliance name. This argument is required.
</arg>
<param type='string' name='address' optional='0'>
The address of the static route to remove.
</param>
<example cmd='remove appliance route backend address=1.2.3.4'>
Remove the static route for the 'backend' appliance that has the
network address '1.2.3.4'.
</example>
"""
# Entry point: delete the matching route row for each named appliance.
def run(self, params, args):
if len(args) == 0:
raise ArgRequired(self, 'appliance')
# 'address' is required (third tuple element True).
(address, ) = self.fillParams([ ('address', None, True) ])
for appliance in self.getApplianceNames(args):
# Parameterized delete keyed on appliance id + network address.
self.db.execute("""
delete from appliance_routes
where appliance=(
select id from appliances where name=%s
) and network=%s
""", (appliance, address))
| 27.458333
| 71
| 0.710926
|
4a02ae92d473fc8891f88738d5f6d3ecef96cc8d
| 13,245
|
py
|
Python
|
code/test2.py
|
lfearnley/TRAPD
|
ebdd8dc92badcd85f7c7cd6654a5e9235c309fac
|
[
"MIT"
] | null | null | null |
code/test2.py
|
lfearnley/TRAPD
|
ebdd8dc92badcd85f7c7cd6654a5e9235c309fac
|
[
"MIT"
] | null | null | null |
code/test2.py
|
lfearnley/TRAPD
|
ebdd8dc92badcd85f7c7cd6654a5e9235c309fac
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import bisect
import gzip
import operator
import optparse
import sys
#Parse options
parser = optparse.OptionParser()
parser.add_option("-v", "--vcffile", action="store",dest="vcffilename")
parser.add_option("-o", "--outfile", action="store",dest="outfilename", default="snpfile.txt")
# INFO/CSQ field that carries the gene symbol.
parser.add_option("--genecolname", action="store", dest="genecolname")
#Filters
# include*/exclude* options may be given multiple times (action="append").
parser.add_option("--includeinfo", action="append",dest="includeinfo")
parser.add_option("--excludeinfo", action="append",dest="excludeinfo")
parser.add_option("--includevep", action="append",dest="includevep")
parser.add_option("--excludevep", action="append",dest="excludevep")
parser.add_option("--pass", action="store_true", dest="passfilter")
parser.add_option("--vep", action="store_true", dest="vep")
parser.add_option("--snponly", action="store_true", dest="snponly")
parser.add_option("--indelonly", action="store_true", dest="indelonly")
parser.add_option("--bedfile", action="store", dest="bedfilename")
parser.add_option("--snpformat", action="store",dest="snpformat", default="CHRPOSREFALT")
# Comma-separated gene values treated as "no gene".
parser.add_option("--genenull", action="store", dest="genenull", default=".,NA")
options, args = parser.parse_args()
#Try to catch potential errors
# NOTE: parser.error() already exits; the following sys.exit() calls are defensive.
if not options.vcffilename: # if filename is not given
parser.error('A vcf file is needed')
sys.exit()
if (options.vcffilename.endswith(".gz") is False) and (options.vcffilename.endswith(".bgz") is False): # if vcf filename is not given
parser.error('Is your vcf file gzipped?')
sys.exit()
if not options.genecolname:
parser.error('An INFO field with the gene names to use must be provided')
sys.exit()
if (options.includevep is not None) or (options.excludevep is not None):
if not options.vep:
parser.error('--vep option must be supplied if using VEP annotations')
sys.exit()
if options.snpformat!="VCFID" and options.snpformat!="CHRPOSREFALT": # if filename is not given
parser.error('SNP format must be "VCFID" or "CHRPOSREFALT"')
sys.exit()
if options.snponly and options.indelonly:
parser.error('Please select only --snponly or --indelonly')
sys.exit()
#Check to make sure all the filters seem well formed
def checkfilter(infofilter):
    """Return 1 when *infofilter* looks like FIELD[op]VALUE with a known op, else 0."""
    if "[" not in infofilter:
        return 0
    if infofilter.startswith("]") or infofilter.endswith("]"):
        return 0
    op_token = str(infofilter.split("[")[1].split("]")[0])
    if op_token not in ("<", ">", "<=", ">=", "=", "!=", "in", "%"):
        return 0
    return 1
#Filter to make sure that values are either all numeric or all str
def consistent(option_value, field_value):
    """Coerce a filter value and a VCF field value into comparable types.

    Returns [option_out, field_out, c]:
      * both parse as numbers        -> [float, float, 1]
      * filter value not numeric     -> [str, str, 1]   (string comparison)
      * filter numeric, field not    -> [original, original, 0]  (incomparable)

    Bug fix vs. the original: the old code evaluated
    ``float(v) or int(v)``, so values whose float is 0 (e.g. "0.0")
    fell through to int(), which raises ValueError on "0.0" and
    mis-classified the pair as a string comparison.
    """
    try:
        option_num = float(option_value)
    except ValueError:
        # Non-numeric filter value: compare both sides as strings.
        return [str(option_value), str(field_value), 1]
    try:
        field_num = float(field_value)
    except ValueError:
        # Numeric filter but non-numeric field: flag as incomparable.
        return [option_value, field_value, 0]
    return [option_num, field_num, 1]
#Read in vcf header and extract all INFO fields
# Also detect whether contigs are named "chr1" vs "1".
info_fields=[]
chrformat="number"
vcffile=gzip.open(options.vcffilename, "rb")
for line_vcf1 in vcffile:
if line_vcf1[0]=="#":
if "##INFO=<ID=" in line_vcf1:
temp_field=line_vcf1.split("##INFO=<ID=")[1].split(",")[0]
info_fields.append(temp_field)
elif "##contig" in line_vcf1:
if "ID=chr" in line_vcf1:
chrformat="chr"
else:
# First non-header line: stop scanning.
break
vcffile.close()
#Read in vcf header to get VEP CSQ fields
if options.vep:
vcffile=gzip.open(options.vcffilename, "rb")
csq_found=0
for line_vcf1 in vcffile:
if line_vcf1[0]=="#":
# The CSQ (or "vep") INFO definition lists the pipe-delimited sub-fields.
if ("ID=CSQ" in line_vcf1) or ("ID=vep" in line_vcf1):
csq_anno=line_vcf1.rstrip('\n').replace('"', '').strip('>').split("Format: ")[1].split("|")
csq_found=1
break
if csq_found==0:
sys.stdout.write("VEP CSQ annotations not found in vcf header\n")
sys.exit()
vcffile.close()
if options.vep:
# The gene column requested on the command line must be a CSQ sub-field.
if options.genecolname not in csq_anno:
sys.stdout.write("Gene column name not found in VEP annotations\n")
sys.exit()
#Run through all filters to make sure they're okay
# Each filter must be well-formed (checkfilter) and reference a field that
# actually exists in the VCF (INFO header) or in the CSQ annotation list.
if options.includeinfo is not None:
for i in range(0, len(options.includeinfo), 1):
if checkfilter(options.includeinfo[i])==0:
sys.stdout.write(str(options.includeinfo[i])+" is malformed\n")
sys.exit()
if options.includeinfo[i].split("[")[0] not in info_fields:
sys.stdout.write(str(options.includeinfo[i])+" is not in VCF file\n")
sys.exit()
if options.excludeinfo is not None:
for i in range(0, len(options.excludeinfo), 1):
if checkfilter(options.excludeinfo[i])==0:
sys.stdout.write(str(options.excludeinfo[i])+" is malformed\n")
sys.exit()
if options.excludeinfo[i].split("[")[0] not in info_fields:
sys.stdout.write(str(options.excludeinfo[i])+" is not in VCF file\n")
sys.exit()
if options.includevep is not None:
for i in range(0, len(options.includevep), 1):
if checkfilter(options.includevep[i])==0:
sys.stdout.write(str(options.includevep[i])+" is malformed\n")
sys.exit()
if options.includevep[i].split("[")[0] not in csq_anno:
sys.stdout.write(str(options.includevep[i])+" is not in VCF file\n")
sys.exit()
if options.excludevep is not None:
for i in range(0, len(options.excludevep), 1):
if checkfilter(options.excludevep[i])==0:
sys.stdout.write(str(options.excludevep[i])+" is malformed\n")
sys.exit()
if options.excludevep[i].split("[")[0] not in csq_anno:
sys.stdout.write(str(options.excludevep[i])+" is not in VCF file\n")
sys.exit()
#Test if something is a number
def is_number(s):
    """True when *s* parses as a float, False otherwise."""
    try:
        float(s)
    except ValueError:
        return False
    return True
#Extract canonical vep annotation
# Returns the (last) CSQ transcript annotation flagged CANONICAL=YES, or "".
# NOTE: relies on the module-global csq_anno parsed from the VCF header.
def canonical_vep(vcfline):
annots=(";"+vcfline).split(";CSQ=")[1].split(";")[0].split(",")
canonical_index=csq_anno.index("CANONICAL")
out=""
for i in range(0, len(annots), 1):
# Skip malformed annotations whose field count doesn't match the header.
if len(csq_anno)==len(annots[i].split("|")):
if str(annots[i].split("|")[canonical_index])=="YES":
out=annots[i]
return out
# Return 1 when the variant's INFO field satisfies the FIELD[op]VALUE
# include-filter, else 0. A missing field or incomparable types -> 0 (drop).
def test_include_info(filter, vcfline):
option_field=filter.split("[")[0]
option_value=filter.split("]")[1]
if (";"+option_field+"=") in (";"+vcfline):
# Multi-allelic values: only the first comma-separated entry is tested.
field_value=(";"+vcfline).split((";"+option_field+"="))[1].split(";")[0].split(",")[0]
consist_out=consistent(option_value, field_value)
if consist_out[2]==1:
if filter.split("[")[1].split("]")[0]=="in":
# Set-membership: VALUE is "(a,b,...)".
listvalues=option_value.lstrip("(").rstrip(")").split(',')
counter=0
for i in range(0, len(listvalues), 1):
if operator.eq(field_value, listvalues[i]):
counter+=1
if counter>0:
return 1
else:
return 0
else:
if get_operator_fn(filter.split("[")[1].split("]")[0])(consist_out[1], consist_out[0]):
return 1
else:
return 0
else:
return 0
else:
return 0
# Return 1 to KEEP the variant (exclude-filter not matched), 0 to drop it.
# NOTE(review): a variant lacking the field returns 0 (dropped) -- confirm
# that is intended for an exclusion filter.
def test_exclude_info(filter, vcfline):
option_field=filter.split("[")[0]
option_value=filter.split("]")[1]
if (";"+option_field+"=") in (";"+vcfline):
field_value=(";"+vcfline).split((";"+option_field+"="))[1].split(";")[0].split(",")[0]
consist_out=consistent(option_value, field_value)
if consist_out[2]==1:
if filter.split("[")[1].split("]")[0]=="in":
listvalues=option_value.lstrip("(").rstrip(")").split(',')
counter=0
for i in range(0, len(listvalues), 1):
if operator.eq(field_value, listvalues[i]):
counter+=1
if counter>0:
return 0
else:
return 1
else:
# Inverted logic: matching the condition means "exclude".
if get_operator_fn(filter.split("[")[1].split("]")[0])(consist_out[1], consist_out[0]):
return 0
else:
return 1
else:
return 0
else:
return 0
# Return 1 when one pipe-delimited VEP annotation satisfies the
# FIELD[op]VALUE include-filter, else 0.
def test_include_vep(filter, annot, csq_anno):
option_field=filter.split("[")[0]
csq_index=csq_anno.index(option_field)
option_value=filter.split("]")[1]
field_value=annot.split("|")[csq_index]
consist_out=consistent(option_value, field_value)
if consist_out[2]==1:
if filter.split("[")[1].split("]")[0]=="in":
listvalues=option_value.lstrip("(").rstrip(")").split(',')
counter=0
for i in range(0, len(listvalues), 1):
if operator.eq(field_value, listvalues[i]):
counter+=1
if counter>0:
return 1
else:
return 0
else:
if get_operator_fn(filter.split("[")[1].split("]")[0])(consist_out[1], consist_out[0]):
return 1
else:
return 0
else:
return 0
# Return 1 to KEEP the annotation (exclude-filter not matched), 0 to drop it.
def test_exclude_vep(filter, annot, csq_anno):
option_field=filter.split("[")[0]
csq_index=csq_anno.index(option_field)
option_value=filter.split("]")[1]
field_value=annot.split("|")[csq_index]
consist_out=consistent(option_value, field_value)
if consist_out[2]==1:
if filter.split("[")[1].split("]")[0]=="in":
listvalues=option_value.lstrip("(").rstrip(")").split(',')
counter=0
for i in range(0, len(listvalues), 1):
if operator.eq(field_value, listvalues[i]):
counter+=1
if counter>0:
return 0
else:
return 1
else:
# Inverted logic: matching the condition means "exclude".
if get_operator_fn(filter.split("[")[1].split("]")[0])(consist_out[1], consist_out[0]):
return 0
else:
return 1
else:
return 0
def find_vep_gene(genecolname, annot, csq_anno):
    """Pull the *genecolname* sub-field out of one pipe-delimited VEP annotation."""
    return annot.split("|")[csq_anno.index(genecolname)]
def find_info_gene(genecolname, vcfline):
    """Extract the value of INFO key *genecolname* from an INFO string.

    Returns "" when the key is absent.  The key is matched as a complete
    INFO field (";KEY="); the original substring test crashed with
    IndexError whenever the key name occurred inside another key
    (e.g. looking up "Gene" on a line containing "MyGene=...").
    """
    tagged = ";" + vcfline
    key = ";" + genecolname + "="
    if key in tagged:
        return tagged.split(key)[1].split(";")[0]
    return ""
#Function to match operator strings
def get_operator_fn(op):
    """Map an operator token from a filter string to its comparison function.

    Raises KeyError for unknown tokens (checkfilter() screens these first).
    Bug fix: '>=' previously mapped to operator.gt, silently dropping
    boundary values that should have passed a >= filter.
    """
    return {
        '<': operator.lt,
        '<=': operator.le,
        '>': operator.gt,
        '>=': operator.ge,
        '=': operator.eq,
        '!=': operator.ne,
        # contains(a, b) tests "b in a": used as fn(field, option).
        '%': operator.contains,
    }[op]
#Create empty snptable
snptable = {}

#read in bedfile
# Build per-chromosome parallel lists of interval bounds (1-based inclusive)
# for the bisect-based region test in the main loop.
if options.bedfilename is not None:
    if str(options.bedfilename).endswith(".gz") is True:
        bed = gzip.open(options.bedfilename, "rb")
    else:
        # Bug fix: the optparse dest is "bedfilename"; the original read
        # "options.bedfile", raising AttributeError for uncompressed beds.
        bed = open(options.bedfilename, "r")
    bed_lower = {}
    bed_upper = {}
    for line_b1 in bed:
        line_b = line_b1.rstrip().split('\t')
        chr = str(line_b[0]).lower().replace("chr", "")
        if chr not in bed_lower:
            bed_lower[chr] = [chr, []]
            bed_upper[chr] = [chr, []]
        # BED is 0-based half-open; +1 converts the start to 1-based.
        bed_lower[chr][1].append(int(line_b[1]) + 1)
        bed_upper[chr][1].append(int(line_b[2]))
    bed.close()
# Main pass over the VCF body: apply variant-level and annotation-level
# filters, then record each passing SNP id under its gene(s) in snptable.
vcffile=gzip.open(options.vcffilename, "rb")
for line_vcf1 in vcffile:
line_vcf=line_vcf1.rstrip().split('\t')
keep=1
if line_vcf[0][0]!="#":
if keep==1 and options.passfilter:
if line_vcf[6]!="PASS":
keep=0
if keep==1 and options.snponly:
if len(line_vcf[3])>1 or len(line_vcf[4])>1:
keep=0
if keep==1 and options.indelonly:
if len(line_vcf[3])==1 and len(line_vcf[4])==1:
keep=0
# Multi-allelic sites are always dropped.
if "," in line_vcf[4]:
keep=0
#Subset on bedfile
if options.bedfilename is not None:
chr=str(line_vcf[0]).lower().replace("chr", "")
# Interval containing POS, assuming sorted non-overlapping bed intervals.
temp_index=bisect.bisect(bed_lower[chr][1], int(line_vcf[1]))-1
if temp_index<0:
keep=0
elif int(line_vcf[1])>bed_upper[chr][1][temp_index]:
keep=0
#Go through INFO field filters
if keep==1 and options.includeinfo is not None:
iter=0
while keep==1 and iter<len(options.includeinfo):
filter=options.includeinfo[iter]
keep=test_include_info(filter, line_vcf[7])
iter=iter+1
if keep==1 and options.excludeinfo is not None:
iter=0
while keep==1 and iter<len(options.excludeinfo):
filter=options.excludeinfo[iter]
keep=test_exclude_info(filter, line_vcf[7])
iter=iter+1
#Go through INFO/VEP field filters
if keep==1 and options.vep:
vcfline=line_vcf[7].replace("vep=", "CSQ=")
if "CSQ=" in vcfline:
annots=(";"+vcfline).split(";CSQ=")[1].split(";")[0].split(",")
# keep_a[i] tracks whether transcript annotation i survives the filters.
keep_a = [1] * len(annots)
if options.includevep is not None:
for i in range(0, len(annots), 1):
if len(csq_anno)==len(annots[i].split("|")):
iter=0
while keep_a[i]==1 and iter<len(options.includevep):
filter=options.includevep[iter]
keep_a[i]=test_include_vep(filter, annots[i], csq_anno)
iter=iter+1
if options.excludevep is not None:
for i in range(0, len(annots), 1):
if len(csq_anno)==len(annots[i].split("|")):
iter=0
while keep_a[i]==1 and iter<len(options.excludevep):
filter=options.excludevep[iter]
keep_a[i]=test_exclude_vep(filter, annots[i], csq_anno)
iter=iter+1
# Drop the variant only if no transcript survived.
if not 1 in keep_a:
keep=0
#If variant meets all filters for at least one transcript, then extract gene name for all ok transcripts
if keep==1:
vcfline=line_vcf[7].replace("vep=", "CSQ=")
if options.vep and "CSQ=" in vcfline:
gene=[]
for i in range(0, len(annots), 1):
if keep_a[i]==1:
gene.append(find_vep_gene(options.genecolname, annots[i], csq_anno))
else:
gene=find_info_gene(options.genecolname, line_vcf[7])
gene=list(set(gene))
if len(gene)>0:
if options.snpformat=="VCFID":
snpid=str(line_vcf[2])
else:
snpid=str(line_vcf[0].lstrip("chr"))+":"+str(line_vcf[1])+":"+str(line_vcf[3])+":"+str(line_vcf[4])
for i in range(0, len(gene), 1):
# Skip "null" gene labels (e.g. "." or "NA").
if gene[i] not in options.genenull.split(","):
if gene[i] not in snptable:
snptable[gene[i]]=[gene[i], [snpid]]
else:
snptable[gene[i]][1].append(snpid)
vcffile.close()
#Write Output
# Emit one tab-separated line per gene: gene name, comma-joined SNP ids.
# A context manager guarantees the handle is flushed/closed even on error.
with open(options.outfilename, "w") as outfile:
    outfile.write("#GENE\tSNPS\n")
    for x in snptable:
        if len(x) > 0:
            #Read through hash table and print out variants
            snp_out = ','.join(snptable[x][1])
            outfile.write(str(x) + "\t" + snp_out + "\n")
| 31.238208
| 189
| 0.670668
|
4a02aefdaa84cd3c60bfd7601924b4fe7c9a42e3
| 3,802
|
py
|
Python
|
lib/db_helper.py
|
mrsiesta/reddit_music_collector
|
53af5db9869015d0d315f13aac6a3604ff18563b
|
[
"Apache-2.0"
] | 1
|
2021-08-15T21:33:22.000Z
|
2021-08-15T21:33:22.000Z
|
lib/db_helper.py
|
mrsiesta/reddit_music_collector
|
53af5db9869015d0d315f13aac6a3604ff18563b
|
[
"Apache-2.0"
] | null | null | null |
lib/db_helper.py
|
mrsiesta/reddit_music_collector
|
53af5db9869015d0d315f13aac6a3604ff18563b
|
[
"Apache-2.0"
] | null | null | null |
import sqlite3
import tabulate
import time
from sqlite3 import Error
class sqliteHelper:
    """SQLite-backed cache of reddit music submissions.

    Wraps one connection/cursor pair around a ``content`` table and exposes
    helpers for inserting, querying and flagging downloaded tracks.
    """

    def __init__(self, sqlite_path='content-cache.db', ):
        """Initialize our database and ensure the required tables exist"""
        # Table name -> DDL; executed only for tables that are missing.
        required_tables = {
            'content': {
                'create_statement':
                    """ CREATE TABLE IF NOT EXISTS content (
                            id text PRIMARY KEY,
                            subreddit text,
                            title text NOT NULL,
                            artist text,
                            song_title text,
                            rank integer,
                            submission_date integer,
                            url text,
                            downloaded bool
                        );
                    """
            }
        }
        self.db_conn = self.create_connection(sqlite_path)
        self.cursor = self.db_conn.cursor()
        self.cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
        tables = [t[0] for t in self.cursor.fetchall()]
        for table_name, table_data in required_tables.items():
            if table_name not in tables:
                self.create_table(table_data['create_statement'])

    @staticmethod
    def create_connection(db_file_path):
        """Create a database connection to a SQLite database."""
        try:
            return sqlite3.connect(db_file_path)
        except Error as e:
            print(e)
            raise e

    def create_table(self, create_table_sql):
        """Create a table from *create_table_sql* (a CREATE TABLE statement)."""
        try:
            self.cursor.execute(create_table_sql)
        except Error as e:
            # Best-effort: log and continue, matching prior behavior.
            print(e)

    def display_content_table(self):
        """Pretty-print the whole content table to stdout."""
        self.cursor.execute("SELECT * from content")
        content_rows = self.cursor.fetchall()
        self.cursor.execute('PRAGMA table_info(content)')
        headers = [row[1] for row in self.cursor.fetchall()]
        # NOTE(review): tabulate's builtin style is 'fancy_grid' -- confirm 'fancy'.
        print(tabulate.tabulate(content_rows, headers=headers, tablefmt='fancy'))

    def insert_content(self, data):
        """Insert one (id, subreddit, title, rank, date, url, downloaded) row."""
        sql = ''' INSERT INTO content(id,subreddit,title,rank,submission_date,url,downloaded)
                  VALUES(?,?,?,?,?,?,?) '''
        self.cursor.execute(sql, data)
        self.db_conn.commit()
        return self.cursor.lastrowid

    def update_database(self, track_list):
        """Insert every praw submission in *track_list* as an undownloaded row."""
        print("Updating content table with new tracks")
        for new_track in track_list:
            _data = (
                new_track.id,
                new_track.subreddit.display_name,
                new_track.title,
                int(new_track.score),
                time.strftime('%Y/%m/%d %X', time.localtime(float(new_track.created))),
                new_track.url_overridden_by_dest,
                False,
            )
            self.insert_content(_data)
        print("Done inserting new data")

    def mark_track_downloaded(self, id):
        """Flag the row with primary key *id* as downloaded."""
        # Parameterized query: the old f-string broke on (and was injectable
        # through) ids containing quotes.
        self.cursor.execute("UPDATE content SET downloaded = 1 WHERE id = ?", (id,))
        self.db_conn.commit()

    def check_database_for_id(self, id):
        """Return True when a row with primary key *id* is already cached."""
        self.cursor.execute("SELECT id FROM content WHERE id = ?", (id,))
        return bool(self.cursor.fetchall())

    def update_db_with_track_data(self, id, artist, song_title):
        # TODO: populate the artist/song_title columns once parsing exists.
        pass

    def fetch_undownloaded(self):
        """Return all rows not yet flagged as downloaded."""
        self.cursor.execute("SELECT * FROM content WHERE downloaded = 0")
        return self.cursor.fetchall()
| 34.563636
| 93
| 0.576276
|
4a02afdb3fd944c81fe9ce3c0b2e5ead948ea138
| 38,981
|
py
|
Python
|
quspin/basis/basis_general/base_general.py
|
Alehud/QuSpin
|
c72d5fb2b2e9cd9a37d6917bba0337faf3b6c201
|
[
"BSD-3-Clause"
] | 195
|
2016-10-24T18:05:31.000Z
|
2022-03-29T10:11:56.000Z
|
quspin/basis/basis_general/base_general.py
|
Alehud/QuSpin
|
c72d5fb2b2e9cd9a37d6917bba0337faf3b6c201
|
[
"BSD-3-Clause"
] | 303
|
2016-10-25T20:08:11.000Z
|
2022-03-31T16:52:09.000Z
|
quspin/basis/basis_general/base_general.py
|
Alehud/QuSpin
|
c72d5fb2b2e9cd9a37d6917bba0337faf3b6c201
|
[
"BSD-3-Clause"
] | 54
|
2017-01-03T18:47:52.000Z
|
2022-03-16T06:54:33.000Z
|
import numpy as _np
import scipy.sparse as _sp
import os,numexpr
from ._basis_general_core.general_basis_utils import basis_int_to_python_int,_get_basis_index
from ._basis_general_core import basis_zeros
from ..lattice import lattice_basis
import warnings
# Warning category raised for questionable symmetry-map inputs (identity
# maps, duplicate or non-commuting symmetries).
class GeneralBasisWarning(Warning):
pass
def process_map(map, q):
    """Validate one symmetry map and compute its periodicity and orbit.

    Negative entries encode spin inversion on a site (site ``-(m+1)``).
    Returns ``(map, per, q, group)`` where ``per`` is how many applications
    of the map reproduce the identity and ``group`` is the set of site
    tuples generated along the way.
    """
    map = _np.asarray(map, dtype=_np.int32)
    bare_map = map.copy()
    flip = map < 0                              # sites carrying spin-inversion
    bare_map[flip] = -(bare_map[flip] + 1)      # plain site permutation
    identity = _np.arange(len(map), dtype=_np.int32)
    sites = identity.copy()
    # a valid map must be a permutation of the site labels
    if _np.any(_np.sort(bare_map) - identity):
        raise ValueError("map must be a one-to-one site mapping.")
    per = 0
    group = [tuple(identity)]
    while True:
        # one application: flip the marked sites, then permute
        sites[flip] = -(sites[flip] + 1)
        sites = sites[bare_map]
        per += 1
        group.append(tuple(sites))
        if _np.array_equal(identity, sites):
            break
    if per == 1:
        warnings.warn("identity mapping found in set of transformations.", GeneralBasisWarning, stacklevel=5)
    return map, per, q, set(group)
def check_symmetry_maps(item1, item2):
    # Compare two symmetry blocks, each of the form
    # (block_name, (map, per, q, group)) as produced by process_map, and
    # emit a GeneralBasisWarning when they generate the same symmetry or
    # when the two maps do not commute. Returns nothing.
    grp1 = item1[1][-1]
    map1 = item1[1][0]
    block1 = item1[0]
    i_map1 = map1.copy()
    i_map1[map1<0] = -(i_map1[map1<0] + 1) # site mapping
    s_map1 = map1 < 0 # sites with spin-inversion
    grp2 = item2[1][-1]
    map2 = item2[1][0]
    block2 = item2[0]
    i_map2 = map2.copy()
    i_map2[map2<0] = -(i_map2[map2<0] + 1) # site mapping
    s_map2 = map2 < 0 # sites with spin-inversion
    if grp1 == grp2:
        # identical orbit sets -> the two blocks are the same symmetry
        warnings.warn("mappings for block {} and block {} produce the same symmetry.".format(block1,block2),GeneralBasisWarning,stacklevel=5)
    # apply map1 then map2 to the identity labelling, and map2 then map1;
    # differing results mean the transformations do not commute
    sites1 = _np.arange(len(map1))
    sites2 = _np.arange(len(map2))
    sites1[s_map1] = -(sites1[s_map1]+1)
    sites1 = sites1[i_map1]
    sites1[s_map2] = -(sites1[s_map2]+1)
    sites1 = sites1[i_map2]
    sites2[s_map2] = -(sites2[s_map2]+1)
    sites2 = sites2[i_map2]
    sites2[s_map1] = -(sites2[s_map1]+1)
    sites2 = sites2[i_map1]
    if not _np.array_equal(sites1,sites2):
        warnings.warn("using non-commuting symmetries can lead to unwanted behaviour of general basis, make sure that quantum numbers are invariant under non-commuting symmetries!",GeneralBasisWarning,stacklevel=5)
class basis_general(lattice_basis):
def __init__(self, N, block_order=None, **kwargs):
    """Set up the symmetry data (maps, periodicities, quantum numbers).

    Parameters
    ----------
    N : int
        number of lattice sites; every map must have this length.
    block_order : iterable(str), optional
        explicit ordering of the symmetry blocks. By default blocks are
        sorted by periodicity, smallest first, for speed.
    **kwargs :
        symmetry blocks, each given as a ``(map, q)`` tuple; entries with
        value ``None`` are ignored.
    """
    lattice_basis.__init__(self)
    self._unique_me = True
    self._check_herm = True
    self._check_pcon = None
    self._basis_pcon = None
    self._get_proj_pcon = False
    self._made_basis = False  # keeps track of whether the basis has been made
    self._Ns_block_est = 0  # initialize number of states variable
    if self.__class__ is basis_general:
        raise TypeError("general_basis class is not to be instantiated.")
    kwargs = {key: value for key, value in kwargs.items() if value is not None}
    n_maps = len(kwargs)
    if n_maps > 32:
        raise ValueError("general basis can only support up to 32 symmetries.")
    if n_maps > 0:
        self._conserved = 'custom symmetries'
    else:
        self._conserved = ''
    # NOTE(review): with `and` this only rejects non-tuples whose length is
    # also != 2; a wrong-length tuple slips through. Kept as-is so inputs
    # that currently work (e.g. two-element lists) are not rejected.
    if any((type(map) is not tuple) and (len(map) != 2) for map in kwargs.values()):
        raise ValueError("blocks must contain tuple: (map,q).")
    kwargs = {block: process_map(*item) for block, item in kwargs.items()}
    if block_order is None:
        # sort by periodicities smallest to largest for speed up
        sorted_items = sorted(kwargs.items(), key=lambda x: x[1][1])
    else:
        # user-specified ordering: validate that it names exactly the blocks
        block_order = list(block_order)
        missing = set(kwargs.keys()) - set(block_order)
        if len(missing) > 0:
            raise ValueError("{} names found in block names but missing from block_order.".format(missing))
        missing = set(block_order) - set(kwargs.keys())
        if len(missing) > 0:
            raise ValueError("{} names found in block_order but missing from block names.".format(missing))
        block_order.reverse()
        sorted_items = [(key, kwargs[key]) for key in block_order]
    # public quantum numbers: periodicity-2 blocks report (-1)**q
    self._blocks = {block: ((-1) ** q if per == 2 else q) for block, (_, per, q, _) in sorted_items}
    self._maps_dict = {block: map for block, (map, _, _, _) in sorted_items}
    remove_index = []
    for i, item1 in enumerate(sorted_items[:-1]):
        if item1[1][1] == 1:
            # periodicity 1 == identity map; schedule for removal
            remove_index.append(i)
        for j, item2 in enumerate(sorted_items[i + 1:]):
            check_symmetry_maps(item1, item2)
    remove_index.sort()
    if sorted_items:
        blocks, items = zip(*sorted_items)
        items = list(items)
        # BUG FIX: pop in descending order -- popping ascending indices
        # shifts the positions of the later ones.
        for i in reversed(remove_index):
            items.pop(i)
        n_maps = len(items)
        maps, pers, qs, _ = zip(*items)
        self._maps = _np.vstack(maps)
        self._qs = _np.asarray(qs, dtype=_np.int32)
        self._pers = _np.asarray(pers, dtype=_np.int32)
        if any(map.ndim != 1 for map in self._maps[:]):
            raise ValueError("maps must be a 1-dim array/list of integers.")
        if any(map.shape[0] != N for map in self._maps[:]):
            raise ValueError("size of map is not equal to N.")
        if self._maps.shape[0] != self._qs.shape[0]:
            raise ValueError("number of maps must be the same as the number of quantum numbers provided.")
        for j in range(n_maps - 1):
            for i in range(j + 1, n_maps, 1):
                if _np.all(self._maps[j] == self._maps[i]):
                    # BUG FIX: the exception was constructed but never raised
                    raise ValueError("repeated map in maps list.")
    else:
        # no symmetries: keep well-typed empty arrays
        self._maps = _np.array([[]], dtype=_np.int32)
        self._qs = _np.array([], dtype=_np.int32)
        self._pers = _np.array([], dtype=_np.int32)
    # largest possible normalization; pick the smallest dtype that holds it
    nmax = self._pers.prod()
    self._n_dtype = _np.min_scalar_type(nmax)
def __getstate__(self):
    """Pickle support: serialize everything except the C-extension `_core`
    handle (raises KeyError if `_core` is absent, as before)."""
    state = dict(self.__dict__)
    del state["_core"]
    return state
# @property
# def _fermion_basis(self):
# return False
@property
def description(self):
    """str: information about `basis` object."""
    # assemble "name = value, " for every symmetry block; the placeholders
    # are filled in one str.format call with the block dict
    blocks = ""
    for symm in self._blocks:
        blocks += symm+" = {"+symm+"}, "
    blocks = blocks.format(**self._blocks)
    # grammatical number keyed off the length of the _conserved string
    # (empty string means no symmetries at all)
    if len(self._conserved) == 0:
        symm = "no symmetry"
    elif len(self._conserved) == 1:
        symm = "symmetry"
    else:
        symm = "symmetries"
    string = """general basis for lattice of N = {0} sites containing {5} states \n\t{1}: {2} \n\tquantum numbers: {4} \n\n""".format(self._N,symm,self._conserved,'',blocks,self._Ns)
    string += self.operators
    return string
def _int_to_state(self, state, bracket_notation=True):
    # Render an integer-encoded Fock state as a string of per-site
    # occupations (base-`sps` digits, left-most site first).
    state = basis_int_to_python_int(state)
    n_space = len(str(self.sps))  # column width of one occupation number
    if self.N <= 64:
        # occupation of site i = i-th base-sps digit of `state`
        bits = (state//int(self.sps**(self.N-i-1))%self.sps for i in range(self.N))
        s_str = " ".join(("{:"+str(n_space)+"d}").format(bit) for bit in bits)
    else:
        # very large systems: show only the 32 left-most and 32 right-most
        # sites separated by "..."
        left_bits = (state//int(self.sps**(self.N-i-1))%self.sps for i in range(32))
        right_bits = (state//int(self.sps**(self.N-i-1))%self.sps for i in range(self.N-32,self.N,1))
        str_list = [("{:"+str(n_space)+"d}").format(bit) for bit in left_bits]
        str_list.append("...")
        str_list.extend(("{:"+str(n_space)+"d}").format(bit) for bit in right_bits)
        s_str = (" ".join(str_list))
    if bracket_notation:
        return "|"+s_str+">"
    else:
        # bare digit string: no ket decoration, no separating spaces
        return s_str.replace(' ', '')
def _state_to_int(self, state):
    """Parse a ket string (e.g. ``"|0 1 0>"``) back into its integer code."""
    # strip Dirac-notation decorations, then look the state up in the basis
    cleaned = state.replace('|', '').replace('>', '').replace('<', '')
    return basis_int_to_python_int(self._basis[self.index(cleaned)])
def _index(self, s):
    """Return the position of state `s` in `self.states`; strings are
    interpreted as base-`sps` digit strings."""
    if type(s) is str:
        s = int(s, self.sps)
    return _get_basis_index(self.states, s)
def _reduce_n_dtype(self):
    """Compact `self._n` to the smallest dtype holding its maximum."""
    if len(self._n) == 0:
        return  # nothing to narrow for an empty normalization array
    self._n_dtype = _np.min_scalar_type(self._n.max())
    self._n = self._n.astype(self._n_dtype)
def _Op(self, opstr, indx, J, dtype):
    # Compute the matrix elements of the operator J*opstr acting on sites
    # `indx` over this basis, returned in COO-style (ME, row, col) form.
    if not self._made_basis:
        raise AttributeError('this function requires the basis to be constructed first; use basis.make().')
    indx = _np.asarray(indx,dtype=_np.int32)
    if len(opstr) != len(indx):
        raise ValueError('length of opstr does not match length of indx')
    if _np.any(indx >= self._N) or _np.any(indx < 0):
        raise ValueError('values in indx falls outside of system')
    extra_ops = set(opstr) - self._allowed_ops
    if extra_ops:
        raise ValueError("unrecognized characters {} in operator string.".format(extra_ops))
    if self._Ns <= 0:
        # empty basis -> empty matrix
        return _np.array([],dtype=dtype),_np.array([],dtype=self._index_type),_np.array([],dtype=self._index_type)
    # one potential matrix element per basis state; the C core fills these
    # buffers in place
    col = _np.empty(self._Ns,dtype=self._index_type)
    row = _np.empty(self._Ns,dtype=self._index_type)
    ME = _np.empty(self._Ns,dtype=dtype)
    # print(self._Ns)
    self._core.op(row,col,ME,opstr,indx,J,self._basis,self._n,self._basis_begin,self._basis_end,self._N_p)
    # build a mask of the nonzero matrix elements; numexpr evaluates the
    # comparison in one pass (complex64 is special-cased away from numexpr
    # -- presumably unsupported there; TODO confirm)
    if _np.iscomplexobj(ME):
        if ME.dtype == _np.complex64:
            mask = ME.real != 0
            mask1 = ME.imag != 0
            _np.logical_or(mask,mask1,out=mask)
        else:
            mask = numexpr.evaluate("(real(ME)!=0) | (imag(ME)!=0)")
    else:
        mask = numexpr.evaluate("ME!=0")
    # purge exact zeros before returning
    col = col[mask]
    row = row[mask]
    ME = ME[mask]
    return ME,row,col
def _inplace_Op(self, v_in, op_list, dtype, transposed=False, conjugated=False, v_out=None, a=1.0):
    # Apply a * sum_i J_i*opstr_i to the columns of v_in, accumulating into
    # v_out (allocated here when not supplied) and returning it squeezed.
    if not self._made_basis:
        raise AttributeError('this function requires the basis to be constructed first; use basis.make().')
    v_in = _np.asanyarray(v_in)
    # promote to a dtype that holds both the input and the requested type
    result_dtype = _np.result_type(v_in.dtype,dtype)
    v_in = v_in.astype(result_dtype,order="C",copy=False)
    if v_in.shape[0] != self.Ns:
        raise ValueError("dimension mismatch")
    if v_out is None:
        v_out = _np.zeros_like(v_in,dtype=result_dtype,order="C")
    else:
        # caller-supplied buffer must be writable, C-contiguous and of
        # matching dtype/shape because the C core accumulates in place
        if v_out.dtype != result_dtype:
            raise TypeError("v_out does not have the correct data type.")
        if not v_out.flags["CARRAY"]:
            raise ValueError("v_out is not a writable C-contiguous array")
        if v_out.shape != v_in.shape:
            raise ValueError("invalid shape for v_out and v_in: v_in.shape != v_out.shape")
    # work on 2-d (Ns, nvecs) views; shape restored by the final squeeze
    v_out = v_out.reshape((self.Ns,-1))
    v_in = v_in.reshape((self.Ns,-1))
    for opstr,indx,J in op_list:
        indx = _np.ascontiguousarray(indx,dtype=_np.int32)
        self._core.inplace_op(v_in,v_out,conjugated,transposed,opstr,indx,a*J,
            self._basis,self._n,self._basis_begin,self._basis_end,self._N_p)
    return v_out.squeeze()
def Op_shift_sector(self, other_basis, op_list, v_in, v_out=None, dtype=None):
    """Applies symmetry non-conserving operator to state in symmetry-reduced basis.

    An operator, which does not conserve a symmetry, induces a change in the quantum number of a state defined in the corresponding symmetry sector. Hence, when the operator is applied on a quantum state, the state shifts the symmetry sector. `Op_shift_sector()` handles this automatically.

    :red:`NOTE: One has to make sure that (i) the operator moves the state between the two sectors, and (ii) the two bases objects have the same symmetries. This function will not give the correct results otherwise.`

    Formally equivalent to:

    >>> P1 = basis_sector_1.get_proj(np.complex128) # projector between full and initial basis
    >>> P2 = basis_sector_2.get_proj(np.complex128) # projector between full and target basis
    >>> v_in_full = P1.dot(v_in) # go from initial basis to to full basis
    >>> v_out_full = basis_full.inplace_Op(v_in_full,op_list,np.complex128) # apply Op
    >>> v_out = P2.H.dot(v_out_full) # project to target basis

    Notes
    -----
    * particularly useful when computing correlation functions.
    * supports parallelization to multiple states listed in the columns of `v_in`.
    * the user is strongly advised to use the code under "Formally equivalent" above to check the results of this function for small system sizes.

    Parameters
    -----------
    other_basis : `basis` object
        `basis_general` object for the initial symmetry sector. Must be the same `basis` class type as the basis whose instance is `Op_shift_sector()` (i.e. the basis in `basis.Op_shift_sector()`).
    op_list : list
        Operator string list which defines the operator to apply. Follows the format `[["z",[i],Jz[i]] for i in range(L)], ["x",[i],Jx[j]] for j in range(L)],...]`.
    v_in : array_like, (other_basis.Ns,...)
        Initial state to apply the symmetry non-conserving operator on. Must have the same length as `other_basis.Ns`.
    v_out : array_like, (basis.Ns,...), optional
        Optional array to write the result for the final/target state in.
    dtype : numpy dtype for matrix elements, optional
        Data type (e.g. `numpy.float64`) to construct the operator with.

    Returns
    --------
    (basis.Ns, ) numpy.ndarray
        Array containing the state `v_out` in the current basis, i.e. the basis in `basis.Op_shift_sector()`.

    Examples
    --------

    >>> v_out = basis.Op_shift_sector(initial_basis, op_list, v_in)
    >>> print(v_out.shape, basis.Ns, v_in.shape, initial_basis.Ns)

    """
    # consider flag to do calc with projectors instead to use as a check.
    if not isinstance(other_basis,self.__class__):
        raise ValueError("other_basis must be the same type as the given basis.")
    if not self._made_basis:
        raise AttributeError('this function requires the basis to be constructed first; use basis.make().')
    if not other_basis._made_basis:
        raise AttributeError('this function requires the basis to be constructed first; use basis.make().')
    # promote the working dtype so couplings and input state fit losslessly
    # (float32 floor keeps integer inputs from staying integral)
    _,_,J_list = zip(*op_list)
    J_list = _np.asarray(J_list)
    if dtype is not None:
        J_list = J_list.astype(dtype)
    v_in = _np.asanyarray(v_in)
    result_dtype = _np.result_type(_np.float32,J_list.dtype,v_in.dtype)
    v_in = v_in.astype(result_dtype,order="C",copy=False)
    # view input as (Ns, nvecs); each column is an independent state
    v_in = v_in.reshape((other_basis.Ns,-1))
    nvecs = v_in.shape[1]
    if v_in.shape[0] != other_basis.Ns:
        raise ValueError("invalid shape for v_in")
    if v_out is None:
        v_out = _np.zeros((self.Ns,nvecs),dtype=result_dtype,order="C")
    else:
        # caller-supplied buffer: the C core accumulates into it in place
        if v_out.dtype != result_dtype:
            raise TypeError("v_out does not have the correct data type.")
        if not v_out.flags["CARRAY"]:
            raise ValueError("v_out is not a writable C-contiguous array")
        if v_out.shape != (self.Ns,nvecs):
            raise ValueError("invalid shape for v_out")
    for opstr,indx,J in op_list:
        indx = _np.ascontiguousarray(indx,dtype=_np.int32)
        self._core.op_shift_sector(v_in,v_out,opstr,indx,J,
            self._basis,self._n,other_basis._basis,other_basis._n)
    # collapse the column axis for single-state input
    if nvecs==1:
        return v_out.squeeze()
    else:
        return v_out
def get_proj(self, dtype, pcon=False):
    """Calculates transformation/projector from symmetry-reduced basis to full (symmetry-free) basis.

    Notes
    -----
    * particularly useful when a given operation cannot be carried out in the symmetry-reduced basis in a straightforward manner.
    * see also `Op_shift_sector()`.

    Parameters
    -----------
    dtype : 'type'
        Data type (e.g. numpy.float64) to construct the projector with.
    pcon : bool, optional
        Whether or not to return the projector to the particle number (magnetisation) conserving basis
        (useful in bosonic/single particle systems). Default is `pcon=False`.

    Returns
    --------
    scipy.sparse.csc_matrix
        Transformation/projector between the symmetry-reduced and the full basis.

    Examples
    --------

    >>> P = get_proj(np.float64,pcon=False)
    >>> print(P.shape)

    """
    if not self._made_basis:
        raise AttributeError('this function requires the basis to be constructed first; use basis.make().')
    basis_pcon = None
    Ns_full = (self._sps ** self._N)
    if pcon and self._get_proj_pcon:
        # lazily construct the particle-conserving reference basis once
        if self._basis_pcon is None:
            self._basis_pcon = self.__class__(**self._pcon_args)
        basis_pcon = self._basis_pcon._basis
        Ns_full = basis_pcon.shape[0]
    elif pcon:
        # BUG FIX: this branch previously repeated the condition above and
        # was unreachable; pcon=True without a single particle-number
        # sector must raise instead of silently returning the full-basis
        # projector.
        raise TypeError("pcon=True only works for basis of a single particle number sector.")
    sign = _np.ones_like(self._basis, dtype=_np.int8)
    # normalization coefficients: 1/sqrt(n * prod(periodicities))
    c = self._n.astype(dtype, copy=True)
    c *= self._pers.prod()
    _np.sqrt(c, out=c)
    _np.power(c, -1, out=c)
    # index dtype wide enough for the full-space dimension (signed)
    index_type = _np.result_type(_np.min_scalar_type(-Ns_full), _np.int32)
    indptr = _np.arange(self._Ns + 1, dtype=index_type)
    indices = _np.arange(self._Ns, dtype=index_type)
    return self._core.get_proj(self._basis, dtype, sign, c, indices, indptr, basis_pcon=basis_pcon)
def project_to(self, v0, sparse=True, pcon=False):
    """Transforms state from full (symmetry-free) basis to symmetry-reduced basis.

    Notes
    -----
    * particularly useful when a given operation cannot be carried out in the full basis.
    * supports parallelisation to multiple states listed in the columns.
    * inverse function to `project_from`.

    Parameters
    -----------
    v0 : numpy.ndarray
        Contains in its columns the states in the full (symmetry-free) basis.
    sparse : bool, optional
        Whether or not the output should be in sparse format. Default is `True`.
    pcon : bool, optional
        Whether or not to return the output in the particle number (magnetisation) conserving basis
        (useful in bosonic/single particle systems). Default is `pcon=False`.

    Returns
    --------
    numpy.ndarray
        Array containing the state `v0` in the symmetry-reduced basis.

    Examples
    --------

    >>> v_symm = project_to(v0)
    >>> print(v_symm.shape, v0.shape)

    """
    basis_pcon = None
    if pcon==True:
        # lazily build the particle-conserving reference basis once
        if self._basis_pcon is None:
            self._basis_pcon = self.__class__(**self._pcon_args,make_basis=False)
            self._basis_pcon.make(N_p=0)
        basis_pcon = self._basis_pcon._basis
    if not self._made_basis:
        raise AttributeError('this function requires the basis to be cosntructed first, see basis.make().')
    if not hasattr(v0,"shape"):
        v0 = _np.asanyarray(v0)
    squeeze = False
    # dimension of the source (full or particle-conserving) space
    if pcon:
        Ns_full = basis_pcon.size
    else:
        Ns_full = self._sps**self._N
    if v0.ndim == 1:
        v0 = v0.reshape((-1,1))
        shape = (self._Ns,1)
        squeeze = True  # return 1-d output for 1-d input
    elif v0.ndim == 2:
        shape = (self._Ns,v0.shape[1])
    else:
        raise ValueError("excpecting v0 to have ndim > 0 and at most 2")
    if self._Ns <= 0:
        # CHECK later
        if sparse:
            return _sp.csr_matrix(([],([],[])),shape=(self._Ns,0),dtype=v0.dtype)
        else:
            return _np.zeros((self._Ns,0),dtype=v0.dtype)
    if v0.shape[0] != Ns_full:
        raise ValueError("v0 shape {0} not compatible with Ns_full={1}".format(v0.shape,Ns_full))
    if _sp.issparse(v0): # current work around for sparse states.
        # return self.get_proj(v0.dtype).dot(v0)
        raise ValueError
    v0 = _np.ascontiguousarray(v0)
    if sparse:
        # current work-around for sparse
        return self.get_proj(v0.dtype,pcon=pcon).T.dot(_sp.csr_matrix(v0))
    else:
        v_out = _np.zeros(shape,dtype=v0.dtype,)
        # the C core writes the projected amplitudes into v_out in place
        self._core.project_to_dense(self._basis,self._n,v0,v_out,basis_pcon=basis_pcon)
        if squeeze:
            return _np.squeeze(v_out)
        else:
            return v_out
def get_vec(self, v0, sparse=True, pcon=False):
    """ DEPRECATED (cf `project_from`). Transforms state from symmetry-reduced basis to full (symmetry-free) basis.

    Notes
    -----
    This function is :red:`deprecated`. Use `project_from()` instead; see also the inverse function `project_to()`.

    """
    # thin backwards-compatibility alias; all work delegated unchanged
    return self.project_from(v0, sparse=sparse, pcon=pcon)
def project_from(self, v0, sparse=True, pcon=False):
    """Transforms state from symmetry-reduced basis to full (symmetry-free) basis.

    Notes
    -----
    * particularly useful when a given operation cannot be carried out in the symmetry-reduced basis in a straightforward manner.
    * supports parallelisation to multiple states listed in the columns.
    * inverse function to `project_to`.

    Parameters
    -----------
    v0 : numpy.ndarray
        Contains in its columns the states in the symmetry-reduced basis.
    sparse : bool, optional
        Whether or not the output should be in sparse format. Default is `True`.
    pcon : bool, optional
        Whether or not to return the output in the particle number (magnetisation) conserving basis
        (useful in bosonic/single particle systems). Default is `pcon=False`.

    Returns
    --------
    numpy.ndarray
        Array containing the state `v0` in the full basis.

    Examples
    --------

    >>> v_full = project_from(v0)
    >>> print(v_full.shape, v0.shape)

    """
    basis_pcon = None
    if pcon==True:
        # lazily build the particle-conserving reference basis once
        if self._basis_pcon is None:
            self._basis_pcon = self.__class__(**self._pcon_args,make_basis=False)
            self._basis_pcon.make(N_p=0)
        basis_pcon = self._basis_pcon._basis
    if not self._made_basis:
        raise AttributeError('this function requires the basis to be cosntructed first, see basis.make().')
    if not hasattr(v0,"shape"):
        v0 = _np.asanyarray(v0)
    squeeze = False
    # dimension of the target (full or particle-conserving) space
    if pcon:
        Ns_full = basis_pcon.size
    else:
        Ns_full = self._sps**self._N
    if v0.ndim == 1:
        v0 = v0.reshape((-1,1))
        shape = (Ns_full,1)
        squeeze = True  # return 1-d output for 1-d input
    elif v0.ndim == 2:
        shape = (Ns_full,v0.shape[1])
    else:
        raise ValueError("excpecting v0 to have ndim > 0 and at most 2")
    if self._Ns <= 0:
        if sparse:
            return _sp.csr_matrix(([],([],[])),shape=(Ns_full,0),dtype=v0.dtype)
        else:
            return _np.zeros((Ns_full,0),dtype=v0.dtype)
    if v0.shape[0] != self._Ns:
        raise ValueError("v0 shape {0} not compatible with Ns={1}".format(v0.shape,self._Ns))
    if _sp.issparse(v0): # current work around for sparse states.
        # return self.get_proj(v0.dtype).dot(v0)
        raise ValueError
    v0 = _np.ascontiguousarray(v0)
    if sparse:
        # current work-around for sparse
        return self.get_proj(v0.dtype,pcon=pcon).dot(_sp.csc_matrix(v0))
    else:
        v_out = _np.zeros(shape,dtype=v0.dtype,)
        # the C core writes the expanded amplitudes into v_out in place
        self._core.project_from_dense(self._basis,self._n,v0,v_out,basis_pcon=basis_pcon)
        if squeeze:
            return _np.squeeze(v_out)
        else:
            return v_out
def _check_symm(self, static, dynamic, photon_basis=None):
    # For every symmetry block, collect the static/dynamic operators that
    # are inconsistent with that block's map. `_check_symm_map` is defined
    # elsewhere in the module (not visible here) -- presumably it returns
    # (odd_ops, missing_ops) for a given map and operator list; verify.
    if photon_basis is None:
        basis_sort_opstr = self._sort_opstr
        static_list,dynamic_list = self._get_local_lists(static,dynamic)
    else:
        # when a photon basis wraps this one, defer to its sorting and
        # local-list construction
        basis_sort_opstr = photon_basis._sort_opstr
        static_list,dynamic_list = photon_basis._get_local_lists(static,dynamic)
    static_blocks = {}
    dynamic_blocks = {}
    for block,map in self._maps_dict.items():
        key = block+" symm"
        odd_ops,missing_ops = _check_symm_map(map,basis_sort_opstr,static_list)
        if odd_ops or missing_ops:
            static_blocks[key] = (tuple(odd_ops),tuple(missing_ops))
        odd_ops,missing_ops = _check_symm_map(map,basis_sort_opstr,dynamic_list)
        if odd_ops or missing_ops:
            dynamic_blocks[key] = (tuple(odd_ops),tuple(missing_ops))
    return static_blocks,dynamic_blocks
def make(self, Ns_block_est=None, N_p=None):
    """Creates the entire basis by calling the basis constructor.

    Parameters
    -----------
    Ns_block_est: int, optional
        Overwrites the internal estimate of the size of the reduced Hilbert space for the given symmetries. This can be used to help conserve memory if the exact size of the H-space is known ahead of time.
    N_p: int, optional
        number of bits to use in the prefix label used to generate blocks for searching positions of representatives.

    Returns
    --------
    int
        Total number of states in the (symmetry-reduced) Hilbert space.

    Notes
    -----
    The memory stored in the basis grows exponentially as exactly :math:`2^{N_p+1}`. The default behavior is to use `N_p` such that
    the size of the stored information for the representative bounds is approximately as large as the basis. This is not as effective
    for basis which small particle numbers as the blocks have very uneven sizes. To not use the blocks just set N_p=0.

    Examples
    --------

    >>> N, Nup = 8, 4
    >>> basis=spin_basis_general(N,Nup=Nup,make_basis=False)
    >>> print(basis)
    >>> basis.make()
    >>> print(basis)

    """
    # working allocation size: max of caller's estimate, internal estimate,
    # and a 1000-state floor
    if Ns_block_est is not None:
        if Ns_block_est > self._Ns_block_est:
            Ns = Ns_block_est
        else:
            Ns = self._Ns_block_est
    else:
        Ns = max([self._Ns,1000,self._Ns_block_est])
    # preallocate variables
    basis = basis_zeros(Ns,dtype=self._basis_dtype)
    n = _np.zeros(Ns,dtype=self._n_dtype)
    # make basis
    if self._count_particles and (self._Np is not None):
        # also record the particle count of each representative
        Np_list = _np.zeros_like(basis,dtype=_np.uint8)
        Ns = self._core.make_basis(basis,n,Np=self._Np,count=Np_list)
    else:
        Np_list = None
        Ns = self._core.make_basis(basis,n,Np=self._Np)
    if Ns < 0:
        # negative count signals the preallocated buffers were too small
        raise ValueError("estimate for size of reduced Hilbert-space is too low, please double check that transformation mappings are correct or use 'Ns_block_est' argument to give an upper bound of the block size.")
    # sort basis
    if type(self._Np) is int or type(self._Np) is tuple or self._Np is None:
        if Ns > 0:
            # trim the preallocated arrays to the states actually found
            self._basis = basis[:Ns].copy()
            self._n = n[:Ns].copy()
            if Np_list is not None: self._Np_list = Np_list[:Ns].copy()
        else:
            self._basis = _np.array([],dtype=basis.dtype)
            self._n = _np.array([],dtype=n.dtype)
            if Np_list is not None: self._Np_list = _np.array([],dtype=Np_list.dtype)
        sort_basis = False
    else:
        sort_basis = True
        if Ns > 0:
            # self._basis = basis[Ns-1::-1].copy()
            # self._n = n[Ns-1::-1].copy()
            # if Np_list is not None: self._Np_list = Np_list[Ns-1::-1].copy()
            self._basis = basis[:Ns].copy()
            self._n = n[:Ns].copy()
            if Np_list is not None: self._Np_list = Np_list[:Ns].copy()
        else:
            self._basis = _np.array([],dtype=basis.dtype)
            self._n = _np.array([],dtype=n.dtype)
            if Np_list is not None: self._Np_list = _np.array([],dtype=Np_list.dtype)
    # NOTE(review): `sort_basis` is assigned but never used below --
    # presumably a leftover from an older sorting code path; confirm.
    # NOTE(review): despite the docstring's "Returns" section, this method
    # currently returns None; the count is stored in self._Ns instead.
    self._Ns=Ns
    self._Ns_block_est=Ns
    self._index_type = _np.result_type(_np.min_scalar_type(self._Ns),_np.int32)
    self._reduce_n_dtype()
    self._made_basis = True
    # build the prefix-block lookup used to speed up representative search
    self.make_basis_blocks(N_p=N_p)
def make_basis_blocks(self, N_p=None):
    """Creates/modifies the bounds for representatives based on prefix tags.

    Parameters
    -----------
    N_p: int, optional
        number of bits to use in the prefix label used to generate blocks for searching positions of representatives.

    Notes
    -----
    The memory stored in the basis grows exponentially as exactly :math:`2^{N_p+1}`. The default behavior is to use `N_p` such that
    the size of the stored information for the representative bounds is approximately as large as the basis. This is not as effective
    for basis which small particle numbers as the blocks have very uneven sizes. To not use the blocks just set N_p=0.

    Examples
    --------

    >>> N, Nup = 8, 4
    >>> basis=spin_basis_general(N,Nup=Nup,make_basis=False)
    >>> print(basis)
    >>> basis.make()
    >>> print(basis)

    """
    if not self._made_basis:
        raise ValueError("reference states are not constructed yet. basis must be constructed before calculating blocks")
    sps = self.sps
    if sps is None:
        sps = 2  # fall back to two states per site when sps is undefined
    if N_p is None:
        # default: prefix table roughly as large as the basis itself
        N_p = int(_np.floor(_np.log(self._Ns//2+1)/_np.log(sps)))
    else:
        N_p = int(N_p)
    if len(self._pers) == 0 and self._Np is None:
        N_p = 0 # do not use blocks for full basis
    self._N_p = min(max(N_p,0),self.N)  # clamp prefix length to [0, N]
    if self._N_p > 0:
        self._basis_begin,self._basis_end = self._core.make_basis_blocks(self._N_p,self._basis)
    else:
        # empty bounds disable the prefix-block search
        self._basis_begin = _np.array([],dtype=_np.intp)
        self._basis_end = _np.array([],dtype=_np.intp)
def Op_bra_ket(self, opstr, indx, J, dtype, ket_states, reduce_output=True):
    """Finds bra states which connect given ket states by operator from a site-coupling list and an operator string.

    Given a set of ket states :math:`|s\\rangle`, the function returns the bra states :math:`\\langle s'|` which connect to them through an operator, together with the corresponding matrix elements.

    Notes
    -----
    * Similar to `Op` but instead of returning the matrix indices (row,col), it returns the states (bra,ket) in integer representation.
    * Does NOT require the full basis (see `basis` optional argument `make_basis`).
    * If a state from `ket_states` does not have a non-zero matrix element, it is removed from the returned list. See otional argument `reduce_output`.

    Parameters
    -----------
    opstr : str
        Operator string in the lattice basis format. For instance:

        >>> opstr = "zz"
    indx : list(int)
        List of integers to designate the sites the lattice basis operator is defined on. For instance:

        >>> indx = [2,3]
    J : scalar
        Coupling strength.
    dtype : 'type'
        Data type (e.g. numpy.float64) to construct the matrix elements with.
    ket_states : numpy.ndarray(int)
        Ket states in integer representation. Must be of same data type as `basis`.
    reduce_output: bool, optional
        If set to `False`, the returned arrays have the same size as `ket_states`; If set to `True` zeros are purged.

    Returns
    --------
    tuple
        `(ME,bra,ket)`, where

        * numpy.ndarray(scalar): `ME`: matrix elements of type `dtype`, which connects the ket and bra states.
        * numpy.ndarray(int): `bra`: bra states, obtained by applying the matrix representing the operator in the lattice basis,
          to the ket states, such that `bra[i]` corresponds to `ME[i]` and connects to `ket[i]`.
        * numpy.ndarray(int): `ket`: ket states, such that `ket[i]` corresponds to `ME[i]` and connects to `bra[i]`.

    Examples
    --------

    >>> J = 1.41
    >>> indx = [2,3]
    >>> opstr = "zz"
    >>> dtype = np.float64
    >>> ME, bra, ket = Op_bra_ket(opstr,indx,J,dtype,ket_states)

    """
    indx = _np.asarray(indx,dtype=_np.int32)
    # copy kets into a 1-d array of the basis integer dtype
    ket_states=_np.array(ket_states,dtype=self._basis.dtype,ndmin=1)
    if len(opstr) != len(indx):
        raise ValueError('length of opstr does not match length of indx')
    if _np.any(indx >= self._N) or _np.any(indx < 0):
        raise ValueError('values in indx falls outside of system')
    extra_ops = set(opstr) - self._allowed_ops
    if extra_ops:
        raise ValueError("unrecognized characters {} in operator string.".format(extra_ops))
    # output buffers filled in place by the C core; nan marks invalid
    # elements, zero marks vanishing ones
    bra = _np.zeros_like(ket_states) # row
    ME = _np.zeros(ket_states.shape[0],dtype=dtype)
    self._core.op_bra_ket(ket_states,bra,ME,opstr,indx,J,self._Np)
    if reduce_output:
        # remove nan's matrix elements
        mask = _np.logical_not(_np.logical_or(_np.isnan(ME),_np.abs(ME)==0.0))
        bra = bra[mask]
        ket_states = ket_states[mask]
        ME = ME[mask]
    else:
        # keep alignment with the input; zero out the invalid (nan) entries
        mask = _np.isnan(ME)
        ME[mask] = 0.0
    return ME,bra,ket_states
def representative(self, states, out=None, return_g=False, return_sign=False):
    """Maps states to their representatives under the `basis` symmetries.

    Parameters
    -----------
    states : array_like(int)
        Fock-basis (z-basis) states to find the representatives of. States are stored in integer representations.
    out : numpy.ndarray(int), optional
        variable to store the representative states in. Must be a `numpy.ndarray` of same datatype as `basis`, and same shape as `states`.
    return_g : bool, optional
        if set to `True`, the function also returns the integer `g` corresponding to the number of times each basis symmetry needs to be applied to a given state to obtain its representative.
    return_sign : bool, optional
        if set to `True`, the function returns the `sign` of the representative relative to the original state (nontrivial only for fermionic bases).

    Returns
    --------
    tuple
        ( representatives, g_array, sign_array )

        * array_like(int): `representatives`: Representatives under `basis` symmetries, corresponding to `states`.
        * array_like(int): `g_array` of size (number of states, number of symmetries). Requires `return_g=True`. Contains integers corresponding to the number of times each basis symmetry needs to be applied to a given state to obtain its representative.
        * array_like(int): `sign_array` of size (number of states,). Requires `return_sign=True`. Contains `sign` of the representative relative to the original state (nontrivial only for fermionic bases).

    Examples
    --------

    >>> basis=spin_basis_general(N,Nup=Nup,make_basis=False)
    >>> s = 17
    >>> r = basis.representative(s)
    >>> print(s,r)

    """
    states = _np.asarray(states,order="C",dtype=self._basis.dtype)
    states = _np.atleast_1d(states)
    if states.ndim != 1:
        raise TypeError("dimension of array_like states must not exceed 1.")
    # optional per-state outputs, allocated up front so both branches below
    # can pass them to the C core
    if return_g:
        g_out=_np.zeros((states.shape[0],self._qs.shape[0] ), dtype=_np.int32, order='C')
    if return_sign:
        sign_out=_np.zeros(states.shape, dtype=_np.int8, order='C')
    if out is None:
        out=_np.zeros(states.shape,dtype=self._basis.dtype,order="C")
        if return_g and return_sign:
            self._core.representative(states,out,g_out=g_out,sign_out=sign_out)
            return out, g_out, sign_out
        elif return_g:
            self._core.representative(states,out,g_out=g_out)
            return out, g_out
        elif return_sign:
            self._core.representative(states,out,sign_out=sign_out)
            return out, sign_out
        else:
            self._core.representative(states,out)
            return out
    else:
        # caller-supplied output buffer: validated, filled in place; the
        # representatives are returned through `out` itself (the no-flags
        # case returns None)
        if not isinstance(out,_np.ndarray):
            raise TypeError('out must be a numpy.ndarray')
        if states.shape!=out.shape:
            raise TypeError('states and out must have same shape.')
        if out.dtype != self._basis.dtype:
            raise TypeError('out must have same type as basis')
        if not out.flags["CARRAY"]:
            raise ValueError("out must be C-contiguous array.")
        if return_g and return_sign:
            self._core.representative(states,out,g_out=g_out,sign_out=sign_out)
            return g_out,sign_out
        elif return_g:
            self._core.representative(states,out,g_out=g_out)
            return g_out
        elif return_sign:
            self._core.representative(states,out,sign_out=sign_out)
            return sign_out
        else:
            self._core.representative(states,out)
def normalization(self, states, out=None):
    """Computes normalization of `basis` states.

    Notes
    ------
    * Returns zero, if the state is not part of the symmetry-reduced basis.
    * The normalizations can be used to compute matrix elements in the symmetry-reduced basis.

    Parameters
    -----------
    states : array_like(int)
        Fock-basis (z-basis) states to find the normalizations of. States are stored in integer representations.
    out : numpy.ndarray(unsigned int), optional
        variable to store the normalizations of the states in. Must be a `numpy.ndarray` of datatype `unsigned int` (e.g. `numpy.uint16`), and same shape as `states`.

    Returns
    --------
    array_like(int)
        normalizations of `states` for the given (symmetry-reduced) `basis`.

    Examples
    --------

    >>> basis=spin_basis_general(N,Nup=Nup,make_basis=False)
    >>> s = 17
    >>> norm_s = basis.normalization(s)
    >>> print(s,norm_s)

    """
    states = _np.asarray(states, order="C", dtype=self._basis.dtype)
    states = _np.atleast_1d(states)
    if out is None:
        # allocate with a dtype wide enough for the largest possible
        # normalization
        out_dtype = _np.min_scalar_type(_np.iinfo(self._n_dtype).max * self._pers.prod())
        out = _np.zeros(states.shape, dtype=out_dtype)
        self._core.normalization(states, out)
        # narrow to the smallest dtype that holds the actual maximum
        out_dtype = _np.min_scalar_type(out.max())
        out = out.astype(out_dtype)
        return out.squeeze()
    else:
        if states.shape != out.shape:
            raise TypeError('states and out must have same shape.')
        if _np.issubdtype(out.dtype, _np.signedinteger):
            raise TypeError('out must have datatype numpy.uint8, numpy.uint16, numpy.uint32, or numpy.uint64.')
        if not out.flags["CARRAY"]:
            raise ValueError("out must be C-contiguous array.")
        self._core.normalization(states, out)
        # BUG FIX: the dtype-narrowed copy was previously assigned to a
        # local and discarded, and the function fell through returning
        # None. Return it like the out=None branch (callers keep seeing
        # the in-place results in `out`; a None return was never useful).
        out_dtype = _np.min_scalar_type(out.max())
        return out.astype(out_dtype).squeeze()
def get_amp(self,states,out=None,amps=None,mode='representative'):
	"""Computes the rescale factor of state amplitudes between the symmetry-reduced and full basis.

	Given a quantum state :math:`s` and a state amplitude in the full basis :math:`\\psi_s`, its representative (under the symmetries)
	:math:`r(s)` with a corresponding amplitude :math:`\\psi^\\text{sym}_r`, the function computes the ratio :math:`C`, defined as

	.. math::
		\\psi_s = C\\psi_r^\\text{sym}

	Notes
	------
	* Particularly useful when a given operation cannot be carried out in the symmetry-reduced basis in a straightforward manner.
	* To transform an entire state from a symmetry-reduced basis to the full (symmetry-free) basis, use the `basis.get_vec()` function.
	* Returns zero, if the state passed to the function is not part of the symmetry-reduced basis.
	* If `amps` is passed, the user has to make sure that the input data in `amps` correspond to the `states`.
	* The function assumes that `states` comply with the particle conservation symmetry the `basis` was constructed with.

	Parameters
	-----------
	states : array_like(int)
		Fock-basis (z-basis) states to find the amplitude rescale factor :math:`C` of. States are stored in integer representations.
	out : numpy.ndarray(float), optional
		variable to store the rescale factors :math:`C` of the states in. Must be a real or complex-valued `numpy.ndarray` of the same shape as `states`.
	amps : numpy.ndarray(float), optional
		array of amplitudes to rescale by the amplitude factor :math:`C` (see `mode`). Updated in-place. Must be a real or complex-valued `numpy.ndarray` of the same shape as `states`.
	mode : string, optional
		* if `mode='representative'` (default), then the function assumes that
			(i) `states` already contains representatives (i.e. states in the symmetry-reduced basis);
			(ii) `amps` (if passed) are amplitudes in the symmetry-reduced basis (:math:`\\psi_r^\\text{symm}`). The function will update `amps` in-place to :math:`\\psi_s`.
		* if `mode='full_basis'`, then the function assumes that
			(i) `states` contains full-basis states (the function will compute the corresponding representatives);
			(ii) `amps` (if passed) are amplitudes in the full basis (:math:`\\psi_s`). The function will update `amps` in-place to :math:`\\psi_r^\\text{symm}`;
			**Note**: the function will also update the variable `states` in place with the corresponding representatives.

	Returns
	--------
	array_like(float)
		amplitude rescale factor :math:`C` (see expression above).

	Examples
	--------
	>>> C = get_amp(states,out=None,amps=None,mode='representative')

	"""
	states = _np.asarray(states,order="C",dtype=self._basis.dtype)
	states = _np.atleast_1d(states)
	states_shape=states.shape
	if out is not None:
		# caller-provided output buffer: validate shape, dtype, and contiguity
		# before handing it to the C-extension core.
		if states_shape!=out.shape:
			raise TypeError('states and out must have same shape.')
		if out.dtype not in [_np.float32, _np.float64, _np.complex64, _np.complex128]:
			raise TypeError('out must have datatype numpy.float32, numpy.float64, numpy.complex64, or numpy.complex128.')
		if not out.flags["CARRAY"]:
			raise ValueError("out must be C-contiguous array.")
	elif amps is not None:
		# no buffer given, but amplitudes are: match their dtype so the
		# in-place rescale below does not change precision.
		out=_np.zeros(states_shape,dtype=amps.dtype)
	else:
		out=_np.zeros(states_shape,dtype=_np.complex128)
	# NOTE(review): `mode` is only validated below when `amps` is passed; an
	# invalid mode with amps=None reaches the core call unchecked — confirm
	# the core rejects it.
	self._core.get_amp(states,out,states_shape[0],mode)
	if amps is not None:
		if states.shape!=amps.shape:
			raise TypeError('states and amps must have same shape.')
		if mode=='representative':
			amps*=out # compute amplitudes in full basis (in-place)
		elif mode=='full_basis':
			amps/=out # compute amplitudes in symmetry-reduced basis (in-place)
		else:
			raise ValueError("mode accepts only the values 'representative' and 'full_basis'.")
	return out.squeeze()
def _check_symm_map(map,sort_opstr,operator_list):
missing_ops=[]
odd_ops=[]
for op in operator_list:
opstr = str(op[0])
indx = list(op[1])
J = op[2]
for j,ind in enumerate(op[1]):
i = map[ind]
if i < 0:
if opstr[j] == "n":
odd_ops.append(op)
J *= (-1 if opstr[j] in ["z","y"] else 1)
opstr = opstr.replace("+","#").replace("-","+").replace("#","-")
i = -(i+1)
indx[j] = i
new_op = list(op)
new_op[0] = opstr
new_op[1] = indx
new_op[2] = J
new_op = sort_opstr(new_op)
if not (new_op in operator_list):
missing_ops.append(new_op)
return odd_ops,missing_ops
| 32.674769
| 289
| 0.699161
|
4a02b150473e8782f17a3bf4f5af48b8aebcaf16
| 19,442
|
py
|
Python
|
src/FACE_DIARIZATION/C_FacialDetection/FaceDetectorAndEncoder.py
|
cristinalunaj/WI-IAT20_PopularityModule
|
0a4894e2b889bf31ea1a8beab3025d5dd0b1ed47
|
[
"MIT"
] | null | null | null |
src/FACE_DIARIZATION/C_FacialDetection/FaceDetectorAndEncoder.py
|
cristinalunaj/WI-IAT20_PopularityModule
|
0a4894e2b889bf31ea1a8beab3025d5dd0b1ed47
|
[
"MIT"
] | null | null | null |
src/FACE_DIARIZATION/C_FacialDetection/FaceDetectorAndEncoder.py
|
cristinalunaj/WI-IAT20_PopularityModule
|
0a4894e2b889bf31ea1a8beab3025d5dd0b1ed47
|
[
"MIT"
] | null | null | null |
"""
Face detection and encoding
Our approach is based on visual information.
Run through the frames of a program, and detect as many faces as
possible using MTCNN [1] or HaarCascade [2]
For each detected face, encode its features as a vector
embedding, thanks to the Facenet model [3].
That way, each face, no matter from whom, available in a broadcast
will be accesible as a rich latent representation.
[1]: https://github.com/ipazc/mtcnn (code) // https://kpzhang93.github.io/MTCNN_face_detection_alignment/paper/spl.pdf (paper)
[2]: https://github.com/opencv/opencv/tree/master/data/haarcascades
[3]: https://arxiv.org/abs/1503.03832 (paper)
author: Cristina Luna, Ricardo Kleinlein
date: 05/2020
Usage:
python3 FaceDetectorAndEncoder.py
--face-detector MTCNN
--root-input-folder ~/RTVE2018DB/test/GENERATED_ME/DATASET_GOOGLE_IMGS/refactor_DS
--program-name None
--output-dir ~/RTVE2018DB/test/GENERATED_ME/DATASET_GOOGLE_IMGS/VIDEO_DB_MTCNN
--program-participants-folder ~/RTVE2018DB/test/rttm_INFO/FACEREF/participants
--imgs-2-maintain 100
--face-threshold 0.98
--extract-single-face False
Options:
--face-detector: Face detector (Options: MTCNN or HaarCascade)
--encoding-model: Path to the encoding model pretrained weigths
[default: ../../../data/models/pre_trained_models/face_embs_model/facenet_keras.h5]
--input-frames-folder: Path to folder with the frames of the videos
--video-name: Name of the target video folder (if None, then process all videos in folder 'input_imgs_folder')
--output-dir: Directory to save results in
--imgs-2-maintain: Number of images per participant/program to maintain. [default: All]
--face-threshold: Probability[0-1] of accept a face as correct . Those faces below the face_threshold will not be considered in what follows.
--extract-single-face: True if we want to extract a single face from frame (This face will be the biggest one in the image)
--quiet Hide visual information
-h, --help Display script additional help
"""
import os, time
from PIL import Image
import numpy as np
from numpy import savez_compressed
from keras.models import load_model
import cv2
from mtcnn.mtcnn import MTCNN
from src.BaseArgs import FaceDetEncArgs
import src.utils.loader as loader
default_path_HaarCascade = "../../../data/models/pre_trained_models/face_detectors/haarcascade_frontalface_default.xml"
class FaceDetectorAndEncoder():
    """Detect faces in per-program frame folders and encode each face.

    Detection uses MTCNN or HaarCascade; each detected face is encoded as a
    latent vector with a pre-trained FaceNet model. Results (cropped faces,
    bounding boxes, embeddings, detector metadata) are written as ``.npz``
    files under ``output_dir``, organized per program and participant ID.
    """

    def __init__(self, face_detector, encoding_model, root_input_folder, path_participants_folder,
                 output_dir, program_name, imgs_after_filtering=100):
        # Root folder holding one sub-folder per program with the frames/images.
        self.root_input_folder = root_input_folder
        # If no explicit program name is given, process every program listed in
        # the participants folder (file name without extension = program name).
        self.programs = [file.split(".")[0] for file in os.listdir(path_participants_folder)] \
            if(program_name == None or program_name == 'None') else [program_name]
        self.output_dir = output_dir
        # Maximum number of images to keep per participant when filtering.
        self.imgs_after_filtering = imgs_after_filtering
        self.flag_face_detector = 0
        self.face_detector = self._load_face_detection_model(face_detector)  # MTCNN
        self.encoding_model = load_model(encoding_model, compile=False)  # FaceNet

    def _load_face_detection_model(self, detector="MTCNN"):
        """
        Load face detector. Two possible options: MTCNN or HaarCascade
        :param detector: flag with the name of the detector to use
        :return: loaded model detector
        """
        if(detector=="MTCNN"):
            self.flag_face_detector = "MTCNN"
            return MTCNN()
        elif(detector=="HaarCascade"):
            #HAARCASCADE -> haarcascade_frontalface_default.xml
            self.flag_face_detector = "HaarCascade"
            return cv2.CascadeClassifier(default_path_HaarCascade)
        # NOTE(review): any other detector name falls through and returns None.

    def get_embedding(self, face_pixels):
        """
        Get embeddings from face image pixels using FaceNet model
        Embeddings extracted from FaceNet that represent the face in a latent space
        :param face_pixels: (array) Value of face pixels detected
        :return: encoded face as embedding
        """
        # scale pixel values
        face_pixels = face_pixels.astype('float32')
        # standardize pixel values across channels (global)
        mean, std = face_pixels.mean(), face_pixels.std()
        face_pixels = (face_pixels - mean) / std
        # transform face into one sample (add batch dimension)
        samples = np.expand_dims(face_pixels, axis=0)
        # make prediction to get embedding
        yhat = self.encoding_model.predict(samples)
        return yhat[0]

    def detect_face_MTCNN(self,rgb_image):
        """
        Detect faces with MTCNN
        :param rgb_image: image in RGB
        :return: detected faces as dict with MTCNN detection information [bbox, confidence...]
        """
        return self.face_detector.detect_faces(rgb_image)

    def detect_face_HaarCascade(self,frame_path):
        """
        Detect faces with HaarCascade
        :param frame_path: path to the image; it is re-loaded here in grayscale
        :return: detected faces as dicts with HaarCascade information reported
            in MTCNN-like format (confidence is fixed to 1.0 since HaarCascade
            does not provide one)
        """
        gray_image = loader.load_image(frame_path, colormode="grayscale")
        gray_image = np.squeeze(gray_image)
        gray_image = gray_image.astype('uint8')
        faces = self.face_detector.detectMultiScale(gray_image, 1.3, 5)
        faces_as_dict = [{'box': f,
                          'confidence': 1.0} for f in faces]
        return faces_as_dict

    def create_output_folders(self, program,ID, sub_folder_extra_name="", create_face_folder=True):
        """
        Create output sub-folders to save the embeddings generated, bbox, face images and the dicts generated
        by MTCNN (mtcnn_debug saves info about face size, position, confidence...)
        :param program: name of the program being processed
        :param ID: name of the folder with the ID frames
        :param sub_folder_extra_name: suffix appended to each output sub-folder name
        :param create_face_folder: if True, also create the cropped-faces folder
        :return: output folder paths (faces, boundingboxes, embeddings, mtcnn_debug)
        """
        if(create_face_folder):
            output_path_face = os.path.join(self.output_dir, program, "faces"+sub_folder_extra_name,ID)
            os.makedirs(output_path_face, exist_ok=True)
        else:
            output_path_face = ""
        output_path_bbox = os.path.join(self.output_dir,program, "boundingboxes"+sub_folder_extra_name, ID)
        output_path_emb = os.path.join(self.output_dir,program, "embeddings"+sub_folder_extra_name, ID)
        output_path_mtcnn = os.path.join(self.output_dir,program, "mtcnn_debug"+sub_folder_extra_name,ID)
        # NOTE(review): emb/mtcnn folders are only created when the bbox folder
        # is missing — assumes all three are always created/removed together.
        if (not os.path.exists(output_path_bbox)):
            os.makedirs(output_path_bbox)
            os.makedirs(output_path_mtcnn)
            os.makedirs(output_path_emb)
        return output_path_face,output_path_bbox, output_path_emb, output_path_mtcnn

    def extract_biggest_face(self, detected_faces, confidence_th):
        """
        Extract the biggest face in detected_faces
        :param detected_faces: List of detected faces by MTCNN or HaarCascade
        :param confidence_th: minimum detector confidence for a face to be considered
        :return: A list with a single element, the biggest face (an empty dict
            if no face passes the confidence threshold)
        """
        single_face_dict = {}
        max_size = 0
        for face in detected_faces:
            if(face["confidence"]>=confidence_th):
                _, _, width, height = face['box']
                face_size = width*height
                if(face_size>max_size):
                    max_size = face_size
                    single_face_dict = face
        return [single_face_dict]

    def detect_and_encode_faces(self,required_size = (160, 160), extract_single_face=False, probability_of_being_face_accepted=0.0):
        """
        Get embeddings and bounding boxes from images. The function will save the bounding box of the faces detected, their embeddings after passing through FaceNet,
        the metadata generated by MTCNN and their cropped faces
        :param required_size: Size that the face encoder (FaceNet) expects as input
        :param extract_single_face: if True, keep only the biggest face per frame
        :param probability_of_being_face_accepted: confidence threshold used when
            extracting the single biggest face
        """
        for program in self.programs:
            input_path_program = os.path.join(self.root_input_folder, program)
            for id in sorted(os.listdir(input_path_program)):
                # Skip IDs already processed (bbox folder acts as a done-marker).
                if (os.path.isdir(os.path.join(input_path_program, id)) and
                        not os.path.exists(os.path.join(self.output_dir,program, "boundingboxes", id))):
                    print("Extracting faces & bbox from program: ", program, " - id: ", id)
                    #Define and create ROOT output dirs
                    output_path_video,output_path_bbox, output_path_emb, output_path_mtcnn = self.create_output_folders(program,id)
                    # path of identities/programs with their images or frames
                    path_frames = os.path.join(input_path_program, id)
                    # frames or images
                    for frame in sorted(os.listdir(path_frames)):
                        print("processing ", frame)
                        path_frame = os.path.join(input_path_program, id, frame)
                        frame_name = frame.split(".")[0]
                        try:
                            X, y, mtcnn_info, emb_data = list(), list(), list(), list()
                            face_queries,labels,mtcnn_metadata,total_embeddings = list(), list(), list(), list()
                            # load image from file, convert to RGB and convert to array
                            rgb_img = np.asarray(Image.open(path_frame).convert('RGB'))
                            #get bounding boxes using MTCNN
                            detected_faces = self.detect_face_MTCNN(rgb_img) if(self.flag_face_detector=="MTCNN") \
                                else self.detect_face_HaarCascade(path_frame)
                            #Extract single face (the biggest one) - e.g. for OCR Images
                            if(extract_single_face):
                                detected_faces = self.extract_biggest_face(detected_faces, probability_of_being_face_accepted)
                            # Extract the bounding box of all the faces detected on the photo
                            if (len(detected_faces) != 0):
                                for n in range(len(detected_faces)):
                                    x1, y1, width, height = detected_faces[n]['box']
                                    # Fix a possible bug (MTCNN can return negative coords)
                                    x1, y1 = abs(x1), abs(y1)
                                    x2, y2 = x1 + width, y1 + height
                                    #Convert image into a numpy array
                                    image = Image.fromarray(rgb_img[y1:y2, x1:x2])
                                    resized_img = image.resize(required_size)
                                    #save_img
                                    resized_img.save(os.path.join(output_path_video, frame_name+"_"+str(n)+".png"))
                                    # Change dimension of face to FaceNet input image size (160x160)
                                    face_array = np.asarray(resized_img)
                                    #get embeddings using FaceNet
                                    emb_data.append(self.get_embedding(face_array))
                                    face_queries.append(face_array)
                                    labels.append(id+"/"+frame)
                                    mtcnn_metadata.append(detected_faces[n])
                                # Save results (one .npz per frame, parallel label arrays)
                                X.extend(face_queries)
                                y.extend(labels)
                                mtcnn_info.extend(mtcnn_metadata)
                                total_embeddings.extend(emb_data)
                                savez_compressed(os.path.join(output_path_bbox, frame_name + ".npz"), np.asarray(X), np.asarray(y))
                                savez_compressed(os.path.join(output_path_mtcnn, frame_name + ".npz"), np.asarray(mtcnn_info), np.asarray(y))
                                savez_compressed(os.path.join(output_path_emb, frame_name + ".npz"), np.asarray(total_embeddings), np.asarray(y))
                        # NOTE(review): bare except hides the actual error type;
                        # consider catching Exception and logging the traceback.
                        except:
                            print('ERROR DETECTED IN : {:s}'.format(path_frame))

    def filter_bbox_embs(self,to_filter="embeddings",probability_of_being_face_accepted = 0.98, face_size_accepted = 80*80):
        """
        Filter the number of images/embs/bboxes to keep, down to `imgs_after_filtering`.
        Faces below the confidence threshold or smaller than `face_size_accepted`
        (judged from the mtcnn_debug metadata) are dropped.
        :param to_filter: which output sub-folder to filter ("embeddings",
            "boundingboxes" or "mtcnn_debug")
        :param probability_of_being_face_accepted: probability reported by MTCNN
            required to accept a face
        :param face_size_accepted: minimum accepted face area (in pixels)
        """
        for program in self.programs:
            input_path_non_filtered_embs = os.path.join(self.output_dir, program, to_filter)
            output_path_filtered = os.path.join(self.output_dir, program,to_filter + "_" + str(self.imgs_after_filtering))
            for id in sorted(os.listdir(input_path_non_filtered_embs)):
                print("Filtering program: ", program, " - id: ", id)
                # Skip IDs already filtered.
                if (os.path.exists(os.path.join(output_path_filtered, id))):
                    continue
                os.makedirs(os.path.join(output_path_filtered, id), exist_ok=True)
                counter_imgs_copied = 0
                list_of_embs = sorted(os.listdir(os.path.join(input_path_non_filtered_embs, id)))
                for emb in list_of_embs:
                    embs_final = []
                    labels_final = []
                    # embs
                    path_embs_output = os.path.join(output_path_filtered, id, emb)
                    path_embs_input = os.path.join(input_path_non_filtered_embs, id, emb)
                    data_embs = np.load(path_embs_input, allow_pickle=True)
                    embs_info, embs_labels = data_embs['arr_0'], data_embs['arr_1']
                    # mtcnn metadata carries the size/confidence used for filtering
                    path_mtcnn_debug_input = os.path.join(self.output_dir, program,"mtcnn_debug", id, emb)
                    data_mtcnn = np.load(path_mtcnn_debug_input, allow_pickle=True)
                    mtcnn_info, mtcnn_labels = data_mtcnn['arr_0'], data_mtcnn['arr_1']
                    for sub_face_index in range(len(mtcnn_info)):
                        _, _, width, height = mtcnn_info[sub_face_index]['box']
                        face_size = width * height
                        probability_of_being_face = mtcnn_info[sub_face_index]["confidence"]
                        if (face_size < face_size_accepted or probability_of_being_face < probability_of_being_face_accepted):
                            continue
                        else:
                            embs_final.append(embs_info[sub_face_index])
                            labels_final.append(embs_labels[sub_face_index])
                    # image 2 copy: only save while under the per-ID quota and
                    # at least one face survived the filter
                    if (counter_imgs_copied < self.imgs_after_filtering and len(embs_final) >= 1):
                        # shutil.copy(path_embs_input, path_embs_output)
                        savez_compressed(path_embs_output, np.asarray(embs_final), np.asarray(labels_final))
                    else:
                        continue
                    counter_imgs_copied += 1

    def generate_compact_npz(self,to_filter="embeddings", replace_by_spaces=False):
        """
        Compact the individual embs/bbox files into a single compressed file
        (.npz) per participant/program.
        :param to_filter: which filtered sub-folder to compact
        :param replace_by_spaces: if True, replace "_" by " " in the output
            participant file name
        """
        for program in self.programs:
            input_path = os.path.join(self.output_dir, program,to_filter + "_" + str(self.imgs_after_filtering))
            output_path = os.path.join(self.output_dir, program, to_filter+"_sum")
            for id in sorted(os.listdir(input_path)):
                input_id_embs = os.path.join(input_path, id)
                list_of_embs = sorted(os.listdir(input_id_embs))
                embs_totales_query = list()
                etiquetas_embs_total = list()
                for emb in list_of_embs:
                    ruta_embedding = os.path.join(input_id_embs, emb)
                    data_emb = np.load(ruta_embedding, allow_pickle=True)
                    emb_info, etiquetas_emb = data_emb['arr_0'], data_emb['arr_1']
                    for value in emb_info:
                        embs_totales_query.append(value)
                    for value in etiquetas_emb:
                        etiquetas_embs_total.append(value)
                embs_final = np.asarray(embs_totales_query)
                embs_label = np.asarray(etiquetas_embs_total)
                # Only write an output file when there is at least one entry.
                if (len(embs_final) >= 1):
                    os.makedirs(output_path, exist_ok=True)
                    if (replace_by_spaces):
                        new_id_name = id.replace("_", " ")
                    else:
                        new_id_name = id
                    np.savez_compressed(output_path + '/' + new_id_name, embs_final, embs_label)
if __name__ == "__main__":
    face_detEnc_args_obj = FaceDetEncArgs()
    args = face_detEnc_args_obj.parse()
    # A non-positive value means "keep everything": use a number larger than
    # any realistic image count so the filter never truncates.
    if(args.imgs_2_maintain <=0):
        imgs_after_filtering = 2000000
    else:
        imgs_after_filtering = args.imgs_2_maintain
    face_det_enc_obj = FaceDetectorAndEncoder(args.face_detector, args.encoding_model, args.root_input_folder,
                                              args.program_participants_folder, args.output_dir,
                                              args.program_name, imgs_after_filtering = imgs_after_filtering)
    #MTCNN & FaceNet: detect faces in every frame and encode them
    face_det_enc_obj.detect_and_encode_faces(extract_single_face=args.extract_single_face,probability_of_being_face_accepted = args.face_threshold)
    #Filter results by face confidence/size, for each output kind
    face_det_enc_obj.filter_bbox_embs(to_filter="embeddings",probability_of_being_face_accepted = args.face_threshold,
                                      face_size_accepted = 80*80)
    face_det_enc_obj.filter_bbox_embs(to_filter="boundingboxes", probability_of_being_face_accepted= args.face_threshold,
                                      face_size_accepted= 80 * 80)
    face_det_enc_obj.filter_bbox_embs(to_filter="mtcnn_debug", probability_of_being_face_accepted= args.face_threshold,
                                      face_size_accepted= 80 * 80)
    #Generate compact version of filtered data:
    # NOTE(review): fixed sleep to let the filtering file operations settle on
    # disk — fragile; the filter calls above are synchronous, so this is
    # presumably defensive. Confirm whether it can be removed.
    time.sleep(3 * 60)  # 3 min -> wait to let filter finish the copy/paste/modification of the data
    # True in LN24H, False in L6N (for Google and program, True in OCR)
    replace_by_spaces = True
    face_det_enc_obj.generate_compact_npz(to_filter="embeddings", replace_by_spaces=replace_by_spaces)
    face_det_enc_obj.generate_compact_npz(to_filter="boundingboxes", replace_by_spaces=replace_by_spaces)
    face_det_enc_obj.generate_compact_npz(to_filter="mtcnn_debug", replace_by_spaces=replace_by_spaces)
| 54.920904
| 168
| 0.619638
|
4a02b151c3004ca3244a7bb0c3f041e3f6dd8aef
| 820
|
py
|
Python
|
Problem Solving/Data Structures/Stacks/Waiter.py
|
MonwarAdeeb/HackerRank-Solutions
|
571327e9688061745000ae81c5fd74ff7a2976d4
|
[
"MIT"
] | null | null | null |
Problem Solving/Data Structures/Stacks/Waiter.py
|
MonwarAdeeb/HackerRank-Solutions
|
571327e9688061745000ae81c5fd74ff7a2976d4
|
[
"MIT"
] | null | null | null |
Problem Solving/Data Structures/Stacks/Waiter.py
|
MonwarAdeeb/HackerRank-Solutions
|
571327e9688061745000ae81c5fd74ff7a2976d4
|
[
"MIT"
] | null | null | null |
#!/bin/python3
"""HackerRank "Waiter": repeatedly split a stack of plates by divisibility
with successive primes, printing the diverted plates (B stacks) first and the
remaining plates (A stacks) last, each popped top-to-bottom."""
import sys
from math import isqrt

n, q = input().strip().split(' ')
n, q = [int(n), int(q)]
number = list(map(int, input().strip().split(' ')))

# Pre-compute the primes up to 10000 (more than the q <= 1200 needed).
# Trial division only needs divisors up to isqrt(i); the original tested
# every j < i, which was needlessly quadratic.
lower = 2
upper = 10000
prime = [i for i in range(lower, upper + 1)
         if all(i % j != 0 for j in range(2, isqrt(i) + 1))]

# A[i] is the pile before iteration i; B[i] collects plates divisible by
# the i-th prime. One extra slot holds the final leftover pile.
A = [[] for i in range(q + 1)]
B = [[] for i in range(q + 1)]
A[0] = number

# Pop every plate from the current pile: divisible plates go to B[i],
# the rest are stacked onto the next pile A[i+1].
for i in range(q):
    for j in range(len(A[i])):
        n = A[i].pop()
        if n % prime[i] == 0:
            B[i].append(n)
        else:
            A[i + 1].append(n)

# Print the B piles in order, each popped from the top.
for i in range(len(B)):
    while B[i] != []:
        print(B[i].pop())

# Then print whatever remains in the A piles (only the last is non-empty).
for i in range(len(A)):
    while A[i] != []:
        print(A[i].pop())
| 21.578947
| 80
| 0.486585
|
4a02b1ea7efaccbef4949de5a93be0830d7f819b
| 25,911
|
py
|
Python
|
retrace/dxgiretrace.py
|
groleo/apitrace
|
b192ca20379790c942ab448fa7cf52891f901d0b
|
[
"MIT"
] | null | null | null |
retrace/dxgiretrace.py
|
groleo/apitrace
|
b192ca20379790c942ab448fa7cf52891f901d0b
|
[
"MIT"
] | null | null | null |
retrace/dxgiretrace.py
|
groleo/apitrace
|
b192ca20379790c942ab448fa7cf52891f901d0b
|
[
"MIT"
] | null | null | null |
##########################################################################
#
# Copyright 2011 Jose Fonseca
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
"""D3D retracer generator."""
import sys
from dllretrace import DllRetracer as Retracer
import specs.dxgi
from specs.stdapi import API
from specs.winapi import LPCSTR
from specs.dxgi import dxgi
from specs.d3d10 import d3d10, d3d10_1
from specs.d3d11 import d3d11
from specs.dcomp import dcomp
class D3DRetracer(Retracer):
    def retraceApi(self, api):
        # Emit the global C++ map used to swizzle Map/Unmap lock addresses:
        # device/context -> ((resource, subresource) -> mapped pointer).
        print '// Swizzling mapping for lock addresses, mapping a (pDeviceContext, pResource, Subresource) -> void *'
        print 'typedef std::pair< IUnknown *, UINT > SubresourceKey;'
        print 'static std::map< IUnknown *, std::map< SubresourceKey, void * > > g_Maps;'
        print
        # Register the generated callbacks under the DXGI retrace table.
        self.table_name = 'd3dretrace::dxgi_callbacks'
        Retracer.retraceApi(self, api)
createDeviceFunctionNames = [
"D3D10CreateDevice",
"D3D10CreateDeviceAndSwapChain",
"D3D10CreateDevice1",
"D3D10CreateDeviceAndSwapChain1",
"D3D11CreateDevice",
"D3D11CreateDeviceAndSwapChain",
]
    def invokeFunction(self, function):
        # Inject retrace-time setup code before device-creation calls.
        if function.name in self.createDeviceFunctionNames:
            # create windows as necessary for the swap chain being created
            if 'pSwapChainDesc' in function.argNames():
                print r'    d3dretrace::createWindowForSwapChain(pSwapChainDesc);'

            # Compensate for the fact we don't trace DXGI object creation
            if function.name.startswith('D3D11CreateDevice'):
                print r'    if (DriverType == D3D_DRIVER_TYPE_UNKNOWN && !pAdapter) {'
                print r'        DriverType = D3D_DRIVER_TYPE_HARDWARE;'
                print r'    }'

            if function.name.startswith('D3D10CreateDevice'):
                # Toggle the D3D10 debug layer according to retrace options
                print r'    if (retrace::debug >= 2) {'
                print r'        Flags |= D3D10_CREATE_DEVICE_DEBUG;'
                print r'    } else if (retrace::debug < 0) {'
                print r'        Flags &= ~D3D10_CREATE_DEVICE_DEBUG;'
                print r'    }'

                # Force driver selection (hardware/WARP/reference/...)
                self.forceDriver('D3D10_DRIVER_TYPE')

            if function.name.startswith('D3D11CreateDevice'):
                # Toggle the D3D11 debug layer according to retrace options
                print r'    if (retrace::debug >= 2) {'
                print r'        Flags |= D3D11_CREATE_DEVICE_DEBUG;'
                print r'    } else if (retrace::debug < 0) {'
                print r'        Flags &= ~D3D11_CREATE_DEVICE_DEBUG;'
                print r'    }'

                # Force driver selection (hardware/WARP/reference/...)
                self.forceDriver('D3D_DRIVER_TYPE')

        Retracer.invokeFunction(self, function)
    def doInvokeFunction(self, function):
        Retracer.doInvokeFunction(self, function)

        # Handle missing debug layer. While it's possible to detect whether
        # the debug layers are present, by creating a null device, and checking
        # the result. It's simpler to retry: on failure, clear the DEBUG flag
        # and invoke the creation call a second time.
        if function.name.startswith('D3D10CreateDevice'):
            print r'    if ((_result == E_FAIL || _result == DXGI_ERROR_SDK_COMPONENT_MISSING) && (Flags & D3D10_CREATE_DEVICE_DEBUG)) {'
            print r'        retrace::warning(call) << "Direct3D 10.x SDK Debug Layer (d3d10sdklayers.dll) not available, continuing without debug output\n";'
            print r'        Flags &= ~D3D10_CREATE_DEVICE_DEBUG;'
            Retracer.doInvokeFunction(self, function)
            print r'    }'
        if function.name.startswith('D3D11CreateDevice'):
            print r'    if ((_result == E_FAIL || _result == DXGI_ERROR_SDK_COMPONENT_MISSING) && (Flags & D3D11_CREATE_DEVICE_DEBUG)) {'
            print r'        retrace::warning(call) << "Direct3D 11.x SDK Debug Layer (d3d11*sdklayers.dll) not available, continuing without debug output\n";'
            print r'        Flags &= ~D3D11_CREATE_DEVICE_DEBUG;'
            Retracer.doInvokeFunction(self, function)
            print r'    }'
    def handleFailure(self, interface, methodOrFunction):
        # Catch when device is removed, and report the reason before bailing
        # out. The removed-reason query is either on the interface itself or
        # reachable through its GetDevice method.
        if interface is not None:
            print r'    if (_result == DXGI_ERROR_DEVICE_REMOVED) {'

            getDeviceRemovedReasonMethod = interface.getMethodByName("GetDeviceRemovedReason")
            if getDeviceRemovedReasonMethod is not None:
                print r'        HRESULT _reason = _this->GetDeviceRemovedReason();'
                print r'        retrace::failed(call, _reason);'

            getDeviceMethod = interface.getMethodByName("GetDevice")
            if getDeviceMethod is not None and len(getDeviceMethod.args) == 1:
                print r'        com_ptr<%s> _pDevice;' % getDeviceMethod.args[0].type.type.type
                print r'        _this->GetDevice(&_pDevice);'
                print r'        HRESULT _reason = _pDevice->GetDeviceRemovedReason();'
                print r'        retrace::failed(call, _reason);'

            # Device removal is unrecoverable during retrace.
            print r'        exit(EXIT_FAILURE);'
            print r'    }'

        Retracer.handleFailure(self, interface, methodOrFunction)
    def forceDriver(self, enum):
        # Emit C++ that overrides DriverType/Software according to the
        # retrace --driver option, using the given enum prefix
        # (D3D10_DRIVER_TYPE or D3D_DRIVER_TYPE).
        # This can only work when pAdapter is NULL. For non-NULL pAdapter we
        # need to override inside the EnumAdapters call below
        print r'    if (pAdapter == NULL) {'
        print r'        switch (retrace::driver) {'
        print r'        case retrace::DRIVER_HARDWARE:'
        print r'            DriverType = %s_HARDWARE;' % enum
        print r'            Software = NULL;'
        print r'            break;'
        print r'        case retrace::DRIVER_SOFTWARE:'
        print r'            DriverType = %s_WARP;' % enum
        print r'            Software = NULL;'
        print r'            break;'
        print r'        case retrace::DRIVER_REFERENCE:'
        print r'            DriverType = %s_REFERENCE;' % enum
        print r'            Software = NULL;'
        print r'            break;'
        print r'        case retrace::DRIVER_NULL:'
        print r'            DriverType = %s_NULL;' % enum
        print r'            Software = NULL;'
        print r'            break;'
        print r'        case retrace::DRIVER_MODULE:'
        print r'            DriverType = %s_SOFTWARE;' % enum
        print r'            Software = LoadLibraryA(retrace::driverModule);'
        print r'            if (!Software) {'
        print r'                retrace::warning(call) << "failed to load " << retrace::driverModule << "\n";'
        print r'            }'
        print r'            break;'
        print r'        default:'
        print r'            assert(0);'
        print r'            /* fall-through */'
        print r'        case retrace::DRIVER_DEFAULT:'
        # Default: honor the traced driver type, loading WARP when the trace
        # used a software rasterizer.
        print r'            if (DriverType == %s_SOFTWARE) {' % enum
        print r'                Software = LoadLibraryA("d3d10warp");'
        print r'                if (!Software) {'
        print r'                    retrace::warning(call) << "failed to load d3d10warp.dll\n";'
        print r'                }'
        print r'            }'
        print r'            break;'
        print r'        }'
        print r'    } else {'
        print r'        Software = NULL;'
        print r'    }'
    def doInvokeInterfaceMethod(self, interface, method):
        Retracer.doInvokeInterfaceMethod(self, interface, method)

        # Keep retrying ID3D11VideoContext::DecoderBeginFrame when it returns
        # E_PENDING (or D3DERR_WASSTILLDRAWING), sleeping briefly between tries.
        if interface.name == 'ID3D11VideoContext' and method.name == 'DecoderBeginFrame':
            print r'    while (_result == D3DERR_WASSTILLDRAWING || _result == E_PENDING) {'
            print r'        Sleep(1);'
            Retracer.doInvokeInterfaceMethod(self, interface, method)
            print r'    }'
def invokeInterfaceMethod(self, interface, method):
    """Emit the C++ replay code for one COM interface method.

    Special-cases a number of DXGI/D3D10/D3D11 methods: device tracking for
    state dumps, private QueryInterface interception, window creation for
    swap chains, driver forcing in EnumAdapters, shared-resource
    substitution, Map/Unmap bookkeeping, and shader-bytecode tagging.
    Several branches ``return`` early because they emit the invocation
    themselves; the common path falls through to the base-class emitter.

    NOTE(review): Python-level nesting and the whitespace inside the emitted
    string literals were reconstructed from a whitespace-stripped dump —
    verify against the upstream generator.
    """
    # keep track of the last used device for state dumping
    if interface.name in ('ID3D10Device', 'ID3D10Device1'):
        if method.name == 'Release':
            # Only unbind once the refcount actually hits zero.
            print r' if (call.ret->toUInt() == 0) {'
            print r' d3d10Dumper.unbindDevice(_this);'
            print r' }'
        else:
            print r' d3d10Dumper.bindDevice(_this);'
    if interface.name.startswith('ID3D11DeviceContext'):
        if method.name == 'Release':
            print r' if (call.ret->toUInt() == 0) {'
            print r' d3d11Dumper.unbindDevice(_this);'
            print r' }'
        else:
            print r' d3d11Dumper.bindDevice(_this);'
    # intercept private interfaces
    if method.name == 'QueryInterface':
        print r' if (!d3dretrace::overrideQueryInterface(_this, riid, ppvObj, &_result)) {'
        Retracer.invokeInterfaceMethod(self, interface, method)
        print r' }'
        return
    # create windows as neccessary
    if method.name == 'CreateSwapChain':
        print r' d3dretrace::createWindowForSwapChain(pDesc);'
    if method.name == 'CreateSwapChainForHwnd':
        print r' WindowHandle = d3dretrace::createWindow(pDesc->Width, pDesc->Height);'
        print r' // DXGI_SCALING_NONE is only supported on Win8 and beyond'
        print r' if (pDesc->Scaling == DXGI_SCALING_NONE && !IsWindows8OrGreater()) {'
        print r' pDesc->Scaling = DXGI_SCALING_STRETCH;'
        print r' }'
    if method.name == 'CreateSwapChainForComposition':
        # Composition swap chains are replayed against a plain window.
        print r' HWND hWnd = d3dretrace::createWindow(pDesc->Width, pDesc->Height);'
        print r' _result = _this->CreateSwapChainForHwnd(pDevice, hWnd, pDesc, NULL, pRestrictToOutput, ppSwapChain);'
        self.checkResult(interface, method)
        return
    if method.name == 'CreateTargetForHwnd':
        print r' hwnd = d3dretrace::createWindow(1024, 768);'
    if method.name == 'SetFullscreenState':
        print r' if (retrace::forceWindowed) {'
        print r' DXGI_SWAP_CHAIN_DESC Desc;'
        print r' _this->GetDesc(&Desc);'
        print r' if (Desc.BufferDesc.Format != DXGI_FORMAT_R10G10B10_XR_BIAS_A2_UNORM) {'
        print r' Fullscreen = FALSE;'
        print r' pTarget = nullptr;'
        print r' }'
        print r' }'
    # notify frame has been completed
    if interface.name.startswith('IDXGISwapChain') and method.name.startswith('Present'):
        if interface.name.startswith('IDXGISwapChainDWM'):
            # DWM swap chains don't expose IDXGISwapChain directly; query it.
            print r' com_ptr<IDXGISwapChain> pSwapChain;'
            print r' if (SUCCEEDED(_this->QueryInterface(IID_IDXGISwapChain, (void **) &pSwapChain))) {'
            print r' dxgiDumper.bindDevice(pSwapChain);'
            print r' } else {'
            print r' assert(0);'
            print r' }'
        else:
            print r' dxgiDumper.bindDevice(_this);'
        print r' retrace::frameComplete(call);'
    if 'pSharedResource' in method.argNames():
        print r' if (pSharedResource) {'
        print r' retrace::warning(call) << "shared surfaces unsupported\n";'
        print r' pSharedResource = NULL;'
        print r' }'
    # Force driver
    if interface.name.startswith('IDXGIFactory') and method.name.startswith('EnumAdapters'):
        print r' const char *szSoftware = NULL;'
        print r' switch (retrace::driver) {'
        print r' case retrace::DRIVER_REFERENCE:'
        print r' case retrace::DRIVER_SOFTWARE:'
        print r' szSoftware = "d3d10warp.dll";'
        print r' break;'
        print r' case retrace::DRIVER_MODULE:'
        print r' szSoftware = retrace::driverModule;'
        print r' break;'
        print r' default:'
        print r' break;'
        print r' }'
        print r' HMODULE hSoftware = NULL;'
        print r' if (szSoftware) {'
        print r' hSoftware = LoadLibraryA(szSoftware);'
        print r' if (!hSoftware) {'
        print r' retrace::warning(call) << "failed to load " << szSoftware << "\n";'
        print r' }'
        print r' }'
        # Software adapter replaces the recorded adapter enumeration.
        print r' if (hSoftware) {'
        print r' _result = _this->CreateSoftwareAdapter(hSoftware, reinterpret_cast<IDXGIAdapter **>(ppAdapter));'
        print r' } else {'
        Retracer.invokeInterfaceMethod(self, interface, method)
        print r' }'
        return
    if interface.name.startswith('ID3D10Device') and method.name.startswith('OpenSharedResource'):
        print r' retrace::warning(call) << "replacing shared resource with checker pattern\n";'
        print r' _result = d3dretrace::createSharedResource(_this, ReturnedInterface, ppResource);'
        self.checkResult(interface, method)
        return
    if interface.name.startswith('ID3D11Device') and method.name == 'OpenSharedResource':
        # Some applications (e.g., video playing in IE11) create shared resources within the same process.
        # TODO: Generalize to other OpenSharedResource variants
        print r' retrace::map<HANDLE>::const_iterator it = _shared_handle_map.find(hResource);'
        print r' if (it == _shared_handle_map.end()) {'
        print r' retrace::warning(call) << "replacing shared resource with checker pattern\n";'
        print r' _result = d3dretrace::createSharedResource(_this, ReturnedInterface, ppResource);'
        self.checkResult(interface, method)
        print r' } else {'
        print r' hResource = it->second;'
        Retracer.invokeInterfaceMethod(self, interface, method)
        print r' }'
        return
    if interface.name.startswith('ID3D11Device') and method.name.startswith('OpenSharedResource'):
        print r' retrace::warning(call) << "replacing shared resource with checker pattern\n";'
        print r' _result = d3dretrace::createSharedResource(_this, ReturnedInterface, ppResource);'
        # Silence unused-argument warnings for whichever variant this is.
        if method.name == 'OpenSharedResourceByName':
            print r' (void)lpName;'
            print r' (void)dwDesiredAccess;'
        else:
            print r' (void)hResource;'
        self.checkResult(interface, method)
        return
    if method.name == 'Map':
        # Reset _DO_NOT_WAIT flags. Otherwise they may fail, and we have no
        # way to cope with it (other than retry).
        mapFlagsArg = method.getArgByName('MapFlags')
        for flag in mapFlagsArg.type.values:
            if flag.endswith('_MAP_FLAG_DO_NOT_WAIT'):
                print r' MapFlags &= ~%s;' % flag
    if method.name.startswith('UpdateSubresource'):
        # The D3D10 debug layer is buggy (or at least inconsistent with the
        # runtime), as it seems to estimate and enforce the data size based on the
        # SrcDepthPitch, even for non 3D textures, but in some traces
        # SrcDepthPitch is garbagge for non 3D textures.
        # XXX: It also seems to expect padding bytes at the end of the last
        # row, but we never record (or allocate) those...
        print r' if (retrace::debug >= 2 && pDstBox && pDstBox->front == 0 && pDstBox->back == 1) {'
        print r' SrcDepthPitch = 0;'
        print r' }'
    if method.name == 'SetGammaControl':
        # This method is only supported while in full-screen mode
        print r' if (retrace::forceWindowed) {'
        print r' return;'
        print r' }'
    if method.name == 'GetData':
        # Allocate a scratch buffer and poll until the query result is ready.
        print r' pData = _allocator.alloc(DataSize);'
        print r' do {'
        self.doInvokeInterfaceMethod(interface, method)
        print r' GetDataFlags = 0; // Prevent infinite loop'
        print r' } while (_result == S_FALSE);'
        self.checkResult(interface, method)
        print r' return;'
    # Common path: emit the plain invocation.
    Retracer.invokeInterfaceMethod(self, interface, method)
    if method.name in ('AcquireSync', 'ReleaseSync'):
        print r' if (SUCCEEDED(_result) && _result != S_OK) {'
        print r' retrace::warning(call) << " returned " << _result << "\n";'
        print r' }'
    # process events after presents
    if interface.name.startswith('IDXGISwapChain') and method.name.startswith('Present'):
        print r' d3dretrace::processEvents();'
    if method.name in ('Map', 'Unmap'):
        # Locate the per-subresource mapped-pointer slot used to track the
        # region between Map and Unmap.
        if interface.name.startswith('ID3D11DeviceContext'):
            print ' void * & _pbData = g_Maps[_this][SubresourceKey(pResource, Subresource)];'
        else:
            subresourceArg = method.getArgByName('Subresource')
            if subresourceArg is None:
                print ' UINT Subresource = 0;'
            print ' void * & _pbData = g_Maps[0][SubresourceKey(_this, Subresource)];'
        if method.name == 'Map':
            print ' _MAP_DESC _MapDesc;'
            print ' _getMapDesc(_this, %s, _MapDesc);' % ', '.join(method.argNames())
            print ' size_t _MappedSize = _MapDesc.Size;'
            print ' if (_MapDesc.Size) {'
            print ' _pbData = _MapDesc.pData;'
            if interface.name.startswith('ID3D11DeviceContext'):
                # Prevent false warnings on 1D and 2D resources, since the
                # pitches are often junk there...
                print ' _normalizeMap(pResource, pMappedResource);'
            else:
                print ' _pbData = _MapDesc.pData;'
            print ' } else {'
            print ' return;'
            print ' }'
        if method.name == 'Unmap':
            print ' if (_pbData) {'
            print ' retrace::delRegionByPointer(_pbData);'
            print ' _pbData = 0;'
            print ' }'
    if interface.name.startswith('ID3D11VideoContext'):
        # Decoder buffers behave like Map/Unmap pairs keyed by buffer type.
        if method.name == 'GetDecoderBuffer':
            print ' if (*ppBuffer && *pBufferSize) {'
            print ' g_Maps[nullptr][SubresourceKey(_this, Type)] = *ppBuffer;'
            print ' }'
        if method.name == 'ReleaseDecoderBuffer':
            print ' SubresourceKey _mappingKey(_this, Type);'
            print ' void *_pBuffer = g_Maps[nullptr][_mappingKey];'
            print ' if (_pBuffer) {'
            print ' retrace::delRegionByPointer(_pBuffer);'
            print ' g_Maps[nullptr][_mappingKey] = 0;'
            print ' }'
    # Attach shader byte code for lookup
    if 'pShaderBytecode' in method.argNames():
        ppShader = method.args[-1]
        assert ppShader.output
        print r' if (retrace::dumpingState && SUCCEEDED(_result)) {'
        print r' (*%s)->SetPrivateData(d3dstate::GUID_D3DSTATE, BytecodeLength, pShaderBytecode);' % ppShader.name
        print r' }'
    if method.name == 'CreateBuffer':
        # Label buffers with their trace address so state dumps can match them.
        ppBuffer = method.args[-1]
        print r' if (retrace::dumpingState && SUCCEEDED(_result)) {'
        print r' char label[32];'
        print r' _snprintf(label, sizeof label, "0x%%llx", call.arg(%u).toArray()->values[0]->toUIntPtr());' % ppBuffer.index
        print r' (*%s)->SetPrivateData(WKPDID_D3DDebugObjectName, strlen(label)+1, label);' % ppBuffer.name
        print r' }'
def retraceInterfaceMethodBody(self, interface, method):
    """Emit the method body, then append pitch-swizzle bookkeeping for Map
    calls so that rows recorded with one pitch can be replayed into a
    surface with a different pitch.

    Skips buffers and 1D textures, which have no row pitch.
    """
    Retracer.retraceInterfaceMethodBody(self, interface, method)
    # Add pitch swizzling information to the region
    if method.name == 'Map' and interface.name not in ('ID3D10Buffer', 'ID3D10Texture1D'):
        # Pick the output argument and the struct-member names that hold the
        # mapped pointer / row pitch / depth pitch for this API family.
        if interface.name.startswith('ID3D11DeviceContext'):
            outArg = method.getArgByName('pMappedResource')
            memberNames = ('pData', 'RowPitch', 'DepthPitch')
        elif interface.name.startswith('ID3D10'):
            outArg = method.args[-1]
            memberNames = ('pData', 'RowPitch', 'DepthPitch')
        elif interface.name == 'IDXGISurface':
            outArg = method.getArgByName('pLockedRect')
            memberNames = ('pBits', 'Pitch', None)
        else:
            raise NotImplementedError
        struct = outArg.type.type
        dataMemberName, rowPitchMemberName, depthPitchMemberName = memberNames
        dataMemberIndex = struct.getMemberByName(dataMemberName)
        rowPitchMemberIndex = struct.getMemberByName(rowPitchMemberName)
        # Compare the recorded pitch against the replay pitch and register a
        # swizzle when they differ.
        print r' if (_pbData && %s->%s != 0) {' % (outArg.name, rowPitchMemberName)
        print r' const trace::Array *_%s = call.arg(%u).toArray();' % (outArg.name, outArg.index)
        print r' if (%s) {' % outArg.name
        print r' const trace::Struct *_struct = _%s->values[0]->toStruct();' % (outArg.name)
        print r' if (_struct) {'
        print r' unsigned long long traceAddress = _struct->members[%u]->toUIntPtr();' % dataMemberIndex
        print r' int traceRowPitch = _struct->members[%u]->toSInt();' % rowPitchMemberIndex
        print r' int realRowPitch = %s->%s;' % (outArg.name, rowPitchMemberName)
        print r' if (realRowPitch && traceRowPitch != realRowPitch) {'
        print r' retrace::setRegionPitch(traceAddress, 2, traceRowPitch, realRowPitch);'
        print r' }'
        # DXGI surfaces have no depth pitch; tolerate its absence.
        try:
            depthPitchMemberIndex = struct.getMemberByName(depthPitchMemberName)
        except ValueError:
            assert len(struct.members) < 3
            pass
        else:
            assert depthPitchMemberName == 'DepthPitch'
            print r' if (%s->DepthPitch) {' % outArg.name
            print r' retrace::checkMismatch(call, "DepthPitch", _struct->members[%u], %s->DepthPitch);' % (struct.getMemberByName('DepthPitch'), outArg.name)
            print r' }'
        print r' }'
        print r' }'
        print r' }'
def extractArg(self, function, arg, arg_type, lvalue, rvalue):
    """Emit argument extraction, special-casing ``SetPrivateData``'s
    ``pData`` so that D3D debug object names are reinterpreted as strings
    and any other private-data GUID is skipped entirely.
    """
    # Set object names
    if function.name == 'SetPrivateData' and arg.name == 'pData':
        iid = function.args[0].name
        # Only WKPDID_D3DDebugObjectName payloads are replayed.
        print r' if (%s != WKPDID_D3DDebugObjectName) {' % iid
        print r' return;'
        print r' }'
        # Interpret argument as string
        Retracer.extractArg(self, function, arg, LPCSTR, lvalue, rvalue)
        print r' if (!pData) {'
        print r' return;'
        print r' }'
        print r' assert(DataSize >= strlen((const char *)pData));'
        print r' // Some applications include the trailing zero terminator in the data'
        print r' DataSize = strlen((const char *)pData);'
        return
    Retracer.extractArg(self, function, arg, arg_type, lvalue, rvalue)
def main():
    """Print the generated d3dretrace C++ translation unit to stdout:
    headers, static state dumpers, then the retrace code for every DXGI /
    D3D10 / D3D11 / DirectComposition module.
    """
    print r'#define INITGUID'
    print
    print r'#include <string.h>'
    print
    print r'#include <iostream>'
    print
    print r'#include "d3dretrace.hpp"'
    print r'#include "os_version.hpp"'
    print
    print r'#include "d3dretrace_dxgi.hpp"'
    print r'#include "d3d10imports.hpp"'
    print r'#include "d3d10size.hpp"'
    print r'#include "d3d10state.hpp"'
    print r'#include "d3d11imports.hpp"'
    print r'#include "d3d11size.hpp"'
    print r'#include "dcompimports.hpp"'
    print r'#include "d3dstate.hpp"'
    print r'#include "d3d9imports.hpp" // D3DERR_WASSTILLDRAWING'
    print
    # Per-API state dumpers referenced by the emitted method bodies.
    print '''static d3dretrace::D3DDumper<IDXGISwapChain> dxgiDumper;'''
    print '''static d3dretrace::D3DDumper<ID3D10Device> d3d10Dumper;'''
    print '''static d3dretrace::D3DDumper<ID3D11DeviceContext> d3d11Dumper;'''
    print
    api = API()
    api.addModule(dxgi)
    api.addModule(d3d10)
    api.addModule(d3d10_1)
    api.addModule(d3d11)
    api.addModule(dcomp)
    retracer = D3DRetracer()
    retracer.retraceApi(api)
# Allow running the generator directly as a script.
if __name__ == '__main__':
    main()
| 49.354286
| 180
| 0.563544
|
4a02b233f71c5c573b7ff6c230dfbab5aefcb422
| 1,219
|
py
|
Python
|
code/solver/meta_heur/__init__.py
|
ahillbs/minimum_scan_cover
|
e41718e5a8e0e3039d161800da70e56bd50a1b97
|
[
"MIT"
] | null | null | null |
code/solver/meta_heur/__init__.py
|
ahillbs/minimum_scan_cover
|
e41718e5a8e0e3039d161800da70e56bd50a1b97
|
[
"MIT"
] | null | null | null |
code/solver/meta_heur/__init__.py
|
ahillbs/minimum_scan_cover
|
e41718e5a8e0e3039d161800da70e56bd50a1b97
|
[
"MIT"
] | null | null | null |
from .genetic_algorithm import AngularGeneticMinSumSolver, AngularGeneticLocalMinSumSolver, AngularGeneticMakespanSolver
from .iterated_local_search import AngularIteratedMinSumSolver, AngularIteratedLocalMinSumSolver, AngularIteratedMakespanSolver
from .simulated_annealing import AngularSimulatedAnnealingMinSumSolver, AngularSimulatedAnnealingLocalMinSumSolver, AngularSimulatedAnnealingMakespanSolver
# Registries mapping solver-class names (as strings) to the classes
# themselves, grouped by the objective each solver optimises.  Keys mirror
# the class names so solvers can be looked up from configuration strings.
MIN_SUM_SOLVER = dict(
    AngularGeneticMinSumSolver=AngularGeneticMinSumSolver,
    AngularIteratedMinSumSolver=AngularIteratedMinSumSolver,
    AngularSimulatedAnnealingMinSumSolver=AngularSimulatedAnnealingMinSumSolver,
)

LOCAL_MIN_SUM_SOLVER = dict(
    AngularGeneticLocalMinSumSolver=AngularGeneticLocalMinSumSolver,
    AngularIteratedLocalMinSumSolver=AngularIteratedLocalMinSumSolver,
    AngularSimulatedAnnealingLocalMinSumSolver=AngularSimulatedAnnealingLocalMinSumSolver,
)

MAKESPAN_SOLVER = dict(
    AngularGeneticMakespanSolver=AngularGeneticMakespanSolver,
    AngularIteratedMakespanSolver=AngularIteratedMakespanSolver,
    AngularSimulatedAnnealingMakespanSolver=AngularSimulatedAnnealingMakespanSolver,
)

# Union of all three registries above.
ALL_SOLVER = dict(MIN_SUM_SOLVER)
ALL_SOLVER.update(LOCAL_MIN_SUM_SOLVER)
ALL_SOLVER.update(MAKESPAN_SOLVER)
| 60.95
| 155
| 0.876128
|
4a02b2e5e6d45705f660ec0fef1979bb99b3d1de
| 1,204
|
py
|
Python
|
pymeasure/instruments/thorlabs/__init__.py
|
NeoBeats/pymeasure
|
e48f9d679d6ee970e2e875d2fc9a5679378b07aa
|
[
"MIT"
] | null | null | null |
pymeasure/instruments/thorlabs/__init__.py
|
NeoBeats/pymeasure
|
e48f9d679d6ee970e2e875d2fc9a5679378b07aa
|
[
"MIT"
] | null | null | null |
pymeasure/instruments/thorlabs/__init__.py
|
NeoBeats/pymeasure
|
e48f9d679d6ee970e2e875d2fc9a5679378b07aa
|
[
"MIT"
] | null | null | null |
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2020 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from .thorlabspm100usb import ThorlabsPM100USB
| 46.307692
| 79
| 0.780731
|
4a02b3504591c793ab0957112c3b06c9aecebce1
| 845
|
py
|
Python
|
torch_glow/tests/nodes/avgpool2d_test.py
|
842974287/glow
|
7d77eb9a1c00dbba77321f62ad9c9078beb2b725
|
[
"Apache-2.0"
] | null | null | null |
torch_glow/tests/nodes/avgpool2d_test.py
|
842974287/glow
|
7d77eb9a1c00dbba77321f62ad9c9078beb2b725
|
[
"Apache-2.0"
] | null | null | null |
torch_glow/tests/nodes/avgpool2d_test.py
|
842974287/glow
|
7d77eb9a1c00dbba77321f62ad9c9078beb2b725
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import torch
import torch.nn.functional as F
from tests.utils import jitVsGlow
class TestAvgPool2d(unittest.TestCase):
    """Glow-vs-JIT parity tests for the PyTorch ``avg_pool2d`` node."""

    def test_avg_pool2d_basic(self):
        """Basic test of the PyTorch avg_pool2d Node on Glow."""

        def pool(tensor):
            return F.avg_pool2d(tensor, 3)

        sample = torch.randn(1, 4, 5, 5)
        jitVsGlow(pool, sample, expected_fused_ops={"aten::avg_pool2d"})

    def test_avg_pool2d_with_args(self):
        """Test of the PyTorch avg_pool2d Node with arguments on Glow."""

        def pool(tensor):
            return F.avg_pool2d(tensor, padding=3, kernel_size=7)

        sample = torch.randn(1, 4, 10, 10)
        jitVsGlow(pool, sample, expected_fused_ops={"aten::avg_pool2d"})
| 28.166667
| 82
| 0.688757
|
4a02b3bf9c689e2728ce73db4ccd78d732f85094
| 28,754
|
py
|
Python
|
OpenSeesAPI/Model/Element/Element.py
|
alok230460/Open
|
ccd7c43c82c13bc87e6c208302f7448060b856ba
|
[
"MIT",
"Unlicense"
] | 41
|
2015-09-08T09:41:13.000Z
|
2022-03-26T08:40:02.000Z
|
OpenSeesAPI/Model/Element/Element.py
|
alok230460/Open
|
ccd7c43c82c13bc87e6c208302f7448060b856ba
|
[
"MIT",
"Unlicense"
] | 4
|
2015-09-11T17:20:46.000Z
|
2016-05-02T20:42:58.000Z
|
OpenSeesAPI/Model/Element/Element.py
|
alok230460/Open
|
ccd7c43c82c13bc87e6c208302f7448060b856ba
|
[
"MIT",
"Unlicense"
] | 31
|
2015-09-11T17:01:36.000Z
|
2021-11-08T17:48:27.000Z
|
"""
This class is used to create the following OpenSees TCL Commands:
This command is used to construct an element and add it to the Domain.
element eleType? arg1? ...
The type of element created and the additional arguments required depends on the eleType? provided in the command.
NOTE:
The valid queries to any element when creating an ElementRecorder are documented in the NOTES section for each element.
The following contain information about eleType? and the args required for each of the available element types:
Zero-Length Elements
zeroLength Element
zeroLengthND Element
zeroLengthSection Element
CoupledZeroLength Element
zeroLengthContact Element
zeroLengthContactNTS2D
zeroLengthInterface2D
zeroLengthImpact3D
Truss Elements
Truss Element
Corotational Truss Element
Beam-Column Elements
Elastic Beam Column Element
Elastic Beam Column Element with Stiffness Modifiers
Elastic Timoshenko Beam Column Element
Beam With Hinges Element
Displacement-Based Beam-Column Element
Force-Based Beam-Column Element
Flexure-Shear Interaction Displacement-Based Beam-Column Element
Joint Elements
BeamColumnJoint Element
ElasticTubularJoint Element
Joint2D Element
Link Elements
Two Node Link Element
Bearing Elements
Elastomeric Bearing (Plasticity) Element
Elastomeric Bearing (Bouc-Wen) Element
Flat Slider Bearing Element
Single Friction Pendulum Bearing Element
TFP Bearing
Triple Friction Pendulum Element
MultipleShearSpring Element
KikuchiBearing Element
YamamotoBiaxialHDR Element
ElastomericX
LeadRubberX
HDR
RJ-Watson EQS Bearing Element
Quadrilateral Elements
Quad Element
Shell Element
ShellNL
Bbar Plane Strain Quadrilateral Element
Enhanced Strain Quadrilateral Element
SSPquad Element
Triangular Elements
Tri31 Element
Brick Elements
Standard Brick Element
Bbar Brick Element
Twenty Node Brick Element
Twenty Seven Node Brick Element
SSPbrick Element
u-p Elements
UC San Diego u-p element (saturated soil)
Four Node Quad u-p Element
Brick u-p Element
bbarQuad u-p Element
bbarBrick u-p Element
Nine Four Node Quad u-p Element
Twenty Eight Node Brick u-p Element
Twenty Node Brick u-p Element
Brick Large Displacement u-p Element
SSPquadUP Element
SSPbrickUP Element
Misc.
ShallowFoundationGen
SurfaceLoad Element
Contact Elements
SimpleContact2D Element
SimpleContact3D Element
BeamContact2D Element
BeamContact3D Element
BeamEndContact3D Element
zeroLengthImpact3D
"""
__author__ = 'marafi'
from OpenSeesAPI.OpenSees import OpenSees
class Truss(OpenSees):
    """``element truss`` — axial-only truss element between two nodes.

    TCL forms:
        element truss $eleTag $iNode $jNode $A $matTag <-rho $rho> <-cMass $cFlag> <-doRayleigh $rFlag>
        element trussSection $eleTag $iNode $jNode $secTag <-rho $rho> <-cMass $cFlag> <-doRayleigh $rFlag>

    $eleTag  unique element object tag
    $iNode $jNode  end nodes
    $A       cross-sectional area of element
    $matTag  tag associated with previously-defined UniaxialMaterial
    $rho     mass per unit length, optional, default = 0.0
    $cFlag   consistent mass flag (0 = lumped [default], 1 = consistent)
    $rFlag   Rayleigh damping flag (0 = none [default], 1 = include)
    """

    def __init__(self, id, NodeI, NodeJ, Area, Material, **kwargs):
        """Store the element data and build the TCL command line.

        :param id: unique element tag (int)
        :param NodeI, NodeJ: node objects exposing an ``id`` attribute
        :param Area: cross-sectional area
        :param Material: uniaxial material object exposing an ``id`` attribute
        :param kwargs: extra attributes merged into the instance
        """
        self._id = id
        self._NodeI = NodeI
        self._NodeJ = NodeJ
        self._Area = Area
        self._Material = Material
        # Merge extras BEFORE building the command, matching every sibling
        # element class in this module.
        self.__dict__.update(kwargs)
        # Fix: the original formatted with ``self.id``, which is an
        # AttributeError unless the OpenSees base class aliases ``id`` to
        # ``_id``; use the stored attribute directly like the other classes.
        self._CommandLine = 'element truss %d %s %s %f %s' % (
            self._id, self._NodeI.id, self._NodeJ.id, self._Area, self._Material.id)
class ZeroLength(OpenSees):
    """``element zeroLength`` — two coincident nodes joined by uniaxial
    materials acting in specified local directions.

    element zeroLength $eleTag $iNode $jNode -mat $matTag1 $matTag2 ... -dir $dir1 $dir2 ... <-doRayleigh $rFlag> <-orient $x1 $x2 $x3 $yp1 $yp2 $yp3>

    $eleTag  unique element object tag
    $iNode $jNode  end nodes
    $matTag1 $matTag2 ...  tags of previously-defined UniaxialMaterials
    $dir1 $dir2 ...  material directions:
        1,2,3 - translation along local x,y,z axes, respectively;
        4,5,6 - rotation about local x,y,z axes, respectively
    $x1 $x2 $x3  global components of the local x-axis (optional)
    $yp1 $yp2 $yp3  global components of vector yp in the local x-y plane (optional)
    $rFlag  Rayleigh damping flag (0 = none [default], 1 = include)
    """

    def __init__(self, id, NodeI, NodeJ, MaterialList, DOFList, OrientDirection=None, **kwargs):
        """Store the element data and build the TCL command line.

        :param MaterialList: material objects exposing ``id``; paired 1:1 with DOFList
        :param DOFList: integer direction codes (see class docstring)
        :param OrientDirection: optional 6-sequence (x1 x2 x3 yp1 yp2 yp3)
        """
        self._id = id
        self._NodeI = NodeI
        self._NodeJ = NodeJ
        self._MaterialList = MaterialList
        self._DOFList = DOFList
        self._OrientDirection = OrientDirection
        self.__dict__.update(kwargs)
        # Optional orientation suffix; PEP 8 identity comparison with None.
        OD = ''
        if OrientDirection is not None:
            OD = '-orient %f %f %f %f %f %f' % tuple(OrientDirection)
        self._CommandLine = 'element zeroLength %d %d %d -mat %s -dir %s %s' % (
            self._id, self._NodeI.id, self._NodeJ.id,
            ''.join([' %d' % s.id for s in MaterialList]),
            ''.join([' %d' % s for s in DOFList]), OD)
class ZeroLengthSection(OpenSees):
    """``element zeroLengthSection`` — two coincident nodes connected by a
    single section object representing the element force-deformation
    relationship.

    element zeroLengthSection $eleTag $iNode $jNode $secTag <-orient $x1 $x2 $x3 $yp1 $yp2 $yp3> <-doRayleigh $rFlag>

    $eleTag  unique element object tag
    $iNode $jNode  end nodes
    $secTag  tag of a previously-defined Section object
    $x1 $x2 $x3  global components of the local x-axis (optional)
    $yp1 $yp2 $yp3  global components of vector yp in the local x-y plane (optional)
    $rFlag  Rayleigh damping flag (0 = none, 1 = include [default])
    """

    def __init__(self, id, NodeI, NodeJ, Section, OrientDirection=None, Optional='', **kwargs):
        """Store the element data and build the TCL command line.

        :param Section: section object exposing an ``id`` attribute
        :param OrientDirection: optional 6-sequence (x1 x2 x3 yp1 yp2 yp3)
        :param Optional: raw string appended verbatim (e.g. '-doRayleigh 1')
        """
        self._id = id
        self._NodeI = NodeI
        self._NodeJ = NodeJ
        self._Section = Section
        self._OrientDirection = OrientDirection
        self._Optional = Optional
        self.__dict__.update(kwargs)
        # Optional orientation suffix; PEP 8 identity comparison with None.
        OD = ''
        if OrientDirection is not None:
            OD = '-orient %f %f %f %f %f %f' % tuple(OrientDirection)
        self._CommandLine = 'element zeroLengthSection %d %d %d %d %s %s' % (
            self._id, self._NodeI.id, self._NodeJ.id, self._Section.id, OD, self._Optional)
class ElasticBeamColumn(OpenSees):
    """``element elasticBeamColumn`` — elastic beam-column element.

    2D:  element elasticBeamColumn $eleTag $iNode $jNode $A $E $Iz $transfTag <-mass $massDens> <-cMass>
    3D:  element elasticBeamColumn $eleTag $iNode $jNode $A $E $G $J $Iy $Iz $transfTag <-mass $massDens> <-cMass>

    $A  cross-sectional area           $E  Young's modulus
    $G  shear modulus (3D only)        $J  torsional moment of inertia (3D only)
    $Iz second moment about local z    $Iy second moment about local y (3D only)
    $transfTag  previously-defined geometric transformation
    $massDens   element mass per unit length (optional, default = 0.0)
    -cMass      consistent mass matrix (optional, default = lumped)
    """

    def __init__(self, id, NodeI, NodeJ, A, E, Iz, TransTag, G=None, J=None, Iy=None, Mass=None, Option='', **kwargs):
        """Store the element data and build the TCL command line.

        The 2D form is emitted when ``G`` is None; the 3D form otherwise,
        in which case ``J`` and ``Iy`` must also be supplied.

        :raises ValueError: if ``G`` is given without ``J`` and ``Iy``
        """
        self._id = id
        self._NodeI = NodeI
        self._NodeJ = NodeJ
        self._A = A
        self._E = E
        self._Iz = Iz
        self._TransTag = TransTag
        self._G = G
        self._J = J
        self._Iy = Iy
        self._Mass = Mass
        self._Option = Option
        self.__dict__.update(kwargs)
        # Optional trailing '-mass ...' plus any raw extra options.
        if self._Mass is not None:
            self._EndCommand = '-mass %f %s' % (self._Mass, self._Option)
        else:
            self._EndCommand = self._Option
        if G is None:
            # 2D form.
            self._CommandLine = 'element elasticBeamColumn %d %d %d %f %f %f %d %s' % (
                self._id, self._NodeI.id, self._NodeJ.id, self._A, self._E,
                self._Iz, self._TransTag.id, self._EndCommand)
        else:
            # 3D form: fail fast with a clear message instead of the opaque
            # TypeError the %f formatting would raise on a missing J or Iy.
            if self._J is None or self._Iy is None:
                raise ValueError('3D elasticBeamColumn requires J and Iy when G is given')
            self._CommandLine = 'element elasticBeamColumn %d %d %d %f %f %f %f %f %f %d %s' % (
                self._id, self._NodeI.id, self._NodeJ.id, self._A, self._E,
                self._G, self._J, self._Iy, self._Iz, self._TransTag.id, self._EndCommand)
class ElasticTimoshenkoBeam(OpenSees):
    """``element ElasticTimoshenkoBeam`` — elastic frame member that
    accounts for shear deformations.

    2D:  element ElasticTimoshenkoBeam $eleTag $iNode $jNode $E $G $A $Iz $Avy $transfTag <-mass $massDens> <-cMass>
    3D:  element ElasticTimoshenkoBeam $eleTag $iNode $jNode $E $G $A $Jx $Iy $Iz $Avy $Avz $transfTag <-mass $massDens> <-cMass>

    $E  Young's modulus                $G  shear modulus
    $A  cross-sectional area           $Jx torsional moment of inertia (3D only)
    $Iy second moment about local y (3D only)
    $Iz second moment about local z    $Avy shear area for the local y-axis
    $Avz shear area for the local z-axis (3D only)
    $transfTag  previously-defined geometric transformation
    $massDens   element mass per unit length (optional, default = 0.0)
    -cMass      consistent mass matrix (optional, default = lumped)
    """

    def __init__(self, id, NodeI, NodeJ, E, G, A, Iz, Avy, TransTag, Jx=None, Iy=None, Avz=None, MassDensity=None, Option='', **kwargs):
        """Store the element data and build the TCL command line.

        The 2D form is emitted when ``Jx`` is None; the 3D form otherwise,
        in which case ``Iy`` and ``Avz`` must also be supplied.

        :raises ValueError: if ``Jx`` is given without ``Iy`` and ``Avz``
        """
        self._id = id
        self._NodeI = NodeI
        self._NodeJ = NodeJ
        self._E = E
        self._G = G
        self._A = A
        self._Jx = Jx
        self._Iy = Iy
        self._Iz = Iz
        self._Avy = Avy
        self._Avz = Avz
        self._TransTag = TransTag
        self._MassDensity = MassDensity
        self._Option = Option
        self.__dict__.update(kwargs)
        # Optional trailing '-mass ...' plus any raw extra options.
        if self._MassDensity is not None:
            self._EndCommand = '-mass %f %s' % (self._MassDensity, self._Option)
        else:
            self._EndCommand = self._Option
        if Jx is None:
            # 2D form.
            self._CommandLine = 'element ElasticTimoshenkoBeam %d %d %d %f %f %f %f %f %d %s' % (
                self._id, self._NodeI.id, self._NodeJ.id, self._E, self._G,
                self._A, self._Iz, self._Avy, self._TransTag.id, self._EndCommand)
        else:
            # 3D form: fail fast with a clear message instead of the opaque
            # TypeError the %f formatting would raise on a missing Iy or Avz.
            if self._Iy is None or self._Avz is None:
                raise ValueError('3D ElasticTimoshenkoBeam requires Iy and Avz when Jx is given')
            self._CommandLine = 'element ElasticTimoshenkoBeam %d %d %d %f %f %f %f %f %f %f %f %d %s' % (
                self._id, self._NodeI.id, self._NodeJ.id, self._E, self._G,
                self._A, self._Jx, self._Iy, self._Iz, self._Avy, self._Avz,
                self._TransTag.id, self._EndCommand)
class DispBeamColumn(OpenSees):
    """``element dispBeamColumn`` — displacement-based beam-column element.

    element dispBeamColumn $eleTag $iNode $jNode $numIntgrPts $secTag $transfTag <-mass $massDens> <-cMass> <-integration $intType>

    Sections may vary along the length:
    element dispBeamColumn $eleTag $iNode $jNode $numIntgrPts -sections $secTag1 $secTag2 ... $transfTag <-mass $massDens> <-cMass> <-integration $intType>

    $numIntgrPts  number of integration points along the element
    $secTag(s)    previously-defined section object(s)
    $transfTag    previously-defined geometric transformation
    $massDens     element mass density per unit length (optional, default = 0.0)
    -cMass        consistent mass matrix (optional, default = lumped)
    $intType      Lobotto | Legendre | Radau | NewtonCotes | Trapezoidal (default = Legendre)
    """

    def __init__(self, id, NodeI, NodeJ, numIntgrPts, Section, GeomTrans, Mass=None, Optional='', **kwargs):
        """Store the element data and build the TCL command line.

        :param Section: a single section object, or a list of section
            objects (one per integration point) for the '-sections' form
        :param Optional: raw string appended verbatim (e.g. '-integration Lobatto')
        """
        self._id = id
        self._NodeI = NodeI
        self._NodeJ = NodeJ
        self._numIntgrPts = numIntgrPts
        self._Section = Section
        self._GeoTrans = GeomTrans
        self._Mass = Mass
        self._Optional = Optional
        self.__dict__.update(kwargs)
        # Optional trailing '-mass ...' plus any raw extra options.
        if self._Mass is not None:
            self._EndCommand = '-mass %f %s' % (self._Mass, self._Optional)
        else:
            self._EndCommand = self._Optional
        if not isinstance(self._Section, list):
            # Single section reused at every integration point.
            self._CommandLine = 'element dispBeamColumn %d %d %d %d %d %d %s' % (
                self._id, self._NodeI.id, self._NodeJ.id, self._numIntgrPts,
                self._Section.id, self._GeoTrans.id, self._EndCommand)
        else:
            # One section per integration point.
            self._CommandLine = 'element dispBeamColumn %d %d %d %d -sections %s %d %s' % (
                self._id, self._NodeI.id, self._NodeJ.id, self._numIntgrPts,
                ''.join([' %d' % x.id for x in self._Section]),
                self._GeoTrans.id, self._EndCommand)
class ForceBeamColumn(OpenSees):
    """``element forceBeamColumn`` — force-based beam-column element with a
    user-supplied integration specification.

    element forceBeamColumn $eleTag $iNode $jNode $transfTag "IntegrationType arg1 arg2 ..." <-mass $massDens> <-iter $maxIters $tol>

    $transfTag  previously-defined geometric transformation
    IntegrationType arg1 arg2 ...  locations/weights of integration points
        and their associated section force-deformation models
    $massDens   element mass density per unit length (optional, default = 0.0)
    $maxIters   max iterations for element compatibility (optional, default = 10)
    $tol        compatibility tolerance (optional, default = 1e-12)
    """

    def __init__(self, id, NodeI, NodeJ, GeomTrans, IntegrationType, Mass=None, Optional='', **kwargs):
        """Store the element data and build the TCL command line.

        :param IntegrationType: raw string of integration args inserted verbatim
        :param Optional: raw string appended verbatim (e.g. '-iter 20 1e-10')
        """
        self._id = id
        self._NodeI = NodeI
        self._NodeJ = NodeJ
        self._GeomTrans = GeomTrans
        self._IntegrationType = IntegrationType  # Assume to be a string of args
        self._Mass = Mass
        self._Optional = Optional
        self.__dict__.update(kwargs)
        # Optional trailing '-mass ...' plus any raw extra options.
        if self._Mass is not None:
            self._EndCommand = '-mass %f %s' % (self._Mass, self._Optional)
        else:
            self._EndCommand = ' %s' % (self._Optional)
        self._CommandLine = 'element forceBeamColumn %d %d %d %d %s %s' % (
            self._id, self._NodeI.id, self._NodeJ.id, self._GeomTrans.id,
            self._IntegrationType, self._EndCommand)
class ForceBeamColumnOriginal(OpenSees):
    """
    Original command that assumes Gauss-Lobatto integration with a copy of the same section force-deformation model at each integration point:
    element forceBeamColumn $eleTag $iNode $jNode $numIntgrPts $secTag $transfTag <-mass $massDens> <-iter $maxIters $tol> <-integration $intType>
    $eleTag unique element object tag
    $numIntgrPts number of Gauss-Lobatto integration points along the element.
    $secTag identifier for previously-defined section object
    """
    def __init__(self, id, NodeI, NodeJ, NoOfIntPoints, Section, GeomTrans, Mass=None, Optional='', **kwargs):
        """Build the legacy 'element forceBeamColumn' command.

        ``Section`` may be a raw tag string, a list of section objects (one per
        integration point, emitted via ``-sections``), or a single section object.
        """
        self._id = id
        self._NodeI = NodeI
        self._NodeJ = NodeJ
        self._NoOfIntPoints = NoOfIntPoints
        self._Section = Section
        self._GeomTrans = GeomTrans
        self._Mass = Mass
        self._Optional = Optional
        self.__dict__.update(kwargs)
        # Identity test instead of '!= None'.
        if self._Mass is not None:
            self._EndCommand = '-mass %f %s'%(self._Mass,self._Optional)
        else:
            self._EndCommand = ' %s'%(self._Optional)
        # isinstance() instead of 'type(x) == T': also accepts subclasses.
        if isinstance(self._Section, str):
            self._CommandLine = 'element forceBeamColumn %d %d %d %d %s %d %s'%(self._id, self._NodeI.id, self._NodeJ.id, self._NoOfIntPoints, self._Section, self._GeomTrans.id, self._EndCommand)
        elif isinstance(self._Section, list):
            self._CommandLine = 'element forceBeamColumn %d %d %d %d -sections %s %d %s'%(self._id, self._NodeI.id, self._NodeJ.id, self._NoOfIntPoints, ''.join([' %d'%x.id for x in self._Section]), self._GeomTrans.id, self._EndCommand)
        else:
            self._CommandLine = 'element forceBeamColumn %d %d %d %d %d %d %s'%(self._id, self._NodeI.id, self._NodeJ.id, self._NoOfIntPoints, self._Section.id, self._GeomTrans.id, self._EndCommand)
class ForceBeamColumnUserDefined(OpenSees):
    """
    element forceBeamColumn $eleTag $iNode $jNode $transfTag "IntegrationType arg1 arg2 ..." <-mass $massDens> <-iter $maxIters $tol>
    Emits a UserDefined integration rule with hard-coded point locations and
    weights for 3, 4, 5 or 6 integration points (one section per point).
    Any other point count prints a message and leaves the command line empty.
    """
    def __init__(self, id, NodeI, NodeJ, GeomTrans, SectionList, **kwargs):
        """Build the 'element forceBeamColumn ... UserDefined ...' command.

        :param SectionList: one section object per integration point
            (NOTE(review): tags are read via ``._id`` here, unlike the
            sibling classes which use ``.id`` — confirm both exist).
        """
        self._id = id
        self._NodeI = NodeI
        self._NodeJ = NodeJ
        self._GeomTrans = GeomTrans
        self._NoOfIntPoints = len(SectionList)
        self._SectionList = SectionList #list of section ids
        self.__dict__.update(kwargs)
        self._SectionString = ''.join('%d '%section._id for section in SectionList)
        # Fixed point locations followed by weights, keyed by point count.
        points_and_weights = {
            3: '0 0.5 1 0.166665 0.66667 0.166665',
            4: '0 0.276395 0.723605 1 0.083334 0.416665 0.416665 0.083334',
            5: '0 0.17267 0.5 0.82733 1 0.05 0.272222 0.355556 0.272222 0.05 ',
            6: '0 0.117473 0.357384 0.642616 0.882528 1 0.033333 0.189235 0.27743 0.27743 0.189235 0.033333',
        }
        tail = points_and_weights.get(self._NoOfIntPoints)
        if tail is None:
            print('Number of Sections not supported')
            self._CommandLine = ''
        else:
            self._CommandLine = 'element forceBeamColumn %d %d %d %d UserDefined %d %s %s'%(self._id, self._NodeI.id, self._NodeJ.id, self._GeomTrans.id, self._NoOfIntPoints, self._SectionString, tail)
class Joint2D(OpenSees):
    """
    element Joint2D $eleTag $Nd1 $Nd2 $Nd3 $Nd4 $NdC <$Mat1 $Mat2 $Mat3 $Mat4> $MatC $LrgDspTag
    $eleTag unique element object tag
    $Nd1 $Nd2 $Nd3 $Nd4 integer tags indicating four external nodes where the joint element is connected to the adjoining beam-column element
    $NdC integer tags indicating the central node of beam-column joint (the tag is used to generate the internal node, thus, the node should not exist in the domain or be used by any other node)
    $Mat1..$Mat4 uniaxial material tags for the interface rotational springs at nodes 1-4. A zero tag means the beam-column element is rigidly framed to the joint. (optional)
    $MatC uniaxial material tag for rotational spring of the central node that describes shear panel behavior
    $LrgDspTag an integer indicating the flag for considering large deformations:
    0 - for small deformations and constant geometry
    1 - for large deformations and time varying geometry
    """
    def __init__(self, id, NodeI, NodeJ, NodeK, NodeL, NodeCTag, MatC, LargeDispTag, MatIJKL=None, **kwargs):
        """Build the OpenSees 'element Joint2D' command string.

        :param MatIJKL: optional sequence of four interface-spring materials
            (each exposing ``.id``); when None the short command form without
            interface springs is emitted.
        """
        self._id = id
        self._NodeI = NodeI
        self._NodeJ = NodeJ
        self._NodeK = NodeK
        self._NodeL = NodeL
        self._NodeCTag = NodeCTag
        self._MatC = MatC
        self._LargeDispTag = LargeDispTag
        self._MatIJKL = MatIJKL
        self.__dict__.update(kwargs)
        # 'is None' (not '== None'): identity is the correct test for None.
        if MatIJKL is None:
            self._CommandLine = 'element Joint2D %d %d %d %d %d %d %d %d'%(self._id, self._NodeI.id, self._NodeJ.id, self._NodeK.id, self._NodeL.id, self._NodeCTag, self._MatC.id, self._LargeDispTag)
        else:
            self._CommandLine = 'element Joint2D %d %d %d %d %d %d %d %d %d %d %d %d'%(self._id, self._NodeI.id, self._NodeJ.id, self._NodeK.id, self._NodeL.id, self._NodeCTag, self._MatIJKL[0].id, self._MatIJKL[1].id, self._MatIJKL[2].id, self._MatIJKL[3].id,self._MatC.id, self._LargeDispTag)
class ShellMITC4(OpenSees):
    """
    Four-node MITC4 shell element: bilinear isoparametric formulation with a
    modified shear interpolation to improve thin-plate bending performance.
    element ShellMITC4 $eleTag $iNode $jNode $kNode $lNode $secTag
    Nodes are given counter-clockwise around the element; $secTag must be a
    previously-defined PlateFiberSection or ElasticMembranePlateSection.
    """
    def __init__(self, id, NodeI, NodeJ, NodeK, NodeL, Section, **kwargs):
        """Build the OpenSees 'element ShellMITC4' command string."""
        self._id = id
        self._NodeI, self._NodeJ = NodeI, NodeJ
        self._NodeK, self._NodeL = NodeK, NodeL
        self._Section = Section
        self.__dict__.update(kwargs)
        tags = (self._id, NodeI.id, NodeJ.id, NodeK.id, NodeL.id, Section.id)
        self._CommandLine = 'element ShellMITC4 %d %d %d %d %d %d' % tags
class ShellDKGQ(OpenSees):
    """
    Quadrilateral shell element based on the theory of generalized conforming
    elements.
    element ShellDKGQ $eleTag $iNode $jNode $kNode $lNode $secTag
    Nodes may be given clockwise or counter-clockwise; $secTag may be a
    PlateFiberSection, ElasticMembranePlateSection or LayeredShell section.
    """
    def __init__(self, id, NodeI, NodeJ, NodeK, NodeL, Section, **kwargs):
        """Build the OpenSees 'element ShellDKGQ' command string."""
        self._id = id
        self._NodeI, self._NodeJ = NodeI, NodeJ
        self._NodeK, self._NodeL = NodeK, NodeL
        self._Section = Section
        self.__dict__.update(kwargs)
        tags = (self._id, NodeI.id, NodeJ.id, NodeK.id, NodeL.id, Section.id)
        self._CommandLine = 'element ShellDKGQ %d %d %d %d %d %d' % tags
class ShellNLDKGQ(OpenSees):
    """
    Geometrically nonlinear variant of ShellDKGQ (updated Lagrangian
    formulation for large deformations).
    element ShellNLDKGQ $eleTag $iNode $jNode $kNode $lNode $secTag
    Nodes may be given clockwise or counter-clockwise; $secTag may be a
    PlateFiberSection, ElasticMembranePlateSection or LayeredShell section.
    """
    def __init__(self, id, NodeI, NodeJ, NodeK, NodeL, Section, **kwargs):
        """Build the OpenSees 'element ShellNLDKGQ' command string."""
        self._id = id
        self._NodeI, self._NodeJ = NodeI, NodeJ
        self._NodeK, self._NodeL = NodeK, NodeL
        self._Section = Section
        self.__dict__.update(kwargs)
        tags = (self._id, NodeI.id, NodeJ.id, NodeK.id, NodeL.id, Section.id)
        self._CommandLine = 'element ShellNLDKGQ %d %d %d %d %d %d' % tags
class MVLEM(OpenSees):
    """
    Element MVLEM $eleTag $Dens $iNode $jNode $m $c -thick {Thicknesses} -width {Widths} -rho {Reinforcing_ratios} -matConcrete {Concrete_tags} -matSteel {Steel_tags} -matShear {Shear_tag}
    $eleTag Unique element object tag
    $Dens Wall density
    $iNode $jNode End node tags
    $m Number of element macro-fibers
    $c Location of center of rotation from the iNode, c = 0.4 (recommended)
    {Thicknesses} Array of m macro-fiber thicknesses
    {Widths} Array of m macro-fiber widths
    {Reinforcing_ratios} Array of m reinforcing ratios corresponding to macro-fibers; for each fiber: rhoi = As,i/Agross,i (1 < i < m)
    {Concrete _tags} Array of m uniaxialMaterial tags for concrete
    {Steel_tags} Array of m uniaxialMaterial tags for steel
    {Shear_tag} Tag of uniaxialMaterial for shear material
    """
    def __init__(self, id, Density, NodeI, NodeJ, m, c, Thicknesses, Widths, ReinforcingRatios, ConcreteTags, SteelTags, ShearTag, **kwargs):
        """Build the OpenSees 'Element MVLEM' command string.

        ``ConcreteTags`` and ``SteelTags`` are sequences of material objects
        (each exposing ``.id``); ``ShearTag`` is a single material object.
        """
        self._id = id
        self.Density = Density
        self._NodeI = NodeI
        self._NodeJ = NodeJ
        self.m = m
        self.c = c
        self.Thicknesses = Thicknesses
        self.Widths = Widths
        self.ReinforcingRatios = ReinforcingRatios
        # BUG FIX: the original read 'self.ConcreteTags = ConcreteTags,' — the
        # trailing comma wrapped the list in a 1-tuple, so the -matConcrete
        # join iterated over the tuple and hit the list (no '.id' attribute).
        self.ConcreteTags = ConcreteTags
        self.SteelTags = SteelTags
        self.ShearTag = ShearTag
        self.__dict__.update(kwargs)
        self._CommandLine = 'element MVLEM %d %f %d %d %d %f -thick %s -width %s -rho %s -matConcrete %s -matSteel %s -matShear %d'%(self._id, self.Density, self._NodeI.id, self._NodeJ.id, self.m, self.c, ''.join([' %f'%s for s in self.Thicknesses]), ''.join([' %f'%s for s in self.Widths]), ''.join([' %f'%s for s in self.ReinforcingRatios]), ''.join([' %d'%s.id for s in self.ConcreteTags]), ''.join([' %d'%s.id for s in self.SteelTags]), self.ShearTag.id)
| 50.712522
| 458
| 0.703241
|
4a02b48c9e0a81f07d16d67796e4429247675fc9
| 1,192
|
py
|
Python
|
2020/muggle_ocr/muggle_ocr_dome.py
|
aleimu/code-puzzle
|
1aaa86e6b49e1fe15a2a6c6be22badd783594024
|
[
"MIT"
] | null | null | null |
2020/muggle_ocr/muggle_ocr_dome.py
|
aleimu/code-puzzle
|
1aaa86e6b49e1fe15a2a6c6be22badd783594024
|
[
"MIT"
] | null | null | null |
2020/muggle_ocr/muggle_ocr_dome.py
|
aleimu/code-puzzle
|
1aaa86e6b49e1fe15a2a6c6be22badd783594024
|
[
"MIT"
] | null | null | null |
import time
# 1. Import the package
import muggle_ocr
# NOTE: the bundled string below (kept verbatim) says: the preset models cover
# both ModelType.OCR and ModelType.Captcha; ModelType.OCR recognizes ordinary
# printed text, ModelType.Captcha recognizes simple 4-6 char alphanumeric captchas.
"""
使用预置模型,预置模型包含了[ModelType.OCR, ModelType.Captcha] 两种
其中 ModelType.OCR 用于识别普通印刷文本, ModelType.Captcha 用于识别4-6位简单英数验证码
"""
# Read the printed-text sample image as raw bytes
with open("test14.png", "rb") as f:
    ocr_bytes = f.read()
# Read the captcha sample image as raw bytes
with open("test10.png", "rb") as f:
    captcha_bytes = f.read()
# 2. Initialize; model_type is one of [ModelType.OCR, ModelType.Captcha]
sdk = muggle_ocr.SDK(model_type=muggle_ocr.ModelType.OCR)
# ModelType.OCR recognizes optically printed text
for i in range(5):
    st = time.time()
    # 3. Call the prediction function (timed per call)
    text = sdk.predict(image_bytes=ocr_bytes)
    print(text, time.time() - st)
# # ModelType.Captcha recognizes 4-6 character captchas
# sdk = muggle_ocr.SDK(model_type=muggle_ocr.ModelType.Captcha)
# for i in range(5):
#     st = time.time()
#     # 3. Call the prediction function
#     text = sdk.predict(image_bytes=captcha_bytes)
#     print(text, time.time() - st)
# """
# Using a custom model:
# supports models trained with https://github.com/kerlomz/captcha_trainer
# After training, go to the exported model's [out] directory and place the pb
# model from [graph] and the yaml config file from [model] in the same folder.
# Point conf_path at that yaml file (absolute or project-relative path); the
# remaining steps are unchanged, for example:
# """
# with open(r"test3.jpg", "rb") as f:
#     b = f.read()
# sdk = muggle_ocr.SDK(conf_path="./ocr.yaml")
# text = sdk.predict(image_bytes=b)
| 25.361702
| 69
| 0.689597
|
4a02b6c689f480ac9caf9838aee5e048fd719f3d
| 793
|
py
|
Python
|
modules/intelligence-gathering/masscan.py
|
nimert007/The-Penetration-Testers-Framework-PTF-
|
d8f7ae02b6655df93e1cabbca8481b5df55bd4c0
|
[
"FTL"
] | 4
|
2020-09-03T02:26:23.000Z
|
2022-02-25T06:57:25.000Z
|
modules/intelligence-gathering/masscan.py
|
Ben0xA/ptf
|
ebcc7a192c1286b962a826ec8e6f48334926b675
|
[
"FTL"
] | null | null | null |
modules/intelligence-gathering/masscan.py
|
Ben0xA/ptf
|
ebcc7a192c1286b962a826ec8e6f48334926b675
|
[
"FTL"
] | 3
|
2016-03-22T11:24:57.000Z
|
2019-03-28T11:31:25.000Z
|
#!/usr/bin/env python
#####################################
# Installation module for masscan
# (PTF declarative module: constants only, consumed by the framework)
#####################################
# AUTHOR OF MODULE NAME
AUTHOR="Mauro Risonho de Paula Assumpcao (firebits)"
# DESCRIPTION OF THE MODULE
DESCRIPTION="This module will install/update masscan - a quick TCP/SYN port scanner"
# INSTALL TYPE GIT, SVN, FILE DOWNLOAD
# OPTIONS = GIT, SVN, FILE
INSTALL_TYPE="GIT"
# LOCATION OF THE FILE OR GIT/SVN REPOSITORY
REPOSITORY_LOCATION="https://github.com/robertdavidgraham/masscan.git"
# WHERE DO YOU WANT TO INSTALL IT (presumably relative to the PTF base
# install path — confirm with the framework docs)
INSTALL_LOCATION="masscan"
# DEPENDS FOR DEBIAN INSTALLS (comma-separated apt package names)
DEBIAN="git,gcc,make,libpcap-dev"
# DEPENDS FOR FEDORA INSTALLS (comma-separated dnf/yum package names)
FEDORA="git,gcc,make,libpcap-devel"
# COMMANDS TO RUN AFTER ({INSTALL_LOCATION} is expanded by the framework)
AFTER_COMMANDS="cd {INSTALL_LOCATION},make -j"
| 26.433333
| 84
| 0.698613
|
4a02b6eddd7ea360a036fed3a64bb299fe7b019a
| 89
|
py
|
Python
|
tests/test_jax_util.py
|
shawwn/jax-util
|
08fa4e49137ad1e3789c78f654c4c8e919cdf294
|
[
"MIT"
] | null | null | null |
tests/test_jax_util.py
|
shawwn/jax-util
|
08fa4e49137ad1e3789c78f654c4c8e919cdf294
|
[
"MIT"
] | null | null | null |
tests/test_jax_util.py
|
shawwn/jax-util
|
08fa4e49137ad1e3789c78f654c4c8e919cdf294
|
[
"MIT"
] | null | null | null |
from jax_util import __version__
def test_version():
    """Sanity-check the package version string (keep in sync with pyproject)."""
    assert __version__ == '0.1.0'
| 14.833333
| 33
| 0.719101
|
4a02b79ab5b69fcb199a6b61bbe54fbed524b355
| 49,949
|
py
|
Python
|
eland/series.py
|
ailanmar/eland
|
c14bc2403276ad989ef6adb9215db5906813d648
|
[
"Apache-2.0"
] | 335
|
2020-01-10T10:47:21.000Z
|
2022-03-31T12:33:03.000Z
|
eland/series.py
|
ailanmar/eland
|
c14bc2403276ad989ef6adb9215db5906813d648
|
[
"Apache-2.0"
] | 255
|
2020-01-10T11:09:32.000Z
|
2022-03-31T21:34:54.000Z
|
eland/series.py
|
ailanmar/eland
|
c14bc2403276ad989ef6adb9215db5906813d648
|
[
"Apache-2.0"
] | 51
|
2020-01-10T10:53:54.000Z
|
2022-03-15T15:48:18.000Z
|
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Series
---------
One-dimensional ndarray with axis labels (including time series).
The underlying data resides in Elasticsearch and the API aligns as much as
possible with pandas.DataFrame API.
This allows the eland.Series to access large datasets stored in Elasticsearch,
without storing the dataset in local memory.
Implementation Details
----------------------
Based on NDFrame which underpins eland.DataFrame
"""
import sys
import warnings
from collections.abc import Collection
from io import StringIO
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd # type: ignore
from pandas.io.common import _expand_user, stringify_path # type: ignore
import eland.plotting
from eland.arithmetics import ArithmeticNumber, ArithmeticSeries, ArithmeticString
from eland.common import DEFAULT_NUM_ROWS_DISPLAYED, docstring_parameter
from eland.filter import (
BooleanFilter,
Equal,
Greater,
GreaterEqual,
IsIn,
IsNull,
Less,
LessEqual,
NotFilter,
NotNull,
QueryFilter,
ScriptFilter,
)
from eland.ndframe import NDFrame
from eland.utils import to_list
if TYPE_CHECKING:
from elasticsearch import Elasticsearch
from eland.query_compiler import QueryCompiler
def _get_method_name() -> str:
return sys._getframe(1).f_code.co_name
class Series(NDFrame):
"""
pandas.Series like API that proxies into Elasticsearch index(es).
Parameters
----------
es_client : elasticsearch.Elasticsearch
A reference to a Elasticsearch python client
es_index_pattern : str
An Elasticsearch index pattern. This can contain wildcards.
es_index_field : str
The field to base the series on
Notes
-----
If the Elasticsearch index is deleted or index mappings are changed after this
object is created, the object is not rebuilt and so inconsistencies can occur.
See Also
--------
:pandas_api_docs:`pandas.Series`
Examples
--------
>>> ed.Series(es_client='http://localhost:9200', es_index_pattern='flights', name='Carrier')
0 Kibana Airlines
1 Logstash Airways
2 Logstash Airways
3 Kibana Airlines
4 Kibana Airlines
...
13054 Logstash Airways
13055 Logstash Airways
13056 Logstash Airways
13057 JetBeats
13058 JetBeats
Name: Carrier, Length: 13059, dtype: object
"""
def __init__(
self,
es_client: Optional["Elasticsearch"] = None,
es_index_pattern: Optional[str] = None,
name: Optional[str] = None,
es_index_field: Optional[str] = None,
_query_compiler: Optional["QueryCompiler"] = None,
) -> None:
# Series has 1 column
if name is None:
columns = None
else:
columns = [name]
super().__init__(
es_client=es_client,
es_index_pattern=es_index_pattern,
columns=columns,
es_index_field=es_index_field,
_query_compiler=_query_compiler,
)
hist = eland.plotting.ed_hist_series
@property
def empty(self) -> bool:
"""Determines if the Series is empty.
Returns:
True if the Series is empty.
False otherwise.
"""
return len(self.index) == 0
@property
def shape(self) -> Tuple[int, int]:
"""
Return a tuple representing the dimensionality of the Series.
Returns
-------
shape: tuple
0. number of rows
1. number of columns
Notes
-----
- number of rows ``len(series)`` queries Elasticsearch
- number of columns == 1
Examples
--------
>>> df = ed.Series('http://localhost:9200', 'ecommerce', name='total_quantity')
>>> df.shape
(4675, 1)
"""
num_rows = len(self)
num_columns = 1
return num_rows, num_columns
@property
def es_field_name(self) -> pd.Index:
"""
Returns
-------
es_field_name: str
Return the Elasticsearch field name for this series
"""
return self._query_compiler.get_field_names(include_scripted_fields=True)[0]
    @property
    def name(self) -> pd.Index:
        """Name of the series, i.e. its single column label."""
        return self._query_compiler.columns[0]
    @name.setter
    def name(self, name: str) -> None:
        # Renames in place (inplace=True) so the change is visible through
        # every view that shares this query compiler.
        self._query_compiler.rename({self.name: name}, inplace=True)
def rename(self, new_name: str) -> "Series":
"""
Rename name of series. Only column rename is supported. This does not change the underlying
Elasticsearch index, but adds a symbolic link from the new name (column) to the Elasticsearch field name.
For instance, if a field was called 'total_quantity' it could be renamed 'Total Quantity'.
Parameters
----------
new_name: str
Returns
-------
eland.Series
eland.Series with new name.
See Also
--------
:pandas_api_docs:`pandas.Series.rename`
Examples
--------
>>> df = ed.DataFrame('http://localhost:9200', 'flights')
>>> df.Carrier
0 Kibana Airlines
1 Logstash Airways
2 Logstash Airways
3 Kibana Airlines
4 Kibana Airlines
...
13054 Logstash Airways
13055 Logstash Airways
13056 Logstash Airways
13057 JetBeats
13058 JetBeats
Name: Carrier, Length: 13059, dtype: object
>>> df.Carrier.rename('Airline')
0 Kibana Airlines
1 Logstash Airways
2 Logstash Airways
3 Kibana Airlines
4 Kibana Airlines
...
13054 Logstash Airways
13055 Logstash Airways
13056 Logstash Airways
13057 JetBeats
13058 JetBeats
Name: Airline, Length: 13059, dtype: object
"""
return Series(
_query_compiler=self._query_compiler.rename({self.name: new_name})
)
    def head(self, n: int = 5) -> "Series":
        """Return a new Series restricted to the first ``n`` rows."""
        return Series(_query_compiler=self._query_compiler.head(n))
    def tail(self, n: int = 5) -> "Series":
        """Return a new Series restricted to the last ``n`` rows."""
        return Series(_query_compiler=self._query_compiler.tail(n))
    def sample(
        self,
        n: Optional[int] = None,
        frac: Optional[float] = None,
        random_state: Optional[int] = None,
    ) -> "Series":
        """Return a random sample of rows.

        ``n``, ``frac`` and ``random_state`` are forwarded to the query
        compiler (pandas.Series.sample-style parameters).
        """
        return Series(
            _query_compiler=self._query_compiler.sample(n, frac, random_state)
        )
def value_counts(self, es_size: int = 10) -> pd.Series:
"""
Return the value counts for the specified field.
**Note we can only do this for aggregatable Elasticsearch fields - (in general) numeric and keyword
rather than text fields**
TODO - implement remainder of pandas arguments
Parameters
----------
es_size: int, default 10
Number of buckets to return counts for, automatically sorts by count descending.
This parameter is specific to `eland`, and determines how many term buckets
elasticsearch should return out of the overall terms list.
Returns
-------
pandas.Series
number of occurrences of each value in the column
See Also
--------
:pandas_api_docs:`pandas.Series.value_counts`
:es_api_docs:`search-aggregations-bucket-terms-aggregation`
Examples
--------
>>> df = ed.DataFrame('http://localhost:9200', 'flights')
>>> df['Carrier'].value_counts()
Logstash Airways 3331
JetBeats 3274
Kibana Airlines 3234
ES-Air 3220
Name: Carrier, dtype: int64
"""
if not isinstance(es_size, int):
raise TypeError("es_size must be a positive integer.")
elif es_size <= 0:
raise ValueError("es_size must be a positive integer.")
return self._query_compiler.value_counts(es_size)
# dtype not implemented for Series as causes query to fail
# in pandas.core.computation.ops.Term.type
# ----------------------------------------------------------------------
# Rendering Methods
    def __repr__(self) -> str:
        """
        Return a string representation for a particular Series.

        Honors pandas display options (``display.max_rows``,
        ``display.min_rows``, ``display.show_dimensions``) so eland series
        render like pandas series.
        """
        buf = StringIO()
        # max_rows and max_cols determine the maximum size of the pretty printed tabular
        # representation of the series. pandas defaults are 60 and 20 respectively.
        # series where len(series) > max_rows shows a truncated view with 10 rows shown.
        max_rows = pd.get_option("display.max_rows")
        min_rows = pd.get_option("display.min_rows")
        if max_rows and len(self) > max_rows:
            # Too long to show in full: fall back to the truncated view.
            max_rows = min_rows
        show_dimensions = pd.get_option("display.show_dimensions")
        self.to_string(
            buf=buf,
            name=True,
            dtype=True,
            min_rows=min_rows,
            max_rows=max_rows,
            length=show_dimensions,
        )
        result = buf.getvalue()
        return result
    @docstring_parameter(DEFAULT_NUM_ROWS_DISPLAYED)
    def to_string(
        self,
        buf=None,
        na_rep="NaN",
        float_format=None,
        header=True,
        index=True,
        length=False,
        dtype=False,
        name=False,
        max_rows=None,
        min_rows=None,
    ) -> Optional[str]:
        """
        Render a string representation of the Series.
        Follows pandas implementation except when ``max_rows=None``. In this scenario, we set ``max_rows={0}`` to avoid
        accidentally dumping an entire index. This can be overridden by explicitly setting ``max_rows``.
        See Also
        --------
        :pandas_api_docs:`pandas.Series.to_string`
        for argument details.
        """
        # In pandas calling 'to_string' without max_rows set, will dump ALL rows - we avoid this
        # by limiting rows by default.
        num_rows = len(self)  # avoid multiple calls
        if num_rows <= DEFAULT_NUM_ROWS_DISPLAYED:
            if max_rows is None:
                max_rows = num_rows
            else:
                max_rows = min(num_rows, max_rows)
        elif max_rows is None:
            warnings.warn(
                f"Series.to_string called without max_rows set "
                f"- this will return entire index results. "
                f"Setting max_rows={DEFAULT_NUM_ROWS_DISPLAYED}"
                f" overwrite if different behaviour is required.",
                UserWarning,
            )
            max_rows = DEFAULT_NUM_ROWS_DISPLAYED
        # because of the way pandas handles max_rows=0, not having this throws an error
        # see eland issue #56
        if max_rows == 0:
            max_rows = 1
        # Create a slightly bigger dataframe than display
        # (the extra row lets pandas decide whether a truncation marker is needed).
        temp_series = self._build_repr(max_rows + 1)
        if buf is not None:
            # NOTE(review): stringify_path returns path-likes as plain strings;
            # this path assumes buf is a writable file-like object — a path
            # string would fail at the footer _buf.write below. Confirm callers.
            _buf = _expand_user(stringify_path(buf))
        else:
            _buf = StringIO()
        if num_rows == 0:
            # Empty series are rendered differently than
            # series with items. We can luckily use our
            # example series in this case.
            temp_series.head(0).to_string(
                buf=_buf,
                na_rep=na_rep,
                float_format=float_format,
                header=header,
                index=index,
                length=length,
                dtype=dtype,
                name=name,
                max_rows=max_rows,
            )
        else:
            # Create repr of fake series without name, length, dtype summary
            temp_series.to_string(
                buf=_buf,
                na_rep=na_rep,
                float_format=float_format,
                header=header,
                index=index,
                length=False,
                dtype=False,
                name=False,
                max_rows=max_rows,
            )
            # Create the summary
            footer = []
            if name and self.name is not None:
                footer.append(f"Name: {self.name}")
            if length and len(self) > max_rows:
                footer.append(f"Length: {len(self.index)}")
            if dtype:
                footer.append(f"dtype: {temp_series.dtype}")
            if footer:
                _buf.write(f"\n{', '.join(footer)}")
        # Only return the rendered text when no buffer was supplied
        # (mirrors pandas: writing to a caller-supplied buf returns None).
        if buf is None:
            result = _buf.getvalue()
            return result
    def to_pandas(self, show_progress: bool = False) -> pd.Series:
        """Materialize the Elasticsearch-backed series into an in-memory pandas.Series."""
        return self._query_compiler.to_pandas(show_progress=show_progress)[self.name]
    @property
    def dtype(self) -> np.dtype:
        """
        Return the dtype object of the underlying data.
        See Also
        --------
        :pandas_api_docs:`pandas.Series.dtype`
        """
        # Single-column series: first (only) entry of the dtypes list.
        return self._query_compiler.dtypes[0]
    @property
    def es_dtype(self) -> str:
        """
        Return the Elasticsearch type of the underlying data.
        """
        # Single-column series: first (only) entry of the ES dtypes list.
        return self._query_compiler.es_dtypes[0]
def __gt__(self, other: Union[int, float, "Series"]) -> BooleanFilter:
if isinstance(other, Series):
# Need to use scripted query to compare to values
painless = f"doc['{self.name}'].value > doc['{other.name}'].value"
return ScriptFilter(painless, lang="painless")
elif isinstance(other, (int, float)):
return Greater(field=self.name, value=other)
else:
raise NotImplementedError(other, type(other))
def __lt__(self, other: Union[int, float, "Series"]) -> BooleanFilter:
if isinstance(other, Series):
# Need to use scripted query to compare to values
painless = f"doc['{self.name}'].value < doc['{other.name}'].value"
return ScriptFilter(painless, lang="painless")
elif isinstance(other, (int, float)):
return Less(field=self.name, value=other)
else:
raise NotImplementedError(other, type(other))
def __ge__(self, other: Union[int, float, "Series"]) -> BooleanFilter:
if isinstance(other, Series):
# Need to use scripted query to compare to values
painless = f"doc['{self.name}'].value >= doc['{other.name}'].value"
return ScriptFilter(painless, lang="painless")
elif isinstance(other, (int, float)):
return GreaterEqual(field=self.name, value=other)
else:
raise NotImplementedError(other, type(other))
def __le__(self, other: Union[int, float, "Series"]) -> BooleanFilter:
if isinstance(other, Series):
# Need to use scripted query to compare to values
painless = f"doc['{self.name}'].value <= doc['{other.name}'].value"
return ScriptFilter(painless, lang="painless")
elif isinstance(other, (int, float)):
return LessEqual(field=self.name, value=other)
else:
raise NotImplementedError(other, type(other))
def __eq__(self, other: Union[int, float, str, "Series"]) -> BooleanFilter:
if isinstance(other, Series):
# Need to use scripted query to compare to values
painless = f"doc['{self.name}'].value == doc['{other.name}'].value"
return ScriptFilter(painless, lang="painless")
elif isinstance(other, (int, float)):
return Equal(field=self.name, value=other)
elif isinstance(other, str):
return Equal(field=self.name, value=other)
else:
raise NotImplementedError(other, type(other))
def __ne__(self, other: Union[int, float, str, "Series"]) -> BooleanFilter:
if isinstance(other, Series):
# Need to use scripted query to compare to values
painless = f"doc['{self.name}'].value != doc['{other.name}'].value"
return ScriptFilter(painless, lang="painless")
elif isinstance(other, (int, float)):
return NotFilter(Equal(field=self.name, value=other))
elif isinstance(other, str):
return NotFilter(Equal(field=self.name, value=other))
else:
raise NotImplementedError(other, type(other))
def isin(self, other: Union[Collection, pd.Series]) -> BooleanFilter:
if isinstance(other, (Collection, pd.Series)):
return IsIn(field=self.name, value=to_list(other))
else:
raise NotImplementedError(other, type(other))
def isna(self) -> BooleanFilter:
"""
Detect missing values.
Returns
-------
eland.Series
Mask of bool values for each element in Series that indicates whether an element is not an NA value.
See Also
--------
:pandas_api_docs:`pandas.Series.isna`
"""
return IsNull(field=self.name)
isnull = isna
def notna(self) -> BooleanFilter:
"""
Detect existing (non-missing) values.
Returns
-------
eland.Series
Mask of bool values for each element in Series that indicates whether an element is not an NA value
See Also
--------
:pandas_api_docs:`pandas.Series.notna`
"""
return NotNull(field=self.name)
notnull = notna
def quantile(
self, q: Union[int, float, List[int], List[float]] = 0.5
) -> Union[pd.Series, Any]:
"""
Used to calculate quantile for a given Series.
Parameters
----------
q:
float or array like, default 0.5
Value between 0 <= q <= 1, the quantile(s) to compute.
Returns
-------
pandas.Series or any single dtype
See Also
--------
:pandas_api_docs:`pandas.Series.quantile`
Examples
--------
>>> ed_flights = ed.DataFrame('http://localhost:9200', 'flights')
>>> ed_flights["timestamp"].quantile([.2,.5,.75]) # doctest: +SKIP
0.20 2018-01-09 04:30:57.289159912
0.50 2018-01-21 23:39:27.031627441
0.75 2018-02-01 04:54:59.256136963
Name: timestamp, dtype: datetime64[ns]
>>> ed_flights["dayOfWeek"].quantile() # doctest: +SKIP
3.0
>>> ed_flights["timestamp"].quantile() # doctest: +SKIP
Timestamp('2018-01-22 00:12:48.844534180')
"""
return self._query_compiler.quantile(
quantiles=q, numeric_only=None, is_dataframe=False
)
@property
def ndim(self) -> int:
"""
Returns 1 by definition of a Series
Returns
-------
int
By definition 1
See Also
--------
:pandas_api_docs:`pandas.Series.ndim`
"""
return 1
def filter(
self,
items: Optional[Sequence[str]] = None,
like: Optional[str] = None,
regex: Optional[str] = None,
axis: Optional[Union[int, str]] = None,
) -> "Series":
"""
Subset the dataframe rows or columns according to the specified index labels.
Note that this routine does not filter a dataframe on its
contents. The filter is applied to the labels of the index.
Parameters
----------
items : list-like
Keep labels from axis which are in items.
like : str
Keep labels from axis for which "like in label == True".
regex : str (regular expression)
Keep labels from axis for which re.search(regex, label) == True.
axis : {0 or ‘index’, 1 or ‘columns’, None}, default None
The axis to filter on, expressed either as an index (int) or axis name (str).
By default this is the info axis, ‘index’ for Series, ‘columns’ for DataFrame.
Returns
-------
eland.Series
See Also
--------
:pandas_api_docs:`pandas.Series.filter`
Notes
-----
The ``items``, ``like``, and ``regex`` parameters are
enforced to be mutually exclusive.
"""
filter_options_passed = sum([items is not None, bool(like), bool(regex)])
if filter_options_passed > 1:
raise TypeError(
"Keyword arguments `items`, `like`, or `regex` "
"are mutually exclusive"
)
elif filter_options_passed == 0:
raise TypeError("Must pass either 'items', 'like', or 'regex'")
# axis defaults to 'columns' for DataFrame, 'index' for Series
if axis is None:
axis = "index"
pd.Series._get_axis_name(axis)
new_query_compiler = self._query_compiler.filter(
items=items, like=like, regex=regex
)
return Series(_query_compiler=new_query_compiler)
def mode(self, es_size: int = 10) -> pd.Series:
"""
Calculate mode of a series
Parameters
----------
es_size: default 10
number of rows to be returned if mode has multiple values
See Also
--------
:pandas_api_docs:`pandas.Series.mode`
Examples
--------
>>> ed_ecommerce = ed.DataFrame('http://localhost:9200', 'ecommerce')
>>> ed_ecommerce["day_of_week"].mode()
0 Thursday
dtype: object
>>> ed_ecommerce["order_date"].mode()
0 2016-12-02 20:36:58
1 2016-12-04 23:44:10
2 2016-12-08 06:21:36
3 2016-12-08 09:38:53
4 2016-12-12 11:38:24
5 2016-12-12 19:46:34
6 2016-12-14 18:00:00
7 2016-12-15 11:38:24
8 2016-12-22 19:39:22
9 2016-12-24 06:21:36
dtype: datetime64[ns]
>>> ed_ecommerce["order_date"].mode(es_size=3)
0 2016-12-02 20:36:58
1 2016-12-04 23:44:10
2 2016-12-08 06:21:36
dtype: datetime64[ns]
"""
return self._query_compiler.mode(is_dataframe=False, es_size=es_size)
def es_match(
self,
text: str,
*,
match_phrase: bool = False,
match_only_text_fields: bool = True,
analyzer: Optional[str] = None,
fuzziness: Optional[Union[int, str]] = None,
**kwargs: Any,
) -> QueryFilter:
"""Filters data with an Elasticsearch ``match`` or ``match_phrase``
query depending on the given parameters.
Read more about `Full-Text Queries in Elasticsearch <https://www.elastic.co/guide/en/elasticsearch/reference/current/full-text-queries.html>`_
All additional keyword arguments are passed in the body of the match query.
Parameters
----------
text: str
String of text to search for
match_phrase: bool, default False
If True will use ``match_phrase`` instead of ``match`` query which takes into account
the order of the ``text`` parameter.
match_only_text_fields: bool, default True
When True this function will raise an error if any non-text fields
are queried to prevent fields that aren't analyzed from not working properly.
Set to False to ignore this preventative check.
analyzer: str, optional
Specify which analyzer to use for the match query
fuzziness: int, str, optional
Specify the fuzziness option for the match query
Returns
-------
QueryFilter
Boolean filter to be combined with other filters and
then passed to DataFrame[...].
Examples
--------
>>> df = ed.DataFrame(
... "http://localhost:9200", "ecommerce",
... columns=["category", "taxful_total_price"]
... )
>>> df[
... df.category.es_match("Men's")
... & (df.taxful_total_price > 200.0)
... ].head(5)
category taxful_total_price
13 [Men's Clothing] 266.96
33 [Men's Clothing] 221.98
54 [Men's Clothing] 234.98
93 [Men's Shoes, Women's Accessories] 239.98
273 [Men's Shoes] 214.98
<BLANKLINE>
[5 rows x 2 columns]
"""
return self._query_compiler.es_match(
text,
columns=[self.name],
match_phrase=match_phrase,
match_only_text_fields=match_only_text_fields,
analyzer=analyzer,
fuzziness=fuzziness,
**kwargs,
)
def es_info(self) -> str:
buf = StringIO()
super()._es_info(buf)
return buf.getvalue()
def __add__(self, right: "Series") -> "Series":
"""
Return addition of series and right, element-wise (binary operator add).
Parameters
----------
right: eland.Series
Returns
-------
eland.Series
Examples
--------
>>> df = ed.DataFrame('http://localhost:9200', 'ecommerce').head(5)
>>> df.taxful_total_price
0 36.98
1 53.98
2 199.98
3 174.98
4 80.98
Name: taxful_total_price, dtype: float64
>>> df.taxful_total_price + 1
0 37.980000
1 54.980000
2 200.979996
3 175.979996
4 81.980003
Name: taxful_total_price, dtype: float64
>>> df.total_quantity
0 2
1 2
2 2
3 2
4 2
Name: total_quantity, dtype: int64
>>> df.taxful_total_price + df.total_quantity
0 38.980000
1 55.980000
2 201.979996
3 176.979996
4 82.980003
dtype: float64
>>> df.customer_first_name + df.customer_last_name
0 EddieUnderwood
1 MaryBailey
2 GwenButler
3 DianeChandler
4 EddieWeber
dtype: object
>>> "First name: " + df.customer_first_name
0 First name: Eddie
1 First name: Mary
2 First name: Gwen
3 First name: Diane
4 First name: Eddie
Name: customer_first_name, dtype: object
"""
return self._numeric_op(right, _get_method_name())
def __truediv__(self, right: "Series") -> "Series":
"""
Return floating division of series and right, element-wise (binary operator truediv).
Parameters
----------
right: eland.Series
Returns
-------
eland.Series
Examples
--------
>>> df = ed.DataFrame('http://localhost:9200', 'ecommerce').head(5)
>>> df.taxful_total_price
0 36.98
1 53.98
2 199.98
3 174.98
4 80.98
Name: taxful_total_price, dtype: float64
>>> df.total_quantity
0 2
1 2
2 2
3 2
4 2
Name: total_quantity, dtype: int64
>>> df.taxful_total_price / df.total_quantity
0 18.490000
1 26.990000
2 99.989998
3 87.489998
4 40.490002
dtype: float64
"""
return self._numeric_op(right, _get_method_name())
def __floordiv__(self, right: "Series") -> "Series":
"""
Return integer division of series and right, element-wise (binary operator floordiv //).
Parameters
----------
right: eland.Series
Returns
-------
eland.Series
Examples
--------
>>> df = ed.DataFrame('http://localhost:9200', 'ecommerce').head(5)
>>> df.taxful_total_price
0 36.98
1 53.98
2 199.98
3 174.98
4 80.98
Name: taxful_total_price, dtype: float64
>>> df.total_quantity
0 2
1 2
2 2
3 2
4 2
Name: total_quantity, dtype: int64
>>> df.taxful_total_price // df.total_quantity
0 18.0
1 26.0
2 99.0
3 87.0
4 40.0
dtype: float64
"""
return self._numeric_op(right, _get_method_name())
def __mod__(self, right: "Series") -> "Series":
"""
Return modulo of series and right, element-wise (binary operator mod %).
Parameters
----------
right: eland.Series
Returns
-------
eland.Series
Examples
--------
>>> df = ed.DataFrame('http://localhost:9200', 'ecommerce').head(5)
>>> df.taxful_total_price
0 36.98
1 53.98
2 199.98
3 174.98
4 80.98
Name: taxful_total_price, dtype: float64
>>> df.total_quantity
0 2
1 2
2 2
3 2
4 2
Name: total_quantity, dtype: int64
>>> df.taxful_total_price % df.total_quantity
0 0.980000
1 1.980000
2 1.979996
3 0.979996
4 0.980003
dtype: float64
"""
return self._numeric_op(right, _get_method_name())
def __mul__(self, right: "Series") -> "Series":
"""
Return multiplication of series and right, element-wise (binary operator mul).
Parameters
----------
right: eland.Series
Returns
-------
eland.Series
Examples
--------
>>> df = ed.DataFrame('http://localhost:9200', 'ecommerce').head(5)
>>> df.taxful_total_price
0 36.98
1 53.98
2 199.98
3 174.98
4 80.98
Name: taxful_total_price, dtype: float64
>>> df.total_quantity
0 2
1 2
2 2
3 2
4 2
Name: total_quantity, dtype: int64
>>> df.taxful_total_price * df.total_quantity
0 73.959999
1 107.959999
2 399.959991
3 349.959991
4 161.960007
dtype: float64
"""
return self._numeric_op(right, _get_method_name())
def __sub__(self, right: "Series") -> "Series":
"""
Return subtraction of series and right, element-wise (binary operator sub).
Parameters
----------
right: eland.Series
Returns
-------
eland.Series
Examples
--------
>>> df = ed.DataFrame('http://localhost:9200', 'ecommerce').head(5)
>>> df.taxful_total_price
0 36.98
1 53.98
2 199.98
3 174.98
4 80.98
Name: taxful_total_price, dtype: float64
>>> df.total_quantity
0 2
1 2
2 2
3 2
4 2
Name: total_quantity, dtype: int64
>>> df.taxful_total_price - df.total_quantity
0 34.980000
1 51.980000
2 197.979996
3 172.979996
4 78.980003
dtype: float64
"""
return self._numeric_op(right, _get_method_name())
def __pow__(self, right: "Series") -> "Series":
"""
Return exponential power of series and right, element-wise (binary operator pow).
Parameters
----------
right: eland.Series
Returns
-------
eland.Series
Examples
--------
>>> df = ed.DataFrame('http://localhost:9200', 'ecommerce').head(5)
>>> df.taxful_total_price
0 36.98
1 53.98
2 199.98
3 174.98
4 80.98
Name: taxful_total_price, dtype: float64
>>> df.total_quantity
0 2
1 2
2 2
3 2
4 2
Name: total_quantity, dtype: int64
>>> df.taxful_total_price ** df.total_quantity
0 1367.520366
1 2913.840351
2 39991.998691
3 30617.998905
4 6557.760944
dtype: float64
"""
return self._numeric_op(right, _get_method_name())
def __radd__(self, left: "Series") -> "Series":
"""
Return addition of series and left, element-wise (binary operator add).
Parameters
----------
left: eland.Series
Returns
-------
eland.Series
Examples
--------
>>> df = ed.DataFrame('http://localhost:9200', 'ecommerce').head(5)
>>> df.taxful_total_price
0 36.98
1 53.98
2 199.98
3 174.98
4 80.98
Name: taxful_total_price, dtype: float64
>>> 1 + df.taxful_total_price
0 37.980000
1 54.980000
2 200.979996
3 175.979996
4 81.980003
Name: taxful_total_price, dtype: float64
"""
return self._numeric_op(left, _get_method_name())
def __rtruediv__(self, left: "Series") -> "Series":
"""
Return division of series and left, element-wise (binary operator div).
Parameters
----------
left: eland.Series
Returns
-------
eland.Series
Examples
--------
>>> df = ed.DataFrame('http://localhost:9200', 'ecommerce').head(5)
>>> df.taxful_total_price
0 36.98
1 53.98
2 199.98
3 174.98
4 80.98
Name: taxful_total_price, dtype: float64
>>> 1.0 / df.taxful_total_price
0 0.027042
1 0.018525
2 0.005001
3 0.005715
4 0.012349
Name: taxful_total_price, dtype: float64
"""
return self._numeric_op(left, _get_method_name())
def __rfloordiv__(self, left: "Series") -> "Series":
"""
Return integer division of series and left, element-wise (binary operator floordiv //).
Parameters
----------
left: eland.Series
Returns
-------
eland.Series
Examples
--------
>>> df = ed.DataFrame('http://localhost:9200', 'ecommerce').head(5)
>>> df.taxful_total_price
0 36.98
1 53.98
2 199.98
3 174.98
4 80.98
Name: taxful_total_price, dtype: float64
>>> 500.0 // df.taxful_total_price
0 13.0
1 9.0
2 2.0
3 2.0
4 6.0
Name: taxful_total_price, dtype: float64
"""
return self._numeric_op(left, _get_method_name())
def __rmod__(self, left: "Series") -> "Series":
"""
Return modulo of series and left, element-wise (binary operator mod %).
Parameters
----------
left: eland.Series
Returns
-------
eland.Series
Examples
--------
>>> df = ed.DataFrame('http://localhost:9200', 'ecommerce').head(5)
>>> df.taxful_total_price
0 36.98
1 53.98
2 199.98
3 174.98
4 80.98
Name: taxful_total_price, dtype: float64
>>> 500.0 % df.taxful_total_price
0 19.260006
1 14.180004
2 100.040009
3 150.040009
4 14.119980
Name: taxful_total_price, dtype: float64
"""
return self._numeric_op(left, _get_method_name())
def __rmul__(self, left: "Series") -> "Series":
"""
Return multiplication of series and left, element-wise (binary operator mul).
Parameters
----------
left: eland.Series
Returns
-------
eland.Series
Examples
--------
>>> df = ed.DataFrame('http://localhost:9200', 'ecommerce').head(5)
>>> df.taxful_total_price
0 36.98
1 53.98
2 199.98
3 174.98
4 80.98
Name: taxful_total_price, dtype: float64
>>> 10.0 * df.taxful_total_price
0 369.799995
1 539.799995
2 1999.799957
3 1749.799957
4 809.800034
Name: taxful_total_price, dtype: float64
"""
return self._numeric_op(left, _get_method_name())
def __rpow__(self, left: "Series") -> "Series":
"""
Return exponential power of series and left, element-wise (binary operator pow).
Parameters
----------
left: eland.Series
Returns
-------
eland.Series
Examples
--------
>>> df = ed.DataFrame('http://localhost:9200', 'ecommerce').head(5)
>>> df.total_quantity
0 2
1 2
2 2
3 2
4 2
Name: total_quantity, dtype: int64
>>> np.int_(2) ** df.total_quantity
0 4.0
1 4.0
2 4.0
3 4.0
4 4.0
Name: total_quantity, dtype: float64
"""
return self._numeric_op(left, _get_method_name())
def __rsub__(self, left: "Series") -> "Series":
"""
Return subtraction of series and left, element-wise (binary operator sub).
Parameters
----------
left: eland.Series
Returns
-------
eland.Series
Examples
--------
>>> df = ed.DataFrame('http://localhost:9200', 'ecommerce').head(5)
>>> df.taxful_total_price
0 36.98
1 53.98
2 199.98
3 174.98
4 80.98
Name: taxful_total_price, dtype: float64
>>> 1.0 - df.taxful_total_price
0 -35.980000
1 -52.980000
2 -198.979996
3 -173.979996
4 -79.980003
Name: taxful_total_price, dtype: float64
"""
return self._numeric_op(left, _get_method_name())
add = __add__
div = __truediv__
divide = __truediv__
floordiv = __floordiv__
mod = __mod__
mul = __mul__
multiply = __mul__
pow = __pow__
sub = __sub__
subtract = __sub__
truediv = __truediv__
radd = __radd__
rdiv = __rtruediv__
rdivide = __rtruediv__
rfloordiv = __rfloordiv__
rmod = __rmod__
rmul = __rmul__
rmultiply = __rmul__
rpow = __rpow__
rsub = __rsub__
rsubtract = __rsub__
rtruediv = __rtruediv__
# __div__ is technically Python 2.x only
# but pandas has it so we do too.
__div__ = __truediv__
__rdiv__ = __rtruediv__
    def _numeric_op(self, right: Any, method_name: str) -> "Series":
        """
        Apply the arithmetic operation *method_name* as ``self op right``.

        ``right`` may be another eland Series, a numpy-compatible number,
        or a string. When both operands are Series they must share the same
        Elasticsearch client, index pattern and index field
        (``check_arithmetics`` raises otherwise).

        Naming of the resulting Series
        ------------------------------
        result = SeriesA op SeriesB   -> result.name is None
        result = SeriesA op np.number -> result.name == SeriesA.name
        result = SeriesA op str       -> result.name == SeriesA.name
        Naming is consistent for rops.

        Parameters
        ----------
        right: Any
            Right-hand operand (eland.Series, number, or str).
        method_name: str
            Dunder name of the operation, e.g. ``"__add__"``.

        Returns
        -------
        eland.Series
            Lazily-evaluated Series representing the operation.

        Raises
        ------
        TypeError
            If ``right`` is not a Series, number or string.
        """
        if isinstance(right, Series):
            # Check the two Series are compatible (raises on error):
            self._query_compiler.check_arithmetics(right._query_compiler)
            right_object = ArithmeticSeries(
                right._query_compiler, right.name, right.dtype
            )
            display_name = None
        elif np.issubdtype(np.dtype(type(right)), np.number):
            right_object = ArithmeticNumber(right, np.dtype(type(right)))
            display_name = self.name
        elif isinstance(right, str):
            right_object = ArithmeticString(right)
            display_name = self.name
        else:
            raise TypeError(
                f"unsupported operation type(s) [{method_name!r}] "
                f"for operands ['{type(self)}' with dtype '{self.dtype}', "
                f"'{type(right).__name__}']"
            )
        left_object = ArithmeticSeries(self._query_compiler, self.name, self.dtype)
        left_object.arithmetic_operation(method_name, right_object)
        series = Series(
            _query_compiler=self._query_compiler.arithmetic_op_fields(
                display_name, left_object
            )
        )
        # force set name to 'display_name'
        series._query_compiler._mappings.display_names = [display_name]
        return series
def max(self, numeric_only: Optional[bool] = None) -> pd.Series:
"""
Return the maximum of the Series values
TODO - implement remainder of pandas arguments, currently non-numerics are not supported
Returns
-------
float
max value
See Also
--------
:pandas_api_docs:`pandas.Series.max`
Examples
--------
>>> s = ed.DataFrame('http://localhost:9200', 'flights')['AvgTicketPrice']
>>> int(s.max())
1199
"""
results = super().max(numeric_only=numeric_only)
return results.squeeze()
def mean(self, numeric_only: Optional[bool] = None) -> pd.Series:
"""
Return the mean of the Series values
TODO - implement remainder of pandas arguments, currently non-numerics are not supported
Returns
-------
float
mean value
See Also
--------
:pandas_api_docs:`pandas.Series.mean`
Examples
--------
>>> s = ed.DataFrame('http://localhost:9200', 'flights')['AvgTicketPrice']
>>> int(s.mean())
628
"""
results = super().mean(numeric_only=numeric_only)
return results.squeeze()
def median(self, numeric_only: Optional[bool] = None) -> pd.Series:
"""
Return the median of the Series values
TODO - implement remainder of pandas arguments, currently non-numerics are not supported
Returns
-------
float
median value
See Also
--------
:pandas_api_docs:`pandas.Series.median`
Examples
--------
>>> s = ed.DataFrame('http://localhost:9200', 'flights')['AvgTicketPrice']
>>> int(s.median())
640
"""
results = super().median(numeric_only=numeric_only)
return results.squeeze()
def min(self, numeric_only: Optional[bool] = None) -> pd.Series:
"""
Return the minimum of the Series values
TODO - implement remainder of pandas arguments, currently non-numerics are not supported
Returns
-------
float
min value
See Also
--------
:pandas_api_docs:`pandas.Series.min`
Examples
--------
>>> s = ed.DataFrame('http://localhost:9200', 'flights')['AvgTicketPrice']
>>> int(s.min())
100
"""
results = super().min(numeric_only=numeric_only)
return results.squeeze()
def sum(self, numeric_only: Optional[bool] = None) -> pd.Series:
"""
Return the sum of the Series values
TODO - implement remainder of pandas arguments, currently non-numerics are not supported
Returns
-------
float
sum of all values
See Also
--------
:pandas_api_docs:`pandas.Series.sum`
Examples
--------
>>> s = ed.DataFrame('http://localhost:9200', 'flights')['AvgTicketPrice']
>>> int(s.sum())
8204364
"""
results = super().sum(numeric_only=numeric_only)
return results.squeeze()
def nunique(self) -> pd.Series:
"""
Return the number of unique values in a Series
Returns
-------
int
Number of unique values
See Also
--------
:pandas_api_docs:`pandas.Series.nunique`
Examples
--------
>>> s = ed.DataFrame('http://localhost:9200', 'flights')['Carrier']
>>> s.nunique()
4
"""
results = super().nunique()
return results.squeeze()
def var(self, numeric_only: Optional[bool] = None) -> pd.Series:
"""
Return variance for a Series
Returns
-------
float
var value
See Also
--------
:pandas_api_docs:`pandas.Series.var`
Examples
--------
>>> s = ed.DataFrame('http://localhost:9200', 'flights')['AvgTicketPrice']
>>> int(s.var())
70964
"""
results = super().var(numeric_only=numeric_only)
return results.squeeze()
def std(self, numeric_only: Optional[bool] = None) -> pd.Series:
"""
Return standard deviation for a Series
Returns
-------
float
std value
See Also
--------
:pandas_api_docs:`pandas.Series.var`
Examples
--------
>>> s = ed.DataFrame('http://localhost:9200', 'flights')['AvgTicketPrice']
>>> int(s.std())
266
"""
results = super().std(numeric_only=numeric_only)
return results.squeeze()
def mad(self, numeric_only: Optional[bool] = None) -> pd.Series:
"""
Return median absolute deviation for a Series
Returns
-------
float
mad value
See Also
--------
:pandas_api_docs:`pandas.Series.mad`
Examples
--------
>>> s = ed.DataFrame('http://localhost:9200', 'flights')['AvgTicketPrice']
>>> int(s.mad())
213
"""
results = super().mad(numeric_only=numeric_only)
return results.squeeze()
def describe(self) -> pd.Series:
"""
Generate descriptive statistics that summarize the central tendency, dispersion and shape of a
dataset’s distribution, excluding NaN values.
Analyzes both numeric and object series, as well as DataFrame column sets of mixed data types.
The output will vary depending on what is provided. Refer to the notes below for more detail.
TODO - add additional arguments (current only numeric values supported)
Returns
-------
pandas.Series:
Summary information
See Also
--------
:pandas_api_docs:`pandas.Series.describe`
Examples
--------
>>> df = ed.DataFrame('http://localhost:9200', 'flights') # ignoring percentiles as they don't generate consistent results
>>> df.AvgTicketPrice.describe() # doctest: +SKIP
count 13059.000000
mean 628.253689
std 266.386661
min 100.020531
...
...
...
max 1199.729004
Name: AvgTicketPrice, dtype: float64
"""
return super().describe().squeeze()
# def values TODO - not implemented as causes current implementation of query to fail
def to_numpy(self) -> None:
"""
Not implemented.
In pandas this returns a Numpy representation of the Series. This would involve scan/scrolling the
entire index.
If this is required, call ``ed.eland_to_pandas(ed_series).values``, *but beware this will scan/scroll the entire
Elasticsearch index(s) into memory.*
See Also
--------
:pandas_api_docs:`pandas.DataFrame.to_numpy`
eland_to_pandas
Examples
--------
>>> ed_s = ed.Series('http://localhost:9200', 'flights', name='Carrier').head(5)
>>> pd_s = ed.eland_to_pandas(ed_s)
>>> print(f"type(ed_s)={type(ed_s)}\\ntype(pd_s)={type(pd_s)}")
type(ed_s)=<class 'eland.series.Series'>
type(pd_s)=<class 'pandas.core.series.Series'>
>>> ed_s
0 Kibana Airlines
1 Logstash Airways
2 Logstash Airways
3 Kibana Airlines
4 Kibana Airlines
Name: Carrier, dtype: object
>>> pd_s.to_numpy()
array(['Kibana Airlines', 'Logstash Airways', 'Logstash Airways',
'Kibana Airlines', 'Kibana Airlines'], dtype=object)
"""
raise NotImplementedError(
"This method would scan/scroll the entire Elasticsearch index(s) into memory."
"If this is explicitly required and there is sufficient memory, call `ed.eland_to_pandas(ed_df).values`"
)
| 29.67855
| 150
| 0.544555
|
4a02b8041679e30b176c1408caca95295fab81f5
| 5,593
|
py
|
Python
|
src/highdicom/sr/utils.py
|
malaterre/highdicom
|
1d02c328d1f7aee028d5d61c124d2aff11396603
|
[
"MIT"
] | 64
|
2020-02-28T13:46:47.000Z
|
2022-02-16T15:48:58.000Z
|
src/highdicom/sr/utils.py
|
malaterre/highdicom
|
1d02c328d1f7aee028d5d61c124d2aff11396603
|
[
"MIT"
] | 80
|
2020-02-29T01:32:19.000Z
|
2022-02-14T23:54:38.000Z
|
src/highdicom/sr/utils.py
|
malaterre/highdicom
|
1d02c328d1f7aee028d5d61c124d2aff11396603
|
[
"MIT"
] | 18
|
2020-02-28T15:11:17.000Z
|
2022-01-02T13:23:54.000Z
|
"""Utilities for working with SR document instances."""
from typing import List, Optional, Union
from pydicom.dataset import Dataset
from pydicom.sr.coding import Code
from highdicom.sr.coding import CodedConcept
from highdicom.sr.enum import ValueTypeValues, RelationshipTypeValues
from highdicom.sr.value_types import ContentItem
def find_content_items(
    dataset: Dataset,
    name: Optional[Union[CodedConcept, Code]] = None,
    value_type: Optional[Union[ValueTypeValues, str]] = None,
    relationship_type: Optional[Union[RelationshipTypeValues, str]] = None,
    recursive: bool = False
) -> List[Dataset]:
    """Finds content items in a Structured Report document that match a given
    query.

    Parameters
    ----------
    dataset: pydicom.dataset.Dataset
        SR document instance
    name: Union[highdicom.sr.CodedConcept, pydicom.sr.coding.Code, None], optional
        Coded name that items should have
    value_type: Union[highdicom.sr.ValueTypeValues, str, None], optional
        Type of value that items should have
        (e.g. ``highdicom.sr.ValueTypeValues.CONTAINER``)
    relationship_type: Union[highdicom.sr.RelationshipTypeValues, str, None], optional
        Type of relationship that items should have with its parent
        (e.g. ``highdicom.sr.RelationshipTypeValues.CONTAINS``)
    recursive: bool, optional
        Whether search should be performed recursively, i.e. whether contained
        child content items should also be queried

    Returns
    -------
    List[pydicom.dataset.Dataset]
        flat list of all content items that matched the query

    Raises
    ------
    AttributeError
        When data set does not contain Content Sequence attribute.
    ValueError
        When ``value_type`` or ``relationship_type`` is not a valid value of
        the corresponding enumeration.

    """  # noqa: E501
    if not hasattr(dataset, 'ContentSequence'):
        raise AttributeError(
            'Data set does not contain a Content Sequence attribute.'
        )
    # Normalize/validate query arguments once up front instead of re-creating
    # the enum values for every visited content item.
    if value_type is not None:
        value_type = ValueTypeValues(value_type)
    if relationship_type is not None:
        relationship_type = RelationshipTypeValues(relationship_type)

    def _matches(item: ContentItem) -> bool:
        # Criteria that were not provided match everything.
        if name is not None and item.name != name:
            return False
        if value_type is not None and item.value_type != value_type:
            return False
        if relationship_type is not None:
            if getattr(item, 'relationship_type', None) is None:
                return False
            if item.relationship_type != relationship_type:
                return False
        return True

    def _search_tree(node: Dataset) -> List[Dataset]:
        # Depth-first traversal of the content tree, returning matches in
        # document order.
        matched_content_items = []
        for content_item in node.ContentSequence:
            name_code = content_item.ConceptNameCodeSequence[0]
            item = ContentItem(
                value_type=content_item.ValueType,
                name=CodedConcept(
                    value=name_code.CodeValue,
                    scheme_designator=name_code.CodingSchemeDesignator,
                    meaning=name_code.CodeMeaning
                ),
                relationship_type=content_item.get('RelationshipType', None)
            )
            if _matches(item):
                matched_content_items.append(content_item)
            if recursive and hasattr(content_item, 'ContentSequence'):
                matched_content_items += _search_tree(content_item)
        return matched_content_items

    return _search_tree(dataset)
def get_coded_name(item: Dataset) -> CodedConcept:
    """Gets the concept name of a SR Content Item.

    Parameters
    ----------
    item: pydicom.dataset.Dataset
        Content Item

    Returns
    -------
    highdicom.sr.CodedConcept
        Concept name

    Raises
    ------
    AttributeError
        When the dataset has no "ConceptNameCodeSequence" attribute and
        therefore does not represent a SR Content Item.

    """
    try:
        name = item.ConceptNameCodeSequence[0]
    except AttributeError:
        # ``from None``: the low-level attribute-lookup traceback adds no
        # information for the caller.
        raise AttributeError(
            'Dataset does not contain attribute "ConceptNameCodeSequence" and '
            'thus doesn\'t represent a SR Content Item.'
        ) from None
    return CodedConcept.from_dataset(name)
def get_coded_value(item: Dataset) -> CodedConcept:
    """Gets the value of a SR Content Item with Value Type CODE.

    Parameters
    ----------
    item: pydicom.dataset.Dataset
        Content Item

    Returns
    -------
    highdicom.sr.CodedConcept
        Value

    Raises
    ------
    AttributeError
        When the dataset has no "ConceptCodeSequence" attribute and
        therefore does not represent a SR Content Item of Value Type CODE.

    """
    try:
        value = item.ConceptCodeSequence[0]
    except AttributeError:
        # ``from None``: the low-level attribute-lookup traceback adds no
        # information for the caller.
        raise AttributeError(
            'Dataset does not contain attribute "ConceptCodeSequence" and '
            'thus doesn\'t represent a SR Content Item of Value Type CODE.'
        ) from None
    return CodedConcept.from_dataset(value)
| 32.9
| 86
| 0.639371
|
4a02b8deef77ac2acd6a1ed67de6a626264865d8
| 2,161
|
py
|
Python
|
wip_bots/bot.py
|
axu8888/pc-bot
|
8a51f7c6069ea9189ecfd283040772677bc02a1b
|
[
"MIT"
] | null | null | null |
wip_bots/bot.py
|
axu8888/pc-bot
|
8a51f7c6069ea9189ecfd283040772677bc02a1b
|
[
"MIT"
] | null | null | null |
wip_bots/bot.py
|
axu8888/pc-bot
|
8a51f7c6069ea9189ecfd283040772677bc02a1b
|
[
"MIT"
] | null | null | null |
from discord.ext import tasks, commands
import pandas
import praw
import time
import asyncio
import discord
# @bot.command(name = "scrape")
# async def scrape():
# print("i am here")
# subreddit = reddit.subreddit('buildapcsales')
# channel = bot.get_channel(411692488177483787)
# if(channel == None):
# print("channel is None")
# for post in subreddit.new(limit=10):
# # for word in keywords:
# # temp = word.lower()
# # if temp in post.title:
# print("nice")
# await channel.send(post.title + " " + post.url)
# await asyncio.sleep(5)
# SECURITY NOTE(review): Reddit API credentials are hard-coded in source —
# move them to a config file or environment variables (like BOT_TOKEN.txt
# below) and rotate the exposed secret.
client = discord.Client()
bot = commands.Bot(command_prefix='!')
reddit = praw.Reddit(client_id='9qfv43B5Uu7bcA', client_secret='kfS26jo2EU8-XoafwFfK4LVmfxK6iA', user_agent = 'yodude8888')
def get_channel(channels, channel_name):
    """
    Return the first channel in *channels* whose ``name`` equals
    *channel_name*, or None if there is no match.

    Bug fix: the original ignored its ``channels`` argument and iterated
    the global ``client.get_all_channels()`` instead.
    """
    for channel in channels:
        if channel.name == channel_name:
            return channel
    return None
# NOTE(review): this rebinds ``client`` (already created above) and calls
# get_channel() before the client has logged in, so get_all_channels() is
# empty and ``general_channel`` will always be None — confirm and remove.
client = discord.Client()
general_channel = get_channel(client.get_all_channels(), 'general')
@bot.event
async def on_ready():
    """Called once the bot has connected; starts the background scraper."""
    # Debug marker confirming the gateway connection succeeded.
    print("fjaewoifjoaweifjaoweifjaweoifjweoaif")
    # Schedule the reddit scraper on the bot's event loop.
    bot.loop.create_task(scrape())
async def scrape():
    """
    Background task: poll r/buildapcsales and post new submissions to the
    configured Discord channel, pausing 5 seconds between messages.

    Bug fixes vs. the original:
    - ``while not client.is_closed`` tested the bound method object itself
      (always truthy), so the loop body never ran; it also checked the unused
      global ``client`` instead of the running ``bot``.
    - Each polling cycle re-sent the same 10 newest posts; a seen-ID set now
      prevents duplicate messages.
    """
    # await bot.wait_until_ready()  # NOTE(review): consider enabling so
    # bot.get_channel() below is guaranteed to resolve.
    subreddit = reddit.subreddit('buildapcsales')
    channel = bot.get_channel(411692488177483787)
    seen_ids = set()
    while not bot.is_closed():
        for post in subreddit.new(limit=10):
            if post.id in seen_ids:
                continue  # already posted this submission
            seen_ids.add(post.id)
            print("nice")
            await channel.send(post.title + " " + post.url)
            await asyncio.sleep(5)  # original per-post throttle
        # Avoid a hot loop on cycles where nothing new appeared.
        await asyncio.sleep(5)
@bot.command(name="idea", help = "idea generator")
async def notify(ctx):
    """Respond to the '!idea' command with a fixed placeholder message."""
    await ctx.send("I love Kerim")
# @bot.event
# async def on_ready():
#     print("pog")
# Read the bot token from an untracked file so it stays out of source control.
with open("BOT_TOKEN.txt", "r") as token_file:
    TOKEN = token_file.read()
    print("Token file read")
bot.run(TOKEN)
# hot_posts = subreddit.top("day",limit=10)
# for post in hot_posts:
#     print(post.title)
# NOTE(review): bot.run() above blocks until the bot shuts down, so these
# definitions are effectively dead code — scrape() never reads ``keywords``.
# ``dict = {}`` also shadows the ``dict`` builtin; rename or remove.
keywords = ['b550', '3080', '5800x', '5600x', '3080 ti', '3070', '3080']
dict = {}
| 21.61
| 123
| 0.633966
|
4a02b9d79022079c08b0eedbf2731a6923333dd0
| 2,522
|
py
|
Python
|
src/first_partial_exercises/ex_5.py
|
soyalextreme/estructura-datos-algoritmos
|
e82729fb4a4babad7c9d781e7abcb97aaafdfaab
|
[
"Unlicense"
] | 1
|
2020-09-09T14:31:28.000Z
|
2020-09-09T14:31:28.000Z
|
src/first_partial_exercises/ex_5.py
|
soyalextreme/estructura-datos-algoritmos
|
e82729fb4a4babad7c9d781e7abcb97aaafdfaab
|
[
"Unlicense"
] | null | null | null |
src/first_partial_exercises/ex_5.py
|
soyalextreme/estructura-datos-algoritmos
|
e82729fb4a4babad7c9d781e7abcb97aaafdfaab
|
[
"Unlicense"
] | null | null | null |
"""
27-09-2020
Excersice 5
Estructura de Datos y algoritmos
6to Semestre
Client Ticket Shop
Alejandro AS
"""
from first_partial_exercises.ex_2 import Report
from lib.menu.epp_menu import Menu
from lib.inputs import input_int, input_float, input_str_non_empty
from lib.util import still_bool, clean_screen
from first_partial_exercises.ex_4 import Ticket, Article
clients = []
class Client():
    """
    A shop client identified by name and email (the email allows multiple
    ticket transactions per client).

    Fix: removed the redundant class-level ``name``/``email`` attributes;
    they were shared across instances and immediately shadowed by the
    instance attributes set in ``__init__``.
    """

    def __init__(self, name, email):
        self.name = name
        self.email = email
def selection_client():
    """
    Interactively select a client from the global ``clients`` list.

    Cycles through the registered clients one at a time, printing each name;
    the user types 's' to select the currently displayed client. If exactly
    one client is registered it is returned immediately without prompting.

    Returns
    -------
    Client
        The selected client.
    """
    s = ""
    if len(clients) == 1:
        return clients[0]
    i = 0
    while s.lower() != "s":
        clean_screen()
        print(clients[i].name)
        s = input("[s] to select the user\n_")
        if s.lower() == "s":
            continue
        # Wrap around to the first client after showing the last one.
        if i == len(clients) - 1:
            i = 0
        else:
            i += 1
    return clients[i]
def handle_add_ticket():
    """
    Create a new ticket (transaction) for an existing client.

    Requires at least one registered client; otherwise prints a message and
    returns 0. Repeatedly prompts for article description, amount and unit
    price until the user declines to add more, then shows the ticket report.
    """
    if len(clients) == 0:
        print("No clients register for the moment, Cant do transactions.")
        print("Please, Add a client")
        return 0
    client = selection_client()
    t = Ticket(client.name)
    still = True
    while still:
        clean_screen()
        print(f"Client: {client.name}")
        print("Adding New Article")
        print("*" * 20)
        desc = input_str_non_empty("Description Article: ")
        amount = input_int("Amount: ", False, min_val=1, default=1)
        # Unit price rounded to cents.
        price = round(input_float("Price per product: ",
                                  False, min_val=1, default=1), 2)
        new_article = Article(desc, amount, price)
        t.add_article(new_article)
        still = still_bool("More Products? [y/n]: ")
    t.show_report()
def handle_add_client():
    """
    Prompt for a new client's name and email and register the client
    in the global ``clients`` list.
    """
    client_name = input_str_non_empty("Client Name: ")
    client_email = input_str_non_empty("Client email: ")
    clients.append(Client(client_name, client_email))
    print(f"{client_name} Added to clients")
def main():
    """
    Entry point for this exercise: build the menu options and start the menu.
    """
    options = [
        (1, "ADD CLIENT", handle_add_client),
        (2, "NEW TRANSACTION", handle_add_ticket),
    ]
    Menu(options, exit_val=3, still=False).start()
| 23.137615
| 74
| 0.587232
|
4a02bb458bd27f6a76f6e731e3857441285c8221
| 18,949
|
py
|
Python
|
lib/plugins.py
|
Durendal/electrum-rby
|
0dadd13467d44bcc7128f0dec0fa1aeff8d22576
|
[
"MIT"
] | null | null | null |
lib/plugins.py
|
Durendal/electrum-rby
|
0dadd13467d44bcc7128f0dec0fa1aeff8d22576
|
[
"MIT"
] | 1
|
2021-11-15T17:47:29.000Z
|
2021-11-15T17:47:29.000Z
|
lib/plugins.py
|
Durendal/electrum-rby
|
0dadd13467d44bcc7128f0dec0fa1aeff8d22576
|
[
"MIT"
] | 1
|
2017-11-13T23:19:46.000Z
|
2017-11-13T23:19:46.000Z
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from collections import namedtuple
import traceback
import sys
import os
import imp
import pkgutil
import time
import threading
from .util import *
from .i18n import _
from .util import profiler, PrintError, DaemonThread, UserCancelled, ThreadJob
# wallet_type -> callable that lazily loads the owning plugin and registers
# its wallet constructor (populated by Plugins.register_wallet_type)
plugin_loaders = {}
# names of methods marked as hook points via the @hook decorator
hook_names = set()
# hook name -> list of (plugin instance, bound method) pairs (see BasePlugin)
hooks = {}
class Plugins(DaemonThread):
    """Daemon thread that discovers, loads and manages Electrum plugins.

    Plugins are modules of the 'electrum_rby_plugins' package.  Each module
    advertises the GUIs it supports ('available_for') and may register
    wallet types and keystores; GUI-specific implementations are loaded
    lazily on first use.
    """

    @profiler
    def __init__(self, config, is_local, gui_name):
        DaemonThread.__init__(self)
        if is_local:
            find = imp.find_module('plugins')
            plugins = imp.load_module('electrum_rby_plugins', *find)
        else:
            plugins = __import__('electrum_rby_plugins')
        self.pkgpath = os.path.dirname(plugins.__file__)
        self.config = config
        self.hw_wallets = {}
        self.plugins = {}
        self.gui_name = gui_name
        self.descriptions = {}
        self.device_manager = DeviceMgr(config)
        self.load_plugins()
        self.add_jobs(self.device_manager.thread_jobs())
        self.start()

    def load_plugins(self):
        """Scan the plugin package, register wallet types/keystores, and
        load the plugins the user has enabled in the config."""
        for loader, name, ispkg in pkgutil.iter_modules([self.pkgpath]):
            # do not load deprecated plugins
            if name in ['plot', 'exchange_rate']:
                continue
            m = loader.find_module(name).load_module(name)
            d = m.__dict__
            gui_good = self.gui_name in d.get('available_for', [])
            if not gui_good:
                continue
            details = d.get('registers_wallet_type')
            if details:
                self.register_wallet_type(name, gui_good, details)
            details = d.get('registers_keystore')
            if details:
                self.register_keystore(name, gui_good, details)
            self.descriptions[name] = d
            if not d.get('requires_wallet_type') and self.config.get('use_' + name):
                try:
                    self.load_plugin(name)
                except BaseException as e:
                    traceback.print_exc(file=sys.stdout)
                    self.print_error("cannot initialize plugin %s:" % name, str(e))

    def get(self, name):
        """Return the loaded plugin called *name*, or None."""
        return self.plugins.get(name)

    def count(self):
        """Return the number of currently loaded plugins."""
        return len(self.plugins)

    def load_plugin(self, name):
        """Import and instantiate the GUI-specific implementation of *name*.

        Returns the (possibly cached) plugin instance.  Raises RuntimeError
        if no implementation exists for the current GUI.
        """
        if name in self.plugins:
            return self.plugins[name]
        full_name = 'electrum_rby_plugins.' + name + '.' + self.gui_name
        loader = pkgutil.find_loader(full_name)
        if not loader:
            raise RuntimeError("%s implementation for %s plugin not found"
                               % (self.gui_name, name))
        p = loader.load_module(full_name)
        plugin = p.Plugin(self, self.config, name)
        self.add_jobs(plugin.thread_jobs())
        self.plugins[name] = plugin
        self.print_error("loaded", name)
        return plugin

    def close_plugin(self, plugin):
        self.remove_jobs(plugin.thread_jobs())

    def enable(self, name):
        """Persistently enable plugin *name* and return its instance."""
        self.config.set_key('use_' + name, True, True)
        p = self.get(name)
        if p:
            return p
        return self.load_plugin(name)

    def disable(self, name):
        """Persistently disable plugin *name* and close it if loaded."""
        self.config.set_key('use_' + name, False, True)
        p = self.get(name)
        if not p:
            return
        self.plugins.pop(name)
        p.close()
        self.print_error("closed", name)

    def toggle(self, name):
        p = self.get(name)
        return self.disable(name) if p else self.enable(name)

    def is_available(self, name, w):
        """Whether plugin *name* can run: its imports resolve and it
        supports wallet *w*'s type."""
        d = self.descriptions.get(name)
        if not d:
            return False
        deps = d.get('requires', [])
        for dep, s in deps:
            try:
                __import__(dep)
            except ImportError:
                return False
        requires = d.get('requires_wallet_type', [])
        return not requires or w.wallet_type in requires

    def get_hardware_support(self):
        """Return a [name, description, plugin] list for each enabled
        hardware-wallet plugin usable with the current GUI."""
        out = []
        for name, (gui_good, details) in self.hw_wallets.items():
            if gui_good:
                try:
                    p = self.get_plugin(name)
                    if p.is_enabled():
                        out.append([name, details[2], p])
                except Exception:
                    # narrowed from a bare 'except:' so KeyboardInterrupt /
                    # SystemExit are no longer swallowed here
                    traceback.print_exc()
                    self.print_error("cannot load plugin for:", name)
        return out

    def register_wallet_type(self, name, gui_good, wallet_type):
        """Register *wallet_type*; its constructor loads the plugin lazily."""
        from .wallet import register_wallet_type, register_constructor
        self.print_error("registering wallet type", (wallet_type, name))
        def loader():
            plugin = self.get_plugin(name)
            register_constructor(wallet_type, plugin.wallet_class)
        register_wallet_type(wallet_type)
        plugin_loaders[wallet_type] = loader

    def register_keystore(self, name, gui_good, details):
        """Register a keystore whose class is resolved when first used."""
        from .keystore import register_keystore
        def dynamic_constructor(d):
            return self.get_plugin(name).keystore_class(d)
        if details[0] == 'hardware':
            self.hw_wallets[name] = (gui_good, details)
            self.print_error("registering hardware %s: %s" %(name, details))
            register_keystore(details[1], dynamic_constructor)

    def get_plugin(self, name):
        """Return the plugin called *name*, loading it if necessary."""
        if name not in self.plugins:
            self.load_plugin(name)
        return self.plugins[name]

    def run(self):
        # daemon loop: poll registered thread jobs until stopped
        while self.is_running():
            time.sleep(0.1)
            self.run_jobs()
        self.on_stop()
def hook(func):
    """Decorator declaring *func*'s name as a plugin hook point.

    Plugins implementing a method with this name are wired up by
    BasePlugin.__init__ and invoked via run_hook().
    """
    hook_names.add(func.__name__)
    return func
def run_hook(name, *args):
    """Invoke hook *name* on every enabled plugin that implements it.

    A plugin exception is logged and treated as a falsy result.  Falsy
    results are discarded; at most one plugin may return a truthy value
    (enforced by the assert), and that value is returned to the caller.
    Returns None when no plugin produced a truthy result.
    """
    results = []
    f_list = hooks.get(name, [])
    for p, f in f_list:
        if p.is_enabled():
            try:
                r = f(*args)
            except Exception:
                print_error("Plugin error")
                traceback.print_exc(file=sys.stdout)
                r = False
            if r:
                results.append(r)
    if results:
        assert len(results) == 1, results
        return results[0]
class BasePlugin(PrintError):
    """Base class for all Electrum plugins.

    On construction the instance registers every method whose name matches
    a declared hook point (see the @hook decorator) in the module-level
    'hooks' table; close() unregisters them again.
    """

    def __init__(self, parent, config, name):
        self.parent = parent  # The plugins object
        self.name = name
        self.config = config
        self.wallet = None
        # add self to hooks
        for k in dir(self):
            if k in hook_names:
                l = hooks.get(k, [])
                l.append((self, getattr(self, k)))
                hooks[k] = l

    def diagnostic_name(self):
        # name used by PrintError as the log prefix
        return self.name

    def __str__(self):
        return self.name

    def close(self):
        # remove self from hooks
        for k in dir(self):
            if k in hook_names:
                l = hooks.get(k, [])
                l.remove((self, getattr(self, k)))
                hooks[k] = l
        self.parent.close_plugin(self)
        self.on_close()

    def on_close(self):
        # subclasses may override to release plugin-specific resources
        pass

    def requires_settings(self):
        # True if the plugin contributes a settings dialog (see settings_dialog)
        return False

    def thread_jobs(self):
        # jobs to be scheduled on the Plugins daemon thread
        return []

    def is_enabled(self):
        return self.is_available() and self.config.get('use_'+self.name) is True

    def is_available(self):
        return True

    def settings_dialog(self):
        pass
class DeviceNotFoundError(Exception):
    """Raised when the expected hardware device is not connected."""
    pass

class DeviceUnpairableError(Exception):
    """Raised when a hardware device cannot be paired with a wallet."""
    pass

# A physical device as reported by HID enumeration (see DeviceMgr.scan_devices).
Device = namedtuple("Device", "path interface_number id_ product_key usage_page")
# A connected, not-yet-paired device plus its label and initialization state.
DeviceInfo = namedtuple("DeviceInfo", "device label initialized")
class DeviceMgr(ThreadJob, PrintError):
    '''Manages hardware clients.  A client communicates over a hardware
    channel with the device.

    In addition to tracking device HID IDs, the device manager tracks
    hardware wallets and manages wallet pairing.  A HID ID may be
    paired with a wallet when it is confirmed that the hardware device
    matches the wallet, i.e. they have the same master public key.  A
    HID ID can be unpaired if e.g. it is wiped.

    Because of hotplugging, a wallet must request its client
    dynamically each time it is required, rather than caching it
    itself.

    The device manager is shared across plugins, so just one place
    does hardware scans when needed.  By tracking HID IDs, if a device
    is plugged into a different port the wallet is automatically
    re-paired.

    Wallets are informed on connect / disconnect events.  It must
    implement connected(), disconnected() callbacks.  Being connected
    implies a pairing.  Callbacks can happen in any thread context,
    and we do them without holding the lock.

    Confusingly, the HID ID (serial number) reported by the HID system
    doesn't match the device ID reported by the device itself.  We use
    the HID IDs.

    This plugin is thread-safe.  Currently only devices supported by
    hidapi are implemented.'''

    def __init__(self, config):
        super(DeviceMgr, self).__init__()
        # Keyed by xpub.  The value is the device id if the device
        # has been paired, and None otherwise.
        self.xpub_ids = {}
        # A map of clients.  The key is the client, the value is
        # a (path, id_) pair.
        self.clients = {}
        # What we recognise.  Each entry is a (vendor_id, product_id)
        # pair.
        self.recognised_hardware = set()
        # For synchronization of the pairing/client state above
        self.lock = threading.RLock()
        # serializes calls into the hid library (see scan_devices)
        self.hid_lock = threading.RLock()
        self.config = config

    def thread_jobs(self):
        # Thread job to handle device timeouts
        return [self]

    def run(self):
        '''Handle device timeouts.  Runs in the context of the Plugins
        thread.'''
        with self.lock:
            clients = list(self.clients.keys())
        cutoff = time.time() - self.config.get_session_timeout()
        for client in clients:
            client.timeout(cutoff)

    def register_devices(self, device_pairs):
        """Declare (vendor_id, product_id) pairs we should recognise."""
        for pair in device_pairs:
            self.recognised_hardware.add(pair)

    def create_client(self, device, handler, plugin):
        """Return a cached client for *device*, or ask *plugin* to create one."""
        # Get from cache first
        client = self.client_lookup(device.id_)
        if client:
            return client
        client = plugin.create_client(device, handler)
        if client:
            self.print_error("Registering", client)
            with self.lock:
                self.clients[client] = (device.path, device.id_)
        return client

    def xpub_id(self, xpub):
        # device id paired with this xpub, or None
        with self.lock:
            return self.xpub_ids.get(xpub)

    def xpub_by_id(self, id_):
        # reverse lookup: xpub paired with device id_, or None
        with self.lock:
            for xpub, xpub_id in self.xpub_ids.items():
                if xpub_id == id_:
                    return xpub
            return None

    def unpair_xpub(self, xpub):
        """Forget the pairing for *xpub*; close its client outside the lock."""
        with self.lock:
            if not xpub in self.xpub_ids:
                return
            _id = self.xpub_ids.pop(xpub)
            client = self.client_lookup(_id)
            self.clients.pop(client, None)
        if client:
            client.close()

    def unpair_id(self, id_):
        xpub = self.xpub_by_id(id_)
        if xpub:
            self.unpair_xpub(xpub)

    def pair_xpub(self, xpub, id_):
        with self.lock:
            self.xpub_ids[xpub] = id_

    def client_lookup(self, id_):
        # linear scan of the (small) client map by device id
        with self.lock:
            for client, (path, client_id) in self.clients.items():
                if client_id == id_:
                    return client
            return None

    def client_by_id(self, id_):
        '''Returns a client for the device ID if one is registered.  If
        a device is wiped or in bootloader mode pairing is impossible;
        in such cases we communicate by device ID and not wallet.'''
        self.scan_devices()
        return self.client_lookup(id_)

    def client_for_keystore(self, plugin, handler, keystore, force_pair):
        """Return a client for *keystore*, pairing with a device first if
        needed and *force_pair* is true; updates the handler's status icon."""
        self.print_error("getting client for keystore")
        handler.update_status(False)
        devices = self.scan_devices()
        xpub = keystore.xpub
        derivation = keystore.get_derivation()
        client = self.client_by_xpub(plugin, xpub, handler, devices)
        if client is None and force_pair:
            info = self.select_device(plugin, handler, keystore, devices)
            client = self.force_pair_xpub(plugin, handler, info, xpub, derivation, devices)
        if client:
            handler.update_status(True)
        self.print_error("end client for keystore")
        return client

    def client_by_xpub(self, plugin, xpub, handler, devices):
        # Returns the client already paired with xpub, creating it from a
        # connected device if needed; returns None (implicitly) otherwise.
        _id = self.xpub_id(xpub)
        client = self.client_lookup(_id)
        if client:
            # An unpaired client might have another wallet's handler
            # from a prior scan.  Replace to fix dialog parenting.
            client.handler = handler
            return client
        for device in devices:
            if device.id_ == _id:
                return self.create_client(device, handler, plugin)

    def force_pair_xpub(self, plugin, handler, info, xpub, derivation, devices):
        # The wallet has not been previously paired, so let the user
        # choose an unpaired device and compare its first address.
        client = self.client_lookup(info.device.id_)
        if client and client.is_pairable():
            # See comment above for same code
            client.handler = handler
            # This will trigger a PIN/passphrase entry request
            try:
                client_xpub = client.get_xpub(derivation)
            except (UserCancelled, RuntimeError):
                # Bad / cancelled PIN / passphrase
                client_xpub = None
            if client_xpub == xpub:
                self.pair_xpub(xpub, info.device.id_)
                return client
        # The user input has wrong PIN or passphrase, or cancelled input,
        # or it is not pairable
        raise DeviceUnpairableError(
            _('Electrum cannot pair with your %s.\n\n'
              'Before you request rubycoins to be sent to addresses in this '
              'wallet, ensure you can pair with your device, or that you have '
              'its seed (and passphrase, if any). Otherwise all rubycoins you '
              'receive will be unspendable.') % plugin.device)

    def unpaired_device_infos(self, handler, plugin, devices=None):
        '''Returns a list of DeviceInfo objects: one for each connected,
        unpaired device accepted by the plugin.'''
        if devices is None:
            devices = self.scan_devices()
        devices = [dev for dev in devices if not self.xpub_by_id(dev.id_)]
        infos = []
        for device in devices:
            if not device.product_key in plugin.DEVICE_IDS:
                continue
            client = self.create_client(device, handler, plugin)
            if not client:
                continue
            infos.append(DeviceInfo(device, client.label(), client.is_initialized()))
        return infos

    def select_device(self, plugin, handler, keystore, devices=None):
        '''Ask the user to select a device to use if there is more than one,
        and return the DeviceInfo for the device.'''
        while True:
            infos = self.unpaired_device_infos(handler, plugin, devices)
            if infos:
                break
            msg = _('Could not connect to your %s. Verify the cable is '
                    'connected and that no other application is using it.\n\n'
                    'Try to connect again?') % plugin.device
            if not handler.yes_no_question(msg):
                raise UserCancelled()
            devices = None
        if len(infos) == 1:
            return infos[0]
        # select device by label
        for info in infos:
            if info.label == keystore.label:
                return info
        msg = _("Please select which %s device to use:") % plugin.device
        descriptions = [info.label + ' (%s)'%(_("initialized") if info.initialized else _("wiped")) for info in infos]
        c = handler.query_choice(msg, descriptions)
        if c is None:
            raise UserCancelled()
        info = infos[c]
        # save new label
        keystore.set_label(info.label)
        handler.win.wallet.save_keystore()
        return info

    def scan_devices(self):
        """Enumerate HID devices, prune disconnected clients, and return
        the list of recognised Device tuples."""
        # All currently supported hardware libraries use hid, so we
        # assume it here.  This can be easily abstracted if necessary.
        # Note this import must be local so those without hardware
        # wallet libraries are not affected.
        import hid
        self.print_error("scanning devices...")
        with self.hid_lock:
            hid_list = hid.enumerate(0, 0)
        # First see what's connected that we know about
        devices = []
        for d in hid_list:
            product_key = (d['vendor_id'], d['product_id'])
            if product_key in self.recognised_hardware:
                # Older versions of hid don't provide interface_number
                interface_number = d.get('interface_number', -1)
                usage_page = d['usage_page']
                id_ = d['serial_number']
                # fall back to the path when no serial number is reported
                if len(id_) == 0:
                    id_ = str(d['path'])
                id_ += str(interface_number) + str(usage_page)
                devices.append(Device(d['path'], interface_number,
                                      id_, product_key, usage_page))
        # Now find out what was disconnected
        pairs = [(dev.path, dev.id_) for dev in devices]
        disconnected_ids = []
        with self.lock:
            connected = {}
            for client, pair in self.clients.items():
                if pair in pairs:
                    connected[client] = pair
                else:
                    disconnected_ids.append(pair[1])
            self.clients = connected
        # Unpair disconnected devices
        for id_ in disconnected_ids:
            self.unpair_id(id_)
        return devices
| 35.551595
| 118
| 0.603145
|
4a02bba8c714acd649803766b340633564522772
| 1,725
|
py
|
Python
|
pioreactor_logs2slack/__init__.py
|
Pioreactor/pioreactor-logs2slack
|
ddf27d850b15291b212f4259fbf540e0b49da02b
|
[
"MIT"
] | null | null | null |
pioreactor_logs2slack/__init__.py
|
Pioreactor/pioreactor-logs2slack
|
ddf27d850b15291b212f4259fbf540e0b49da02b
|
[
"MIT"
] | 1
|
2021-11-11T00:34:49.000Z
|
2021-11-11T00:34:49.000Z
|
pioreactor_logs2slack/__init__.py
|
Pioreactor/pioreactor-logs2slack
|
ddf27d850b15291b212f4259fbf540e0b49da02b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import json
import logging
import click
from requests import post
from pioreactor.background_jobs.base import BackgroundJob
from pioreactor.config import config
from pioreactor.whoami import get_unit_name, get_latest_experiment_name
class Logs2Slack(BackgroundJob):
    """Background job that forwards Pioreactor MQTT log messages to Slack.

    Configuration is read from the [logs2slack] section of config.ini:
    'slack_webhook_url' (required) and 'log_level' (defaults to "INFO").
    """

    def __init__(self, unit, experiment):
        super(Logs2Slack, self).__init__(
            unit=unit, experiment=experiment, job_name="logs2slack"
        )
        self.slack_webhook_url = config.get("logs2slack", "slack_webhook_url")
        self.log_level = config.get("logs2slack", "log_level", fallback="INFO")
        if not self.slack_webhook_url:
            raise ValueError("[logs2slack] slack_webhook_url is not defined in your config.ini.")
        self.start_passive_listeners()

    def publish_to_slack(self, msg):
        """MQTT callback: post a formatted log line to the Slack webhook.

        Drops messages below the configured level; both self.log_level and
        payload['level'] must be names of standard logging levels.
        """
        payload = json.loads(msg.payload)
        # check to see if we should allow the logs based on the level
        # (compares the numeric values of the named logging levels).
        if getattr(logging, self.log_level) > getattr(logging, payload['level']):
            return
        slack_msg = f"[{payload['level']}] [{self.unit}] [{payload['task']}] {payload['message']}"
        encoded_json = json.dumps({"text": slack_msg}).encode("utf-8")
        post(
            self.slack_webhook_url, data=encoded_json,
            headers={'Content-Type': 'application/json'}
        )

    def start_passive_listeners(self):
        # subscribe to every job's log topic for this unit
        self.subscribe_and_callback(self.publish_to_slack, f"pioreactor/{self.unit}/+/logs/+")
@click.command(name="logs2slack")
def click_logs2slack():
    """
    turn on logging to Slack
    """
    # Construct the job (subscribes to MQTT log topics) and block until
    # it is disconnected.
    lg = Logs2Slack(
        unit=get_unit_name(), experiment=get_latest_experiment_name()
    )
    lg.block_until_disconnected()
| 31.944444
| 98
| 0.675362
|
4a02bc3ba744bc63f0bb5cc2a2238f885d07bf2b
| 2,761
|
py
|
Python
|
hail/python/hailtop/hailctl/dataproc/submit.py
|
joonan30/hail
|
aa5d906be9d4182b22b73ce6dd5bae5967acf9ce
|
[
"MIT"
] | null | null | null |
hail/python/hailtop/hailctl/dataproc/submit.py
|
joonan30/hail
|
aa5d906be9d4182b22b73ce6dd5bae5967acf9ce
|
[
"MIT"
] | null | null | null |
hail/python/hailtop/hailctl/dataproc/submit.py
|
joonan30/hail
|
aa5d906be9d4182b22b73ce6dd5bae5967acf9ce
|
[
"MIT"
] | null | null | null |
from subprocess import check_call
import os
import tempfile
import zipfile
def init_parser(parser):
    """Register the 'submit' subcommand's arguments on *parser*."""
    parser.add_argument('name', type=str, help='Cluster name.')
    # the local python script to submit as the PySpark job
    parser.add_argument('script', type=str)
    parser.add_argument('--files', required=False, type=str, help='Comma-separated list of files to add to the working directory of the Hail application.')
    parser.add_argument('--pyfiles', required=False, type=str, help='Comma-separated list of files (or directories with python files) to add to the PYTHONPATH.')
    parser.add_argument('--properties', '-p', required=False, type=str, help='Extra Spark properties to set.')
    parser.add_argument('--dry-run', action='store_true', help="Print gcloud dataproc command, but don't run it.")
def main(args, pass_through_args):  # pylint: disable=unused-argument
    """Submit a PySpark job (args.script) to the Dataproc cluster args.name.

    Builds a 'gcloud dataproc jobs submit pyspark' command from the parsed
    arguments, zips any extra python sources for the PYTHONPATH, prints the
    command, and runs it unless --dry-run was given.
    """
    print("Submitting to cluster '{}'...".format(args.name))

    # create files argument
    files = ''
    if args.files:
        files = args.files

    # Gather python sources from --pyfiles and the colon-separated
    # HAIL_SCRIPTS environment variable.  Drop empty entries: an unset
    # HAIL_SCRIPTS would otherwise contribute '' and force the creation
    # of a useless empty zip below.
    pyfiles = []
    if args.pyfiles:
        pyfiles.extend(args.pyfiles.split(','))
    pyfiles.extend(os.environ.get('HAIL_SCRIPTS', '').split(':'))
    pyfiles = [entry for entry in pyfiles if entry]

    if pyfiles:
        # mkstemp returns an *open* OS-level fd; close it immediately so it
        # is not leaked (previously only the path was kept).
        fd, tfile = tempfile.mkstemp(suffix='.zip', prefix='pyscripts_')
        os.close(fd)
        with zipfile.ZipFile(tfile, 'w', zipfile.ZIP_DEFLATED) as zipf:
            for hail_script_entry in pyfiles:
                if hail_script_entry.endswith('.py'):
                    zipf.write(hail_script_entry, arcname=os.path.basename(hail_script_entry))
                else:
                    # add every .py under the directory, keeping its
                    # top-level package name in the archive
                    for root, _, pyfiles_walk in os.walk(hail_script_entry):
                        for pyfile in pyfiles_walk:
                            if pyfile.endswith('.py'):
                                zipf.write(os.path.join(root, pyfile),
                                           os.path.relpath(os.path.join(root, pyfile),
                                                           os.path.join(hail_script_entry, '..')))
        pyfiles = tfile
    else:
        pyfiles = ''

    # create properties argument
    properties = ''
    if args.properties:
        properties = args.properties

    # pyspark submit command
    cmd = [
        'gcloud',
        'dataproc',
        'jobs',
        'submit',
        'pyspark',
        args.script,
        '--cluster={}'.format(args.name),
        '--files={}'.format(files),
        '--py-files={}'.format(pyfiles),
        '--properties={}'.format(properties)
    ]

    # append arguments to pass to the Hail script
    if pass_through_args:
        cmd.append('--')
        cmd.extend(pass_through_args)

    # print underlying gcloud command
    print('gcloud command:')
    print(' '.join(cmd[:6]) + ' \\\n    ' + ' \\\n    '.join(cmd[6:]))

    # submit job
    if not args.dry_run:
        check_call(cmd)
| 36.328947
| 161
| 0.594712
|
4a02bcd08bb896a55119557307037e8f649f3c33
| 6,971
|
py
|
Python
|
tests/test_spec_frequency_mask.py
|
jeongyoonlee/audiomentations
|
7f0112ae310989430e0ef7eb32c4116114810966
|
[
"MIT"
] | 930
|
2019-02-14T10:21:22.000Z
|
2022-03-31T03:49:48.000Z
|
tests/test_spec_frequency_mask.py
|
jeongyoonlee/audiomentations
|
7f0112ae310989430e0ef7eb32c4116114810966
|
[
"MIT"
] | 169
|
2019-02-12T21:16:14.000Z
|
2022-03-18T07:53:43.000Z
|
tests/test_spec_frequency_mask.py
|
jeongyoonlee/audiomentations
|
7f0112ae310989430e0ef7eb32c4116114810966
|
[
"MIT"
] | 122
|
2019-02-26T05:12:45.000Z
|
2022-03-24T08:45:51.000Z
|
import os
import unittest
import librosa
import numpy as np
from audiomentations import SpecFrequencyMask
from audiomentations.core.audio_loading_utils import load_sound_file
from demo.demo import DEMO_DIR
from .utils import plot_matrix
# Set to True to plot spectrograms while running these tests.
DEBUG = False
class TestSpecFrequencyMask(unittest.TestCase):
    """Tests for SpecFrequencyMask on mono and multichannel spectrograms."""

    def test_fill_zeros(self):
        """Constant fill (0.0): exactly mask_fraction of frequency rows zeroed."""
        samples, sample_rate = load_sound_file(
            os.path.join(DEMO_DIR, "acoustic_guitar_0.wav"), sample_rate=None
        )
        magnitude_spectrogram = librosa.feature.melspectrogram(
            y=samples, sr=sample_rate
        )
        mask_fraction = 0.05
        transform = SpecFrequencyMask(
            fill_mode="constant",
            fill_constant=0.0,
            min_mask_fraction=mask_fraction,
            max_mask_fraction=mask_fraction,
            p=1.0,
        )
        augmented_spectrogram = transform(magnitude_spectrogram)
        if DEBUG:
            plot_matrix(np.log(augmented_spectrogram))
        # the transform must have changed the spectrogram
        with np.testing.assert_raises(AssertionError):
            np.testing.assert_array_equal(augmented_spectrogram, magnitude_spectrogram)
        # count frequency rows that were zeroed out entirely
        num_zeroed_frequencies = 0
        for i in range(augmented_spectrogram.shape[0]):
            if sum(augmented_spectrogram[i]) == 0.0:
                num_zeroed_frequencies += 1
        self.assertEqual(
            num_zeroed_frequencies,
            int(round(magnitude_spectrogram.shape[0] * mask_fraction)),
        )

    def test_fill_zeros_multichannel(self):
        """Constant fill on a 3-channel (freq, time, channel) spectrogram."""
        samples, sample_rate = load_sound_file(
            os.path.join(DEMO_DIR, "background_noises", "hens.ogg"),
            sample_rate=None,
            mono=False,
        )
        assert samples.shape[0] == 2
        magnitude_spectrogram_chn0 = librosa.feature.melspectrogram(
            y=np.asfortranarray(samples[0, :]), sr=sample_rate
        )
        magnitude_spectrogram_chn1 = librosa.feature.melspectrogram(
            y=np.asfortranarray(samples[1, :]), sr=sample_rate
        )
        # build a 3-channel stack; channel 1 is duplicated into channel 2
        multichannel_magnitude_spectrogram = np.zeros(
            shape=(
                magnitude_spectrogram_chn0.shape[0],
                magnitude_spectrogram_chn0.shape[1],
                3,
            ),
            dtype=np.float32,
        )
        multichannel_magnitude_spectrogram[:, :, 0] = magnitude_spectrogram_chn0
        multichannel_magnitude_spectrogram[:, :, 1] = magnitude_spectrogram_chn1
        multichannel_magnitude_spectrogram[:, :, 2] = magnitude_spectrogram_chn1
        mask_fraction = 0.05
        transform = SpecFrequencyMask(
            fill_mode="constant",
            fill_constant=0.0,
            min_mask_fraction=mask_fraction,
            max_mask_fraction=mask_fraction,
            p=1.0,
        )
        augmented_spectrogram = transform(multichannel_magnitude_spectrogram)
        if DEBUG:
            image = (7 + np.log10(augmented_spectrogram + 0.0000001)) / 8
            plot_matrix(image)
        with np.testing.assert_raises(AssertionError):
            np.testing.assert_array_equal(
                augmented_spectrogram, multichannel_magnitude_spectrogram
            )
        # a masked frequency row must be zero across all times and channels
        num_zeroed_frequencies = 0
        for i in range(augmented_spectrogram.shape[0]):
            if np.sum(augmented_spectrogram[i]) == 0.0:
                num_zeroed_frequencies += 1
        self.assertEqual(
            num_zeroed_frequencies,
            int(round(multichannel_magnitude_spectrogram.shape[0] * mask_fraction)),
        )

    def test_fill_mean(self):
        """Mean fill: masked rows are constant (row mean) but not all-zero."""
        samples, sample_rate = load_sound_file(
            os.path.join(DEMO_DIR, "acoustic_guitar_0.wav"), sample_rate=None
        )
        magnitude_spectrogram = librosa.feature.melspectrogram(
            y=samples, sr=sample_rate
        )
        min_mask_fraction = 0.05
        max_mask_fraction = 0.09
        transform = SpecFrequencyMask(
            fill_mode="mean",
            min_mask_fraction=min_mask_fraction,
            max_mask_fraction=max_mask_fraction,
            p=1.0,
        )
        augmented_spectrogram = transform(magnitude_spectrogram)
        if DEBUG:
            plot_matrix(np.log(augmented_spectrogram))
        # a mean-filled row has min == max but a non-zero sum
        num_masked_frequencies = 0
        for i in range(augmented_spectrogram.shape[0]):
            frequency_slice = augmented_spectrogram[i]
            if (
                np.amin(frequency_slice) == np.amax(frequency_slice)
                and sum(frequency_slice) != 0.0
            ):
                num_masked_frequencies += 1
        self.assertGreaterEqual(
            num_masked_frequencies,
            int(round(magnitude_spectrogram.shape[0] * min_mask_fraction)),
        )
        self.assertLessEqual(
            num_masked_frequencies,
            int(round(magnitude_spectrogram.shape[0] * max_mask_fraction)),
        )

    def test_fill_mean_multichannel(self):
        """Mean fill on a 3-channel (freq, time, channel) spectrogram."""
        samples, sample_rate = load_sound_file(
            os.path.join(DEMO_DIR, "background_noises", "hens.ogg"),
            sample_rate=None,
            mono=False,
        )
        assert samples.shape[0] == 2
        magnitude_spectrogram_chn0 = librosa.feature.melspectrogram(
            y=np.asfortranarray(samples[0, :]), sr=sample_rate
        )
        magnitude_spectrogram_chn1 = librosa.feature.melspectrogram(
            y=np.asfortranarray(samples[1, :]), sr=sample_rate
        )
        # build a 3-channel stack; channel 1 is duplicated into channel 2
        multichannel_magnitude_spectrogram = np.zeros(
            shape=(
                magnitude_spectrogram_chn0.shape[0],
                magnitude_spectrogram_chn0.shape[1],
                3,
            ),
            dtype=np.float32,
        )
        multichannel_magnitude_spectrogram[:, :, 0] = magnitude_spectrogram_chn0
        multichannel_magnitude_spectrogram[:, :, 1] = magnitude_spectrogram_chn1
        multichannel_magnitude_spectrogram[:, :, 2] = magnitude_spectrogram_chn1
        mask_fraction = 0.05
        transform = SpecFrequencyMask(
            fill_mode="mean",
            min_mask_fraction=mask_fraction,
            max_mask_fraction=mask_fraction,
            p=1.0,
        )
        augmented_spectrogram = transform(multichannel_magnitude_spectrogram)
        if DEBUG:
            image = (7 + np.log10(augmented_spectrogram + 0.0000001)) / 8
            plot_matrix(image)
        with np.testing.assert_raises(AssertionError):
            np.testing.assert_array_equal(
                augmented_spectrogram, multichannel_magnitude_spectrogram
            )
        # a mean-filled row has min == max but a non-zero sum
        num_masked_frequencies = 0
        for i in range(augmented_spectrogram.shape[0]):
            frequency_slice = augmented_spectrogram[i]
            if (
                np.amin(frequency_slice) == np.amax(frequency_slice)
                and np.sum(frequency_slice) != 0.0
            ):
                num_masked_frequencies += 1
        self.assertEqual(
            num_masked_frequencies,
            int(round(multichannel_magnitude_spectrogram.shape[0] * mask_fraction)),
        )
| 35.030151
| 87
| 0.620571
|
4a02bd9401180eb17b602beea384160b64772ea1
| 12,247
|
py
|
Python
|
cogs/follow.py
|
smehlhoff/twitch-discord-bot
|
e1b91b589f47fe61a2d07efaf712225514ae2801
|
[
"MIT"
] | null | null | null |
cogs/follow.py
|
smehlhoff/twitch-discord-bot
|
e1b91b589f47fe61a2d07efaf712225514ae2801
|
[
"MIT"
] | null | null | null |
cogs/follow.py
|
smehlhoff/twitch-discord-bot
|
e1b91b589f47fe61a2d07efaf712225514ae2801
|
[
"MIT"
] | null | null | null |
import locale
import textwrap
import discord
from discord.ext import commands
from sqlalchemy import exc
from core.models import session, Follows
from core.utils import check_follows_exist, twitch_api_call, twitch_convert_timestamp, twitch_channel_uptime, \
retrieve_twitch_channel_id, retrieve_twitch_game, retrieve_twitch_channel_name, embed_message
class Follow:
    """
    Bot commands to handle channel follows for twitch.tv
    """

    def __init__(self, bot):
        self.bot = bot

    @commands.group()
    async def follows(self, ctx):
        """
        List followed channels
        """
        user_id = ctx.message.author.id
        # only list when no subcommand (add/import/live/...) was invoked
        if ctx.invoked_subcommand is None:
            follows = check_follows_exist(user_id, channel=None)
            if follows:
                channels = []
                for count, follow in enumerate(follows, start=1):
                    channels.append(f'{count}. {follow.channel}')
                embed = discord.Embed()
                embed.add_field(name='Channel Follows',
                                value=' \n'.join(channels))
                embed.set_footer(text=f'Requested by {ctx.message.author}')
                embed.colour = 0x738bd7
                await ctx.send(embed=embed)
            else:
                await embed_message(ctx, message_type='Error', message='No channels saved in database')

    @follows.command(aliases=['import'])
    async def _import(self, ctx, twitch_username: str):
        """
        Import channel follows from twitch username
        """
        user_id = ctx.message.author.id
        username = ctx.message.author.name
        twitch_user_id = await retrieve_twitch_channel_id(ctx, twitch_username)
        if twitch_user_id:
            data = await twitch_api_call(ctx, endpoint='users/follows?from_id=', channel=twitch_user_id, params='')
            if data:
                # build Follows rows only for channels not already stored
                objects = []
                for follow in data['data']:
                    channel_id = follow['to_id']
                    channel_name = await retrieve_twitch_channel_name(ctx, channel_id)
                    follows = check_follows_exist(user_id, channel_name)
                    if follows:
                        pass
                    else:
                        objects.append(
                            Follows(
                                user_id=user_id,
                                username=username,
                                channel=channel_name,
                                channel_id=channel_id
                            )
                        )
                if len(objects) == 0:
                    await embed_message(
                        ctx,
                        message_type='Error',
                        message='Channel follows already saved in database'
                    )
                else:
                    try:
                        session.add_all(objects)
                        session.commit()
                        await embed_message(
                            ctx,
                            message_type='Success',
                            message=f'Imported {len(objects)} channel follows to database'
                        )
                    except exc.OperationalError:
                        session.rollback()
                        await embed_message(
                            ctx,
                            message_type='Error',
                            message='Cannot save channel follows to database'
                        )

    @follows.command()
    async def add(self, ctx, *channels: str):
        """
        Add channel follow
        """
        user_id = ctx.message.author.id
        username = ctx.message.author.name
        for channel in channels:
            follows = check_follows_exist(user_id, channel)
            if follows:
                await embed_message(
                    ctx,
                    message_type='Error',
                    message=f'Channel {channel} is already saved in database'
                )
            else:
                channel_id = await retrieve_twitch_channel_id(ctx, channel)
                if channel_id:
                    try:
                        new_follow = Follows(
                            user_id=user_id, username=username, channel=channel, channel_id=channel_id)
                        session.add(new_follow)
                        session.commit()
                        await embed_message(
                            ctx,
                            message_type='Success',
                            message=f'Channel {channel} has been saved to database'
                        )
                    except exc.OperationalError:
                        session.rollback()
                        await embed_message(
                            ctx,
                            message_type='Error',
                            message=f'Cannot save channel {channel} to database'
                        )

    @follows.command()
    async def live(self, ctx):
        """
        List viewer count for live followed channels
        """
        user_id = ctx.message.author.id
        follows = check_follows_exist(user_id, channel=None)
        if follows:
            # channels interleaves "<n>. <name>" and its value;
            # [0::2] yields names, [1::2] yields values below
            channels = []
            count = 0
            for follow in follows:
                data = await twitch_api_call(
                    ctx,
                    endpoint='streams?user_id=',
                    channel=follow.channel_id,
                    params='&type=live'
                )
                try:
                    viewers = locale.format(
                        '%d', data['data'][0]['viewer_count'], grouping=True)
                    count += 1
                    channels.append(f'{count}. {follow.channel}')
                    channels.append(viewers)
                except IndexError:
                    # channel is not live: the 'data' list is empty
                    pass
            if count == 0:
                await embed_message(ctx, message_type='Error', message='No channels are currently live')
            else:
                embed = discord.Embed()
                embed.add_field(name='Live Channels',
                                value=' \n'.join(channels[0::2]))
                embed.add_field(
                    name='Viewers', value=' \n'.join(channels[1::2]))
                embed.set_footer(text=f'Requested by {ctx.message.author}')
                embed.colour = 0x738bd7
                await ctx.send(embed=embed)
        else:
            await embed_message(ctx, message_type='Error', message='No channels saved in database')

    @follows.command()
    async def uptime(self, ctx):
        """
        List uptime for live followed channels
        """
        user_id = ctx.message.author.id
        follows = check_follows_exist(user_id, channel=None)
        if follows:
            # same interleaved name/value layout as in live()
            channels = []
            count = 0
            for follow in follows:
                data = await twitch_api_call(
                    ctx,
                    endpoint='streams?user_id=',
                    channel=follow.channel_id,
                    params='&type=live'
                )
                try:
                    uptime = twitch_convert_timestamp(
                        data['data'][0]['started_at'])
                    uptime = twitch_channel_uptime(uptime)
                    count += 1
                    channels.append(f'{count}. {follow.channel}')
                    channels.append(str(uptime))
                except IndexError:
                    # channel is not live: the 'data' list is empty
                    pass
            if count == 0:
                await embed_message(ctx, message_type='Error', message='No channels are currently live')
            else:
                embed = discord.Embed()
                embed.add_field(name='Live Channels',
                                value=' \n'.join(channels[0::2]))
                embed.add_field(
                    name='Uptime', value=' \n'.join(channels[1::2]))
                embed.set_footer(text=f'Requested by {ctx.message.author}')
                embed.colour = 0x738bd7
                await ctx.send(embed=embed)
        else:
            await embed_message(ctx, message_type='Error', message='No channels saved in database')

    @follows.command()
    async def game(self, ctx):
        """
        List game title for live followed channels
        """
        user_id = ctx.message.author.id
        follows = check_follows_exist(user_id, channel=None)
        if follows:
            # same interleaved name/value layout as in live()
            channels = []
            count = 0
            for follow in follows:
                data = await twitch_api_call(
                    ctx,
                    endpoint='streams?user_id=',
                    channel=follow.channel_id,
                    params='&type=live'
                )
                try:
                    game = data['data'][0]['game_id']
                    game = await retrieve_twitch_game(ctx, game)
                    # keep long titles from overflowing the embed field
                    game = textwrap.shorten(game, width=60, placeholder="...")
                    count += 1
                    channels.append(f'{count}. {follow.channel}')
                    channels.append(game)
                except IndexError:
                    # channel is not live: the 'data' list is empty
                    pass
            if count == 0:
                await embed_message(ctx, message_type='Error', message='No channels are currently live')
            else:
                embed = discord.Embed()
                embed.add_field(name='Live Channels',
                                value=' \n'.join(channels[0::2]))
                embed.add_field(name='Game', value=' \n'.join(channels[1::2]))
                embed.set_footer(text=f'Requested by {ctx.message.author}')
                embed.colour = 0x738bd7
                await ctx.send(embed=embed)
        else:
            await embed_message(ctx, message_type='Error', message='No channels saved in database')

    @follows.command(aliases=['delete'])
    async def remove(self, ctx, *channels: str):
        """
        Remove channel follows
        """
        user_id = ctx.message.author.id
        for channel in channels:
            follows = check_follows_exist(user_id, channel)
            if follows:
                try:
                    for follow in follows:
                        session.delete(follow)
                    session.commit()
                    await embed_message(
                        ctx,
                        message_type='Success',
                        message=f'Channel {channel} has been removed from database'
                    )
                except exc.OperationalError:
                    session.rollback()
                    await embed_message(
                        ctx,
                        message_type='Error',
                        message=f'Cannot remove channel {channel} in database'
                    )
            else:
                await embed_message(
                    ctx,
                    message_type='Error',
                    message=f'Channel {channel} is not saved in database'
                )

    @follows.command(aliases=['removeall', 'deleteall'])
    async def remove_all(self, ctx):
        """
        Remove all channel follows
        """
        user_id = ctx.message.author.id
        follows = check_follows_exist(user_id, channel=None)
        if follows:
            try:
                for follow in follows:
                    session.delete(follow)
                session.commit()
                await embed_message(
                    ctx,
                    message_type='Success',
                    message=f'Removed {len(follows)} channel follows from database'
                )
            except exc.OperationalError:
                session.rollback()
                await embed_message(
                    ctx,
                    message_type='Error',
                    message='Cannot remove channel follows in database'
                )
        else:
            return await embed_message(
                ctx,
                message_type='Error',
                message='No channels saved in database'
            )
def setup(bot):
bot.add_cog(Follow(bot))
| 38.152648
| 115
| 0.477015
|
4a02bdb4f88c44684854b4451ee6fa39f0e995dd
| 22,710
|
py
|
Python
|
sdk/python/pulumi_azure_native/apimanagement/v20200601preview/api_diagnostic.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/apimanagement/v20200601preview/api_diagnostic.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/apimanagement/v20200601preview/api_diagnostic.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ApiDiagnosticArgs', 'ApiDiagnostic']
@pulumi.input_type
class ApiDiagnosticArgs:
def __init__(__self__, *,
api_id: pulumi.Input[str],
logger_id: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
service_name: pulumi.Input[str],
always_log: Optional[pulumi.Input[Union[str, 'AlwaysLog']]] = None,
backend: Optional[pulumi.Input['PipelineDiagnosticSettingsArgs']] = None,
diagnostic_id: Optional[pulumi.Input[str]] = None,
frontend: Optional[pulumi.Input['PipelineDiagnosticSettingsArgs']] = None,
http_correlation_protocol: Optional[pulumi.Input[Union[str, 'HttpCorrelationProtocol']]] = None,
log_client_ip: Optional[pulumi.Input[bool]] = None,
operation_name_format: Optional[pulumi.Input[Union[str, 'OperationNameFormat']]] = None,
sampling: Optional[pulumi.Input['SamplingSettingsArgs']] = None,
verbosity: Optional[pulumi.Input[Union[str, 'Verbosity']]] = None):
"""
The set of arguments for constructing a ApiDiagnostic resource.
:param pulumi.Input[str] api_id: API identifier. Must be unique in the current API Management service instance.
:param pulumi.Input[str] logger_id: Resource Id of a target logger.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] service_name: The name of the API Management service.
:param pulumi.Input[Union[str, 'AlwaysLog']] always_log: Specifies for what type of messages sampling settings should not apply.
:param pulumi.Input['PipelineDiagnosticSettingsArgs'] backend: Diagnostic settings for incoming/outgoing HTTP messages to the Backend
:param pulumi.Input[str] diagnostic_id: Diagnostic identifier. Must be unique in the current API Management service instance.
:param pulumi.Input['PipelineDiagnosticSettingsArgs'] frontend: Diagnostic settings for incoming/outgoing HTTP messages to the Gateway.
:param pulumi.Input[Union[str, 'HttpCorrelationProtocol']] http_correlation_protocol: Sets correlation protocol to use for Application Insights diagnostics.
:param pulumi.Input[bool] log_client_ip: Log the ClientIP. Default is false.
:param pulumi.Input[Union[str, 'OperationNameFormat']] operation_name_format: The format of the Operation Name for Application Insights telemetries. Default is Name.
:param pulumi.Input['SamplingSettingsArgs'] sampling: Sampling settings for Diagnostic.
:param pulumi.Input[Union[str, 'Verbosity']] verbosity: The verbosity level applied to traces emitted by trace policies.
"""
pulumi.set(__self__, "api_id", api_id)
pulumi.set(__self__, "logger_id", logger_id)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "service_name", service_name)
if always_log is not None:
pulumi.set(__self__, "always_log", always_log)
if backend is not None:
pulumi.set(__self__, "backend", backend)
if diagnostic_id is not None:
pulumi.set(__self__, "diagnostic_id", diagnostic_id)
if frontend is not None:
pulumi.set(__self__, "frontend", frontend)
if http_correlation_protocol is not None:
pulumi.set(__self__, "http_correlation_protocol", http_correlation_protocol)
if log_client_ip is not None:
pulumi.set(__self__, "log_client_ip", log_client_ip)
if operation_name_format is not None:
pulumi.set(__self__, "operation_name_format", operation_name_format)
if sampling is not None:
pulumi.set(__self__, "sampling", sampling)
if verbosity is not None:
pulumi.set(__self__, "verbosity", verbosity)
@property
@pulumi.getter(name="apiId")
def api_id(self) -> pulumi.Input[str]:
"""
API identifier. Must be unique in the current API Management service instance.
"""
return pulumi.get(self, "api_id")
@api_id.setter
def api_id(self, value: pulumi.Input[str]):
pulumi.set(self, "api_id", value)
@property
@pulumi.getter(name="loggerId")
def logger_id(self) -> pulumi.Input[str]:
"""
Resource Id of a target logger.
"""
return pulumi.get(self, "logger_id")
@logger_id.setter
def logger_id(self, value: pulumi.Input[str]):
pulumi.set(self, "logger_id", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="serviceName")
def service_name(self) -> pulumi.Input[str]:
"""
The name of the API Management service.
"""
return pulumi.get(self, "service_name")
@service_name.setter
def service_name(self, value: pulumi.Input[str]):
pulumi.set(self, "service_name", value)
@property
@pulumi.getter(name="alwaysLog")
def always_log(self) -> Optional[pulumi.Input[Union[str, 'AlwaysLog']]]:
"""
Specifies for what type of messages sampling settings should not apply.
"""
return pulumi.get(self, "always_log")
@always_log.setter
def always_log(self, value: Optional[pulumi.Input[Union[str, 'AlwaysLog']]]):
pulumi.set(self, "always_log", value)
@property
@pulumi.getter
def backend(self) -> Optional[pulumi.Input['PipelineDiagnosticSettingsArgs']]:
"""
Diagnostic settings for incoming/outgoing HTTP messages to the Backend
"""
return pulumi.get(self, "backend")
@backend.setter
def backend(self, value: Optional[pulumi.Input['PipelineDiagnosticSettingsArgs']]):
pulumi.set(self, "backend", value)
@property
@pulumi.getter(name="diagnosticId")
def diagnostic_id(self) -> Optional[pulumi.Input[str]]:
"""
Diagnostic identifier. Must be unique in the current API Management service instance.
"""
return pulumi.get(self, "diagnostic_id")
@diagnostic_id.setter
def diagnostic_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "diagnostic_id", value)
@property
@pulumi.getter
def frontend(self) -> Optional[pulumi.Input['PipelineDiagnosticSettingsArgs']]:
"""
Diagnostic settings for incoming/outgoing HTTP messages to the Gateway.
"""
return pulumi.get(self, "frontend")
@frontend.setter
def frontend(self, value: Optional[pulumi.Input['PipelineDiagnosticSettingsArgs']]):
pulumi.set(self, "frontend", value)
@property
@pulumi.getter(name="httpCorrelationProtocol")
def http_correlation_protocol(self) -> Optional[pulumi.Input[Union[str, 'HttpCorrelationProtocol']]]:
"""
Sets correlation protocol to use for Application Insights diagnostics.
"""
return pulumi.get(self, "http_correlation_protocol")
@http_correlation_protocol.setter
def http_correlation_protocol(self, value: Optional[pulumi.Input[Union[str, 'HttpCorrelationProtocol']]]):
pulumi.set(self, "http_correlation_protocol", value)
@property
@pulumi.getter(name="logClientIp")
def log_client_ip(self) -> Optional[pulumi.Input[bool]]:
"""
Log the ClientIP. Default is false.
"""
return pulumi.get(self, "log_client_ip")
@log_client_ip.setter
def log_client_ip(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "log_client_ip", value)
@property
@pulumi.getter(name="operationNameFormat")
def operation_name_format(self) -> Optional[pulumi.Input[Union[str, 'OperationNameFormat']]]:
"""
The format of the Operation Name for Application Insights telemetries. Default is Name.
"""
return pulumi.get(self, "operation_name_format")
@operation_name_format.setter
def operation_name_format(self, value: Optional[pulumi.Input[Union[str, 'OperationNameFormat']]]):
pulumi.set(self, "operation_name_format", value)
@property
@pulumi.getter
def sampling(self) -> Optional[pulumi.Input['SamplingSettingsArgs']]:
"""
Sampling settings for Diagnostic.
"""
return pulumi.get(self, "sampling")
@sampling.setter
def sampling(self, value: Optional[pulumi.Input['SamplingSettingsArgs']]):
pulumi.set(self, "sampling", value)
@property
@pulumi.getter
def verbosity(self) -> Optional[pulumi.Input[Union[str, 'Verbosity']]]:
"""
The verbosity level applied to traces emitted by trace policies.
"""
return pulumi.get(self, "verbosity")
@verbosity.setter
def verbosity(self, value: Optional[pulumi.Input[Union[str, 'Verbosity']]]):
pulumi.set(self, "verbosity", value)
class ApiDiagnostic(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
always_log: Optional[pulumi.Input[Union[str, 'AlwaysLog']]] = None,
api_id: Optional[pulumi.Input[str]] = None,
backend: Optional[pulumi.Input[pulumi.InputType['PipelineDiagnosticSettingsArgs']]] = None,
diagnostic_id: Optional[pulumi.Input[str]] = None,
frontend: Optional[pulumi.Input[pulumi.InputType['PipelineDiagnosticSettingsArgs']]] = None,
http_correlation_protocol: Optional[pulumi.Input[Union[str, 'HttpCorrelationProtocol']]] = None,
log_client_ip: Optional[pulumi.Input[bool]] = None,
logger_id: Optional[pulumi.Input[str]] = None,
operation_name_format: Optional[pulumi.Input[Union[str, 'OperationNameFormat']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sampling: Optional[pulumi.Input[pulumi.InputType['SamplingSettingsArgs']]] = None,
service_name: Optional[pulumi.Input[str]] = None,
verbosity: Optional[pulumi.Input[Union[str, 'Verbosity']]] = None,
__props__=None):
"""
Diagnostic details.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Union[str, 'AlwaysLog']] always_log: Specifies for what type of messages sampling settings should not apply.
:param pulumi.Input[str] api_id: API identifier. Must be unique in the current API Management service instance.
:param pulumi.Input[pulumi.InputType['PipelineDiagnosticSettingsArgs']] backend: Diagnostic settings for incoming/outgoing HTTP messages to the Backend
:param pulumi.Input[str] diagnostic_id: Diagnostic identifier. Must be unique in the current API Management service instance.
:param pulumi.Input[pulumi.InputType['PipelineDiagnosticSettingsArgs']] frontend: Diagnostic settings for incoming/outgoing HTTP messages to the Gateway.
:param pulumi.Input[Union[str, 'HttpCorrelationProtocol']] http_correlation_protocol: Sets correlation protocol to use for Application Insights diagnostics.
:param pulumi.Input[bool] log_client_ip: Log the ClientIP. Default is false.
:param pulumi.Input[str] logger_id: Resource Id of a target logger.
:param pulumi.Input[Union[str, 'OperationNameFormat']] operation_name_format: The format of the Operation Name for Application Insights telemetries. Default is Name.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[pulumi.InputType['SamplingSettingsArgs']] sampling: Sampling settings for Diagnostic.
:param pulumi.Input[str] service_name: The name of the API Management service.
:param pulumi.Input[Union[str, 'Verbosity']] verbosity: The verbosity level applied to traces emitted by trace policies.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ApiDiagnosticArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Diagnostic details.
:param str resource_name: The name of the resource.
:param ApiDiagnosticArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ApiDiagnosticArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
always_log: Optional[pulumi.Input[Union[str, 'AlwaysLog']]] = None,
api_id: Optional[pulumi.Input[str]] = None,
backend: Optional[pulumi.Input[pulumi.InputType['PipelineDiagnosticSettingsArgs']]] = None,
diagnostic_id: Optional[pulumi.Input[str]] = None,
frontend: Optional[pulumi.Input[pulumi.InputType['PipelineDiagnosticSettingsArgs']]] = None,
http_correlation_protocol: Optional[pulumi.Input[Union[str, 'HttpCorrelationProtocol']]] = None,
log_client_ip: Optional[pulumi.Input[bool]] = None,
logger_id: Optional[pulumi.Input[str]] = None,
operation_name_format: Optional[pulumi.Input[Union[str, 'OperationNameFormat']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sampling: Optional[pulumi.Input[pulumi.InputType['SamplingSettingsArgs']]] = None,
service_name: Optional[pulumi.Input[str]] = None,
verbosity: Optional[pulumi.Input[Union[str, 'Verbosity']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ApiDiagnosticArgs.__new__(ApiDiagnosticArgs)
__props__.__dict__["always_log"] = always_log
if api_id is None and not opts.urn:
raise TypeError("Missing required property 'api_id'")
__props__.__dict__["api_id"] = api_id
__props__.__dict__["backend"] = backend
__props__.__dict__["diagnostic_id"] = diagnostic_id
__props__.__dict__["frontend"] = frontend
__props__.__dict__["http_correlation_protocol"] = http_correlation_protocol
__props__.__dict__["log_client_ip"] = log_client_ip
if logger_id is None and not opts.urn:
raise TypeError("Missing required property 'logger_id'")
__props__.__dict__["logger_id"] = logger_id
__props__.__dict__["operation_name_format"] = operation_name_format
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["sampling"] = sampling
if service_name is None and not opts.urn:
raise TypeError("Missing required property 'service_name'")
__props__.__dict__["service_name"] = service_name
__props__.__dict__["verbosity"] = verbosity
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:apimanagement/v20200601preview:ApiDiagnostic"), pulumi.Alias(type_="azure-native:apimanagement:ApiDiagnostic"), pulumi.Alias(type_="azure-nextgen:apimanagement:ApiDiagnostic"), pulumi.Alias(type_="azure-native:apimanagement/v20170301:ApiDiagnostic"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20170301:ApiDiagnostic"), pulumi.Alias(type_="azure-native:apimanagement/v20180101:ApiDiagnostic"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180101:ApiDiagnostic"), pulumi.Alias(type_="azure-native:apimanagement/v20180601preview:ApiDiagnostic"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180601preview:ApiDiagnostic"), pulumi.Alias(type_="azure-native:apimanagement/v20190101:ApiDiagnostic"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20190101:ApiDiagnostic"), pulumi.Alias(type_="azure-native:apimanagement/v20191201:ApiDiagnostic"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201:ApiDiagnostic"), pulumi.Alias(type_="azure-native:apimanagement/v20191201preview:ApiDiagnostic"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201preview:ApiDiagnostic"), pulumi.Alias(type_="azure-native:apimanagement/v20201201:ApiDiagnostic"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20201201:ApiDiagnostic"), pulumi.Alias(type_="azure-native:apimanagement/v20210101preview:ApiDiagnostic"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20210101preview:ApiDiagnostic")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ApiDiagnostic, __self__).__init__(
'azure-native:apimanagement/v20200601preview:ApiDiagnostic',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ApiDiagnostic':
"""
Get an existing ApiDiagnostic resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ApiDiagnosticArgs.__new__(ApiDiagnosticArgs)
__props__.__dict__["always_log"] = None
__props__.__dict__["backend"] = None
__props__.__dict__["frontend"] = None
__props__.__dict__["http_correlation_protocol"] = None
__props__.__dict__["log_client_ip"] = None
__props__.__dict__["logger_id"] = None
__props__.__dict__["name"] = None
__props__.__dict__["operation_name_format"] = None
__props__.__dict__["sampling"] = None
__props__.__dict__["type"] = None
__props__.__dict__["verbosity"] = None
return ApiDiagnostic(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="alwaysLog")
def always_log(self) -> pulumi.Output[Optional[str]]:
"""
Specifies for what type of messages sampling settings should not apply.
"""
return pulumi.get(self, "always_log")
@property
@pulumi.getter
def backend(self) -> pulumi.Output[Optional['outputs.PipelineDiagnosticSettingsResponse']]:
"""
Diagnostic settings for incoming/outgoing HTTP messages to the Backend
"""
return pulumi.get(self, "backend")
@property
@pulumi.getter
def frontend(self) -> pulumi.Output[Optional['outputs.PipelineDiagnosticSettingsResponse']]:
"""
Diagnostic settings for incoming/outgoing HTTP messages to the Gateway.
"""
return pulumi.get(self, "frontend")
@property
@pulumi.getter(name="httpCorrelationProtocol")
def http_correlation_protocol(self) -> pulumi.Output[Optional[str]]:
"""
Sets correlation protocol to use for Application Insights diagnostics.
"""
return pulumi.get(self, "http_correlation_protocol")
@property
@pulumi.getter(name="logClientIp")
def log_client_ip(self) -> pulumi.Output[Optional[bool]]:
"""
Log the ClientIP. Default is false.
"""
return pulumi.get(self, "log_client_ip")
@property
@pulumi.getter(name="loggerId")
def logger_id(self) -> pulumi.Output[str]:
"""
Resource Id of a target logger.
"""
return pulumi.get(self, "logger_id")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="operationNameFormat")
def operation_name_format(self) -> pulumi.Output[Optional[str]]:
"""
The format of the Operation Name for Application Insights telemetries. Default is Name.
"""
return pulumi.get(self, "operation_name_format")
@property
@pulumi.getter
def sampling(self) -> pulumi.Output[Optional['outputs.SamplingSettingsResponse']]:
"""
Sampling settings for Diagnostic.
"""
return pulumi.get(self, "sampling")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type for API Management resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def verbosity(self) -> pulumi.Output[Optional[str]]:
"""
The verbosity level applied to traces emitted by trace policies.
"""
return pulumi.get(self, "verbosity")
| 49.049676
| 1,498
| 0.671907
|
4a02be4a7c201789ba63099add8f660b14f424d0
| 401
|
py
|
Python
|
nanome/_internal/_volumetric/_volume_data.py
|
nanome-ai/nanome-plugin-api
|
f2ce6a5e3123ee7449a90c2659f3891124289f4a
|
[
"MIT"
] | 3
|
2020-07-02T13:08:27.000Z
|
2021-11-24T14:32:53.000Z
|
nanome/_internal/_volumetric/_volume_data.py
|
nanome-ai/nanome-plugin-api
|
f2ce6a5e3123ee7449a90c2659f3891124289f4a
|
[
"MIT"
] | 11
|
2020-09-14T17:01:47.000Z
|
2022-02-18T04:00:52.000Z
|
nanome/_internal/_volumetric/_volume_data.py
|
nanome-ai/nanome-plugin-api
|
f2ce6a5e3123ee7449a90c2659f3891124289f4a
|
[
"MIT"
] | 5
|
2020-08-12T16:30:03.000Z
|
2021-12-06T18:04:23.000Z
|
from nanome.util import enums
from . import _UnitCell
class _VolumeData(object):
VolumeType = enums.VolumeType
def __init__(self):
self._data = []
self._width = 0
self._height = 0
self._depth = 0
self._mean = 0.0
self._rmsd = 0.0
self._type = _VolumeData.VolumeType.default
self._name = ""
self._cell = _UnitCell()
| 19.095238
| 51
| 0.586035
|
4a02be56e21466ad79d4febb5791cb1bacbd5c6b
| 418
|
py
|
Python
|
Python/7/HalvingSum/test_halving_sum.py
|
hwakabh/codewars
|
7afce5a7424d35abc55c350301ac134f2d3edd3d
|
[
"MIT"
] | null | null | null |
Python/7/HalvingSum/test_halving_sum.py
|
hwakabh/codewars
|
7afce5a7424d35abc55c350301ac134f2d3edd3d
|
[
"MIT"
] | 6
|
2020-02-21T17:01:59.000Z
|
2021-05-04T07:04:41.000Z
|
Python/7/HalvingSum/test_halving_sum.py
|
hwakabh/codewars
|
7afce5a7424d35abc55c350301ac134f2d3edd3d
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from unittest import main
from halving_sum import halving_sum
class TestHalvingSum(TestCase):
def test_halving_sum(self):
ptr = [
(25, 47),
(127, 247),
]
for inp, exp in ptr:
with self.subTest(inp=inp, exp=exp):
self.assertEqual(halving_sum(n=inp), exp)
if __name__ == "__main__":
main(verbosity=2)
| 20.9
| 57
| 0.595694
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.