id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
1721755 | #
# Analytics server
#
import pickle
import jsonpickle
import platform
import json
import io
import os
import sys
import pika
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from PIL import Image
import datetime
sns.set()
from sklearn.metrics import r2_score, median_absolute_error, mean_absolute_error
from sklearn.metrics import median_absolute_error, mean_squared_error, mean_squared_log_error
from scipy.optimize import minimize
# import statsmodels.tsa.api as smt
# import statsmodels.api as sm
# from tqdm import tqdm_notebook
import warnings
warnings.filterwarnings('ignore')
from itertools import product
from analytics_handler import analytics_handler
# RabbitMQ host comes from the Kubernetes service env var, falling back to localhost.
rabbitMQHost = os.getenv("RABBITMQ_SERVICE_HOST") or "localhost"
# Key/value result store used by the consumer callback (see jobid_result_db.set below).
analytics_db = analytics_handler()
def mean_absolute_percentage_error(y_true, y_pred):
    """Return the mean absolute percentage error (MAPE) between two arrays.

    Note: divides element-wise by y_true, so zeros in y_true produce inf/nan.
    """
    relative_errors = np.abs((y_true - y_pred) / y_true)
    return np.mean(relative_errors) * 100
# This worker's network name; used to tag every log message sent via sendLogs().
hostname = platform.node()
def plot_moving_average(series, field, window=10, plot_intervals=False, scale=1.96, filename='moving_average.png'):
    """Plot the rolling mean of *series* and return the figure as PNG bytes.

    Args:
        series: pandas Series of time-ordered observations.
        field: label inserted into the plot title.
        window: rolling-window size in samples.
        plot_intervals: if True, draw MAE +/- scale*std confidence bands.
        scale: multiplier on the deviation for the bands (1.96 ~ 95% under
            normal errors).
        filename: unused — the figure is written to an in-memory buffer,
            not to disk.

    Returns:
        io.BytesIO positioned at offset 0, containing the rendered PNG.
    """
    rolling_mean = series.rolling(window=window).mean()
    plt.figure(figsize=(17,8))
    plt.title('Moving average - {}\n window size = {}'.format(field, window))
    plt.plot(rolling_mean, 'g', label='Rolling mean trend')
    # Plot confidence intervals for smoothed values
    if plot_intervals:
        mae = mean_absolute_error(series[window:], rolling_mean[window:])
        deviation = np.std(series[window:] - rolling_mean[window:])
        lower_bound = rolling_mean - (mae + scale * deviation)
        upper_bound = rolling_mean + (mae + scale * deviation)
        plt.plot(upper_bound, 'r--', label='Upper bound / Lower bound')
        plt.plot(lower_bound, 'r--')
    plt.plot(series[window:], label='Actual values')
    plt.legend(loc='best')
    plt.grid(True)
    img_bytes = io.BytesIO()
    plt.savefig(img_bytes, format='png')
    img_bytes.seek(0)
    return img_bytes
def exponential_smoothing(series, alpha):
    """Simple (single) exponential smoothing.

    Each output point is alpha * observation + (1 - alpha) * previous
    smoothed value, seeded with the first observation. Returns a plain
    list of the same length as *series* (empty input yields []).
    """
    if not len(series):
        return []
    smoothed = [series[0]]
    for value in series[1:]:
        smoothed.append(alpha * value + (1 - alpha) * smoothed[-1])
    return smoothed
def plot_exponential_smoothing(series, field, alphas=[0.05,0.3], filename='exponential_smoothing.png'):
    """Plot exponential_smoothing() of *series* for each alpha plus the raw
    values, and return the figure as an in-memory PNG buffer.

    NOTE(review): `alphas` is a mutable default; harmless here because it is
    never mutated, but a tuple would be the safer idiom. `filename` is unused.
    """
    plt.figure(figsize=(17, 8))
    for alpha in alphas:
        plt.plot(exponential_smoothing(series, alpha), label="Alpha {}".format(alpha))
    plt.plot(series.values, "c", label = "Actual")
    plt.legend(loc="best")
    plt.axis('tight')
    plt.title('Exponential Smoothing - {}'.format(field))
    plt.grid(True)
    img_bytes = io.BytesIO()
    plt.savefig(img_bytes, format='png')
    img_bytes.seek(0)
    return img_bytes
def double_exponential_smoothing(series, alpha, beta):
    """Holt's linear (double exponential) smoothing with a one-step forecast.

    Args:
        series: sequence of observations (list or pandas Series).
        alpha: level smoothing factor in [0, 1].
        beta: trend smoothing factor in [0, 1].

    Returns:
        A list of len(series) + 1 values: the smoothed series followed by one
        forecast point. Series shorter than two elements are returned
        unchanged (as a list), since no trend can be estimated.
        BUG FIX: the original raised IndexError on empty or single-element
        input (it read series[0] / series[1] unconditionally).
    """
    if len(series) < 2:
        return list(series)
    result = [series[0]]
    for n in range(1, len(series) + 1):
        if n == 1:
            # Initialise level with the first point, trend with the first delta.
            level, trend = series[0], series[1] - series[0]
        if n >= len(series):
            # Out of observations: forecast from the last smoothed value.
            value = result[-1]
        else:
            value = series[n]
        last_level, level = level, alpha * value + (1 - alpha) * (level + trend)
        trend = beta * (level - last_level) + (1 - beta) * trend
        result.append(level + trend)
    return result
def plot_double_exponential_smoothing(series, field, alphas=[0.9,0.02], betas=[0.9,0.02], filename='double_exponential_smoothing.png'):
    """Plot double_exponential_smoothing() for every (alpha, beta) pair plus
    the raw values, and return the figure as an in-memory PNG buffer.

    NOTE(review): `alphas`/`betas` are mutable defaults (never mutated here);
    `filename` is unused — output goes to the returned BytesIO, not to disk.
    """
    plt.figure(figsize=(17, 8))
    for alpha in alphas:
        for beta in betas:
            plt.plot(double_exponential_smoothing(series, alpha, beta), label="Alpha {}, beta {}".format(alpha, beta))
    plt.plot(series.values, label = "Actual")
    plt.legend(loc="best")
    plt.axis('tight')
    plt.title('Double Exponential Smoothing - {}'.format(field))
    plt.grid(True)
    img_bytes = io.BytesIO()
    plt.savefig(img_bytes, format='png')
    img_bytes.seek(0)
    return img_bytes
# def tsplot(y, field, lags=30, figsize=(12, 7), syle='bmh', filename='ts_plot.png'):
# if not isinstance(y, pd.Series):
# y = pd.Series(y)
# with plt.style.context(style='bmh'):
# fig = plt.figure(figsize=figsize)
# layout = (2,2)
# ts_ax = plt.subplot2grid(layout, (0,0), colspan=2)
# acf_ax = plt.subplot2grid(layout, (1,0))
# pacf_ax = plt.subplot2grid(layout, (1,1))
# y.plot(ax=ts_ax)
# p_value = sm.tsa.stattools.adfuller(y)[1]
# ts_ax.set_title('Time Series Analysis Plots - {}\n Dickey-Fuller: p={0:.5f}'.format(field, p_value))
# smt.graphics.plot_acf(y, lags=lags, ax=acf_ax)
# smt.graphics.plot_pacf(y, lags=lags, ax=pacf_ax)
# plt.tight_layout()
# plt.savefig(filename)
# with Image.open(filename) as image:
# img_bytes = io.BytesIO(image)
# return img_bytes
# def optimize_SARIMA(y, parameters_list, d, D, s):
# """
# Return dataframe with parameters and corresponding AIC
# parameters_list - list with (p, q, P, Q) tuples
# d - integration order
# D - seasonal integration order
# s - length of season
# """
# results = []
# best_aic = float('inf')
# for param in tqdm_notebook(parameters_list):
# try: model = sm.tsa.statespace.SARIMAX(y, order=(param[0], d, param[1]),
# seasonal_order=(param[2], D, param[3], s)).fit(disp=-1)
# except:
# continue
# aic = model.aic
# #Save best model, AIC and parameters
# if aic < best_aic:
# best_model = model
# best_aic = aic
# best_param = param
# results.append([param, model.aic])
# result_table = pd.DataFrame(results)
# result_table.columns = ['parameters', 'aic']
# #Sort in ascending order, lower AIC is better
# result_table = result_table.sort_values(by='aic', ascending=True).reset_index(drop=True)
# return result_table
def receive():
    """Consume analytics jobs from the 'toAnalytics' RabbitMQ exchange forever.

    Each message body is a jsonpickle-encoded dict with keys 'job_id',
    'data' (a DataFrame), 'op' and 'params'; the plots produced for each
    requested field are stored back under the job id via analytics_db.
    Blocks in start_consuming() until interrupted.
    """
    rabbitMQ = pika.BlockingConnection(pika.ConnectionParameters(host=rabbitMQHost))
    rabbitMQChannel = rabbitMQ.channel()
    rabbitMQChannel.exchange_declare(exchange='toAnalytics', exchange_type='direct')
    # Exclusive, server-named queue bound to the 'data' routing key.
    result = rabbitMQChannel.queue_declare(queue='', exclusive=True)
    queue_name = result.method.queue
    rabbitMQChannel.queue_bind(exchange='toAnalytics', queue=queue_name, routing_key='data')
    print(' [*] Waiting for messages. To exit press CTRL+C')

    def callback(ch, method, properties, body):
        # BUG FIX: pickle.load() expects a readable file object, so the
        # original pickle.load(jsonpickle.decode(body)) raised TypeError on
        # every message. jsonpickle.decode() already reconstructs the object.
        unpickled = jsonpickle.decode(body)
        sendLogs('{} - ANALYTICS {}- Received job for analytics {} at RabbitMQ Host-{}'.format(datetime.datetime.now(), hostname, unpickled, rabbitMQHost))
        jobid = unpickled['job_id']
        df = unpickled['data']
        operation = unpickled['op']
        params = unpickled['params']
        fieldset = params['fields']
        result = []
        # One plot per requested field; column i of df corresponds to fieldset[i].
        for i in range(len(fieldset)):
            if operation == 'moving_average':
                result.append(plot_moving_average(df.iloc[:, i], fieldset[i], window=int(params['window'])))
            if operation == 'exponential_smoothing':
                result.append(plot_exponential_smoothing(df.iloc[:, i], fieldset[i], alphas=[params['alpha1'], params['alpha2']]))
            if operation == 'double_exponential_smoothing':
                result.append(plot_double_exponential_smoothing(df.iloc[:, i], fieldset[i], alphas=[params['alpha1'], params['alpha2']], betas=[params['beta1'], params['beta2']]))
            # BUG FIX: the 'ts_plot' branch called tsplot(), whose definition is
            # commented out above, so it raised NameError; disabled until tsplot
            # is restored.
            # if operation == 'ts_plot':
            #     result.append(tsplot(df.iloc[:, i], fieldset[i], lags=params['lags']))
        analytics_db.jobid_result_db.set(jobid, jsonpickle.encode(result))

    rabbitMQChannel.basic_qos(prefetch_count=1)
    rabbitMQChannel.basic_consume(queue=queue_name, on_message_callback=callback, auto_ack=True)
    rabbitMQChannel.start_consuming()
    print("done")
def sendLogs(logdata):
    """Publish one log line to the 'logs' direct exchange.

    Opens a fresh blocking connection per call and closes it before
    returning; the message is routed with key 'logdata'.
    """
    connection = pika.BlockingConnection(pika.ConnectionParameters(host=rabbitMQHost))
    channel = connection.channel()
    channel.exchange_declare(exchange='logs', exchange_type='direct')
    channel.basic_publish(exchange='logs', routing_key='logdata', body=logdata)
    connection.close()
# Entry point: consume jobs until Ctrl+C. The inner os._exit covers the case
# where sys.exit's SystemExit is intercepted, guaranteeing the process dies.
if __name__ == '__main__':
    try:
        receive()
    except KeyboardInterrupt:
        print('Interrupted')
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)
98933 | <filename>mask2former_video/video_maskformer_model.py<gh_stars>0
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import math
from typing import Tuple
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.data import MetadataCatalog
from detectron2.modeling import META_ARCH_REGISTRY, build_backbone, build_sem_seg_head
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.postprocessing import sem_seg_postprocess
from detectron2.structures import Boxes, ImageList, Instances, BitMasks
from .modeling.criterion import VideoSetCriterion
from .modeling.matcher import VideoHungarianMatcher
from .utils.memory import retry_if_cuda_oom
logger = logging.getLogger(__name__)


@META_ARCH_REGISTRY.register()
class VideoMaskFormer(nn.Module):
    """
    Mask-classification architecture for video instance segmentation:
    a backbone, a segmentation head producing per-query class logits and
    spatio-temporal masks, and a Hungarian-matching set criterion.
    """

    @configurable
    def __init__(
        self,
        *,
        backbone: Backbone,
        sem_seg_head: nn.Module,
        criterion: nn.Module,
        num_queries: int,
        object_mask_threshold: float,
        overlap_threshold: float,
        metadata,
        size_divisibility: int,
        sem_seg_postprocess_before_inference: bool,
        pixel_mean: Tuple[float],
        pixel_std: Tuple[float],
        # video
        num_frames,
    ):
        """
        Args:
            backbone: a backbone module, must follow detectron2's backbone interface
            sem_seg_head: a module that predicts semantic segmentation from backbone features
            criterion: a module that defines the loss
            num_queries: int, number of queries
            object_mask_threshold: float, threshold to filter query based on classification score
                for panoptic segmentation inference
            overlap_threshold: overlap threshold used in general inference for panoptic segmentation
            metadata: dataset meta, get `thing` and `stuff` category names for panoptic
                segmentation inference
            size_divisibility: Some backbones require the input height and width to be divisible by a
                specific integer. We can use this to override such requirement.
            sem_seg_postprocess_before_inference: whether to resize the prediction back
                to original input size before semantic segmentation inference or after.
                For high-resolution dataset like Mapillary, resizing predictions before
                inference will cause OOM error.
            pixel_mean, pixel_std: list or tuple with #channels element, representing
                the per-channel mean and std to be used to normalize the input image
            num_frames: number of frames sampled per video clip
        """
        super().__init__()
        self.backbone = backbone
        self.sem_seg_head = sem_seg_head
        self.criterion = criterion
        self.num_queries = num_queries
        self.overlap_threshold = overlap_threshold
        self.object_mask_threshold = object_mask_threshold
        self.metadata = metadata
        if size_divisibility < 0:
            # use backbone size_divisibility if not set
            size_divisibility = self.backbone.size_divisibility
        self.size_divisibility = size_divisibility
        self.sem_seg_postprocess_before_inference = sem_seg_postprocess_before_inference
        # Normalization constants; persistent=False keeps them out of checkpoints.
        self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False)
        self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
        self.num_frames = num_frames

    @classmethod
    def from_config(cls, cfg):
        """Build constructor kwargs from a detectron2 config node."""
        backbone = build_backbone(cfg)
        sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape())

        # Loss parameters:
        deep_supervision = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION
        no_object_weight = cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT

        # loss weights
        class_weight = cfg.MODEL.MASK_FORMER.CLASS_WEIGHT
        dice_weight = cfg.MODEL.MASK_FORMER.DICE_WEIGHT
        mask_weight = cfg.MODEL.MASK_FORMER.MASK_WEIGHT

        # building criterion
        matcher = VideoHungarianMatcher(
            cost_class=class_weight,
            cost_mask=mask_weight,
            cost_dice=dice_weight,
            num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS,
        )

        weight_dict = {"loss_ce": class_weight, "loss_mask": mask_weight, "loss_dice": dice_weight}

        if deep_supervision:
            # Replicate the loss weights for every intermediate decoder layer.
            dec_layers = cfg.MODEL.MASK_FORMER.DEC_LAYERS
            aux_weight_dict = {}
            for i in range(dec_layers - 1):
                aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
            weight_dict.update(aux_weight_dict)

        losses = ["labels", "masks"]

        criterion = VideoSetCriterion(
            sem_seg_head.num_classes,
            matcher=matcher,
            weight_dict=weight_dict,
            eos_coef=no_object_weight,
            losses=losses,
            num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS,
            oversample_ratio=cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO,
            importance_sample_ratio=cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO,
        )

        return {
            "backbone": backbone,
            "sem_seg_head": sem_seg_head,
            "criterion": criterion,
            "num_queries": cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES,
            "object_mask_threshold": cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD,
            "overlap_threshold": cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD,
            "metadata": MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
            "size_divisibility": cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY,
            "sem_seg_postprocess_before_inference": True,
            "pixel_mean": cfg.MODEL.PIXEL_MEAN,
            "pixel_std": cfg.MODEL.PIXEL_STD,
            # video
            "num_frames": cfg.INPUT.SAMPLING_FRAME_NUM,
        }

    @property
    def device(self):
        # Follows the device of the normalization buffer (moves with .to()).
        return self.pixel_mean.device

    def forward(self, batched_inputs):
        """
        Args:
            batched_inputs: a list, batched outputs of :class:`DatasetMapper`.
                Each item in the list contains the inputs for one video clip.
                For now, each item in the list is a dict that contains:
                   * "image": list of per-frame Tensors in (C, H, W) format.
                   * "instances": per-frame ground truth (training only)
                   * Other information that's included in the original dicts, such as:
                     "height", "width" (int): the output resolution of the model, used in inference.

        Returns:
            During training: a dict of scalar losses, already weighted by the
            criterion's weight_dict. During inference: the output dict produced
            by :meth:`inference_video` for the FIRST clip in the batch only.
        """
        # Flatten all frames of all clips into one image list, normalize, pad.
        images = []
        for video in batched_inputs:
            for frame in video["image"]:
                images.append(frame.to(self.device))
        images = [(x - self.pixel_mean) / self.pixel_std for x in images]
        images = ImageList.from_tensors(images, self.size_divisibility)

        features = self.backbone(images.tensor)
        outputs = self.sem_seg_head(features)

        if self.training:
            # mask classification target
            targets = self.prepare_targets(batched_inputs, images)

            # bipartite matching-based loss
            losses = self.criterion(outputs, targets)

            for k in list(losses.keys()):
                if k in self.criterion.weight_dict:
                    losses[k] *= self.criterion.weight_dict[k]
                else:
                    # remove this loss if not specified in `weight_dict`
                    losses.pop(k)
            return losses
        else:
            mask_cls_results = outputs["pred_logits"]
            mask_pred_results = outputs["pred_masks"]

            # Only the first clip of the batch is post-processed.
            mask_cls_result = mask_cls_results[0]
            # upsample masks to the padded input resolution
            mask_pred_result = retry_if_cuda_oom(F.interpolate)(
                mask_pred_results[0],
                size=(images.tensor.shape[-2], images.tensor.shape[-1]),
                mode="bilinear",
                align_corners=False,
            )

            del outputs

            input_per_image = batched_inputs[0]
            image_size = images.image_sizes[0]  # image size without padding after data augmentation
            height = input_per_image.get("height", image_size[0])  # raw image size before data augmentation
            width = input_per_image.get("width", image_size[1])

            return retry_if_cuda_oom(self.inference_video)(mask_cls_result, mask_pred_result, image_size, height, width)

    def prepare_targets(self, targets, images):
        """Convert per-frame Instances into per-video dicts of labels, track
        ids and (num_inst, num_frames, H_pad, W_pad) boolean mask tensors."""
        h_pad, w_pad = images.tensor.shape[-2:]
        gt_instances = []
        for targets_per_video in targets:
            _num_instance = len(targets_per_video["instances"][0])
            mask_shape = [_num_instance, self.num_frames, h_pad, w_pad]
            gt_masks_per_video = torch.zeros(mask_shape, dtype=torch.bool, device=self.device)

            gt_ids_per_video = []
            for f_i, targets_per_frame in enumerate(targets_per_video["instances"]):
                targets_per_frame = targets_per_frame.to(self.device)
                h, w = targets_per_frame.image_size

                gt_ids_per_video.append(targets_per_frame.gt_ids[:, None])
                # Paste frame masks into the padded canvas (top-left aligned).
                gt_masks_per_video[:, f_i, :h, :w] = targets_per_frame.gt_masks.tensor

            gt_ids_per_video = torch.cat(gt_ids_per_video, dim=1)
            # Keep instances that appear (id != -1) in at least one frame.
            valid_idx = (gt_ids_per_video != -1).any(dim=-1)

            gt_classes_per_video = targets_per_frame.gt_classes[valid_idx]  # N,
            gt_ids_per_video = gt_ids_per_video[valid_idx]                  # N, num_frames

            gt_instances.append({"labels": gt_classes_per_video, "ids": gt_ids_per_video})
            gt_masks_per_video = gt_masks_per_video[valid_idx].float()      # N, num_frames, H, W
            gt_instances[-1].update({"masks": gt_masks_per_video})

        return gt_instances

    def inference_video(self, pred_cls, pred_masks, img_size, output_height, output_width):
        """Turn one clip's query predictions into the evaluator's output dict:
        top-10 (score, label, per-frame binary mask) triples resized to the
        original video resolution."""
        if len(pred_cls) > 0:
            # Drop the trailing no-object class before scoring.
            scores = F.softmax(pred_cls, dim=-1)[:, :-1]
            labels = torch.arange(self.sem_seg_head.num_classes, device=self.device).unsqueeze(0).repeat(self.num_queries, 1).flatten(0, 1)
            # keep top-10 predictions
            scores_per_image, topk_indices = scores.flatten(0, 1).topk(10, sorted=False)
            labels_per_image = labels[topk_indices]
            # Map flattened (query, class) index back to the query index.
            topk_indices = topk_indices // self.sem_seg_head.num_classes
            pred_masks = pred_masks[topk_indices]

            # Crop away padding, then resize to the raw video resolution.
            pred_masks = pred_masks[:, :, : img_size[0], : img_size[1]]
            pred_masks = F.interpolate(
                pred_masks, size=(output_height, output_width), mode="bilinear", align_corners=False
            )

            masks = pred_masks > 0.

            out_scores = scores_per_image.tolist()
            out_labels = labels_per_image.tolist()
            out_masks = [m for m in masks.cpu()]
        else:
            out_scores = []
            out_labels = []
            out_masks = []

        video_output = {
            "image_size": (output_height, output_width),
            "pred_scores": out_scores,
            "pred_labels": out_labels,
            "pred_masks": out_masks,
        }

        return video_output
| StarcoderdataPython |
1691047 | '''
Exercício Python 33: Faça um programa que leia três números e mostre qual é o maior e qual é o menor.
'''
# Read the three integers to compare.
A = int(input('Digite o primeiro valor: '))
B = int(input('Digite o segundo valor: '))
C = int(input('Digite o terceiro valor: '))

# BUG FIX: the original pairwise strict comparisons missed ties — e.g. for
# A=2, B=1, C=1 neither `B < A and B < C` nor `C < A and C < B` holds, so 2
# was reported as the smallest. The builtins handle duplicates correctly.
Menor = min(A, B, C)
Maior = max(A, B, C)

print('\nmenor valor digitado foi {}'.format(Menor))
print('\nO maior valor digitado foi {}'.format(Maior))
| StarcoderdataPython |
3302047 | """
Contains functions mplementing different numerical integration schemes.
AUTHOR: <NAME>
DATE: 2020-01-17
"""
import sys
import numpy as np
from scipy.integrate import complex_ode
def evolve_DOP853(J, chi, psi, t_min, t_max, Nt, callback_fun):
    """Integrate the nonlinear multimode photonic network ODE with DOP853.

    The equations of motion are
        dx/dt = -i * (-J x + chi |x|^2 x)
    with coupling matrix J, nonlinearity chi and initial amplitudes psi.

    Args:
        J: (M, M) coupling matrix.
        chi: scalar nonlinearity strength.
        psi: length-M complex initial state.
        t_min, t_max: integration interval.
        Nt: number of output grid points (sets the reporting step size).
        callback_fun: called as callback_fun(step_index, t, y) after every step.

    Returns:
        Tuple (t, y): final solver time and final complex state vector.
    """
    t, dt = np.linspace(t_min, t_max, Nt, endpoint=True, retstep=True)

    def _rhs(_t, x):
        # Right-hand side of the NMPN equations of motion (time-independent).
        return -1j * (-np.dot(J, x) + chi * np.abs(x) ** 2 * x)

    solver = complex_ode(_rhs)
    solver.set_integrator('dop853', rtol=1e-10)
    solver.set_initial_value(psi, t.min())

    step = 0
    while solver.successful() and solver.t < t.max():
        solver.integrate(solver.t + dt)
        callback_fun(step, solver.t, solver.y)
        step += 1
    return solver.t, solver.y
| StarcoderdataPython |
106919 | <gh_stars>0
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.core import mail
from django.urls import reverse
from django.test import TestCase
from django.test.client import Client
from helpdesk.models import Queue, Ticket, FollowUp
from helpdesk import settings as helpdesk_settings
from django.contrib.auth.models import User
from django.contrib.auth.hashers import make_password
import uuid
import datetime
try: # python 3
from urllib.parse import urlparse
except ImportError: # python 2
from urlparse import urlparse
from helpdesk.templatetags.ticket_to_link import num_to_link
class TimeSpentTestCase(TestCase):
    """Checks that a FollowUp's time_spent is aggregated up to its Ticket
    and the ticket's Queue, and stays within the queue's dedicated time."""

    def setUp(self):
        # A public queue with a one-hour time budget, one ticket in it,
        # and an active staff user to author follow-ups.
        self.queue_public = Queue.objects.create(
            title="Queue 1",
            slug="q1",
            allow_public_submission=True,
            dedicated_time=datetime.timedelta(minutes=60),
        )

        self.ticket_data = {
            "title": "Test Ticket",
            "description": "Some Test Ticket",
        }

        ticket_data = dict(queue=self.queue_public, **self.ticket_data)
        self.ticket = Ticket.objects.create(**ticket_data)

        self.client = Client()

        user1_kwargs = {
            "username": "staff",
            "email": "<EMAIL>",
            "password": make_password("<PASSWORD>"),
            "is_staff": True,
            "is_superuser": False,
            "is_active": True,
        }
        self.user = User.objects.create(**user1_kwargs)

    def test_add_followup(self):
        """A 30-minute follow-up is reflected in the ticket's and queue's
        time_spent, and remains under the queue's dedicated_time."""
        message_id = uuid.uuid4().hex
        followup = FollowUp.objects.create(
            ticket=self.ticket,
            date=datetime.datetime.now(),
            title="Testing followup",
            comment="Testing followup time spent",
            public=True,
            user=self.user,
            new_status=1,
            message_id=message_id,
            time_spent=datetime.timedelta(minutes=30),
        )
        followup.save()

        # 30 minutes == 1800 seconds, propagated to ticket and queue.
        self.assertEqual(followup.time_spent.seconds, 1800)
        self.assertEqual(self.ticket.time_spent.seconds, 1800)
        self.assertEqual(self.queue_public.time_spent.seconds, 1800)
        self.assertTrue(
            self.queue_public.dedicated_time.seconds
            > self.queue_public.time_spent.seconds
        )
90371 | #!/usr/bin/python
print('importing modules')
import random
import math
import numpy
import os
import sys
from time import sleep
from argparse import ArgumentParser
from scipy.optimize import fmin_l_bfgs_b as minimize
from PyNEC import *
import multiprocessing as mp
from Queue import Empty
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from antenna import *
# Python 2 script: report our PID and drop to low CPU priority so the sweep
# does not starve the host.
print 'PID:\t'+str(os.getpid())
os.nice(10)

# One worker process per core plus two extra.
poolsize=mp.cpu_count()+2
m1=mp.Manager()
m2=mp.Manager()
#work_queue = m1.JoinableQueue()
work_queue = m1.Queue()
done_queue = m2.Queue()
#Length units in meters
#random.seed()
# NOTE(review): xbounds is commented out below but is still referenced by
# opt() and mont() — re-enable it (or pass bounds in) before using them.
#xbounds=[(0.1,0.4),(0.1,2.0),(0.05,0.5)]
def run_worker():
    """Worker loop: pull antennas from the shared work queue, simulate them,
    and push finished objects onto the done queue. Exits once the work queue
    stays empty past the 1s get timeout."""
    while True:
        try:
            antenna = work_queue.get(1, 1)
            antenna.run()
            done_queue.put(antenna, 1, 1)
        except Empty:
            break
# Latin-hypercube-style sampler for the (radius, length, spacing) design space.
def hyp():
    """Return a list of random (radius, length, spacing) design points.

    Each axis is divided into fixed-size blocks, the blocks are shuffled
    independently, and one uniform sample is drawn inside each matched
    block triple. The number of points equals the shortest axis (radius).
    """
    rad_step = 0.05
    length_step = 0.1
    space_step = 0.05
    rad = list(numpy.arange(0.01, 0.3, rad_step))
    length = list(numpy.arange(0.1, 1.9, length_step))
    space = list(numpy.arange(0.05, 0.4, space_step))
    # Shuffle each axis so block i is a randomly chosen cell per dimension.
    random.shuffle(rad)
    random.shuffle(length)
    random.shuffle(space)
    samples = []
    for r, l, s in zip(rad, length, space):
        samples.append(numpy.array([
            random.uniform(r, r + rad_step),
            random.uniform(l, l + length_step),
            random.uniform(s, s + space_step),
        ]))
    return samples
def mont(rad, length, space):
    """Monte-Carlo robustness probe around one design point.

    Draws N Gaussian perturbations (variance 0.001 per axis) of the point
    and evaluates the objective at each, returning
    numpy.array([-mean, std]) of the sampled objective values.

    NOTE(review): relies on module-level `function` (from `antenna import *`?)
    and would need `xbounds` for the commented-out minimize variant; xbounds
    is currently commented out at module scope — confirm before re-enabling.
    """
    ans = []
    N = 50
    for i in range(N):
        x = numpy.array([random.gauss(rad, math.sqrt(0.001)), random.gauss(length, math.sqrt(0.001)), random.gauss(space, math.sqrt(0.001))])
        #ans.append(minimize(function,x,bounds=xbounds,approx_grad=True,disp=False))
        ans.append(function(x))
    #my=numpy.zeros(len(ans))
    #for i in range(len(ans)):
    #    my[i]=ans[i][1]
    #return -numpy.average(my), numpy.std(my)
    return numpy.array([-numpy.average(ans), numpy.std(ans)])
def opt(hyp):
    """Locally optimize every hypercube start point and return the most
    robust optimum.

    For each start point, run L-BFGS-B on the module-level `function`, then
    score the optimum's robustness with mont(); the candidate with the
    smallest Monte-Carlo std wins.

    Returns:
        (best_objective, radius, length, spacing) of the winning candidate.

    BUG FIX: the best-candidate tracker was initialised to 0, but the std in
    stats[i][1] is always >= 0, so `stats[i][1] < 0` never fired and index 0
    was unconditionally returned. Initialise to +inf so the minimum-std
    candidate is actually selected.
    """
    ans = []
    stats = []
    best_idx = 0
    best_std = float('inf')
    print(hyp)
    for i in range(len(hyp)):
        tmp = minimize(function, hyp[i], bounds=xbounds, approx_grad=True, disp=False)
        stats.append(mont(tmp[0][0], tmp[0][1], tmp[0][2]))
        ans.append(tmp)
        if stats[i][1] < best_std:
            best_std = stats[i][1]
            best_idx = i
    best = ans[best_idx][1]
    rad = ans[best_idx][0][0]
    length = ans[best_idx][0][1]
    space = ans[best_idx][0][2]
    print('Avg='+str(stats[best_idx][0])+'\tSTD='+str(stats[best_idx][1])+'\tRad='+str(rad)+'\tL='+str(length)+'\tSpace='+str(space))
    return best, rad, length, space
def main():
#rad=list(numpy.arange(0.01,0.3,rad_step))
#length=list(numpy.arange(0.1,1.9,length_step))
#space=list(numpy.arange(0.05,0.4,space_step))
freq=144.0
l=2
d=2
steps=10
length=numpy.linspace(0.01,l,l*steps)
diameter=numpy.linspace(0.01,d,d*steps)
count=0
print 'Filling Queue'
for l in length:
for d in diameter:
space=numpy.linspace(0.01,l,l*steps)
for s in space:
work_queue.put(helix(freq,l,d,s),1,1)
count+=1
sys.stdout.write('\r'+str(count))
size=work_queue.qsize()
print '\nProcessing '+str(count)+' antennas'
print 'Running '+str(poolsize)+' threads'
threads=[]
for t in range(poolsize):
a=mp.Process(target=run_worker)
a.start()
threads.append(a)
while not work_queue.empty():
sleep(0.25)
sys.stdout.write('\r'+str((size-work_queue.qsize())*100/size)+'%')
print '\nwork queue empty'
#work_queue.join()
for i,t in enumerate(threads):
t.join()
print 'Joined threads'
print str(100*done_queue.qsize()/size)+'% passed'
print 'Done with '+str(len(antennas))+' antennas'
antennas=[]
while not done_queue.empty():
a=done_queue.get(1,1)
if a.vswr>=1 and a.vswr<=3:
antennas.append(a)
print 'Reduced to '+len(antennas)+' antennas'
#print('Generating hyp')
#hyp_val=hyp()
#print('Running opt')
#best, rad, length, space=opt(hyp_val)
#print('Best='+str(-best))
#print('Rad='+str(rad)+'\tLength='+str(length)+'\tSpace='+str(space))
#print('Running mont')
#avg, std=mont(rad,length,space)
#print('Average='+str(avg))
#print('STD='+str(std))
if __name__=='__main__':
main()
| StarcoderdataPython |
4814925 | <filename>finbyz_reports/finbyz/report/finbyz_accounts_receivable_summary/finbyz_accounts_receivable_summary.py
# Copyright (c) 2013, saurabh and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from erpnext.accounts.report.accounts_receivable.accounts_receivable import ReceivablePayableReport
class AccountsReceivableSummary(ReceivablePayableReport):
    """Party-wise ageing summary built on top of the detailed Accounts
    Receivable report: one row per party with invoiced / paid / outstanding
    totals bucketed into the four configured ageing ranges."""

    def run(self, args):
        party_naming_by = frappe.db.get_value(args.get("naming_by")[0], None, args.get("naming_by")[1])
        return self.get_columns(party_naming_by, args), self.get_data(party_naming_by, args)

    def get_columns(self, party_naming_by, args):
        """Build the report column definitions (party, sales partner, totals,
        ageing buckets, territory/supplier-type, currency)."""
        columns = [_(args.get("party_type")) + ":Link/" + args.get("party_type") + ":200"]
        columns.append(_(args.get("sales_partner")) + ":Link/" + args.get("sales_partner") + ":200")

        if party_naming_by == "Naming Series":
            columns += [args.get("party_type") + " Name::140"]

        columns += [
            _("Total Invoiced Amt") + ":Currency/currency:140",
            _("Total Paid Amt") + ":Currency/currency:140",
            _("Total Outstanding Amt") + ":Currency/currency:160",
            "0-" + str(self.filters.range1) + ":Currency/currency:100",
            str(self.filters.range1) + "-" + str(self.filters.range2) + ":Currency/currency:100",
            str(self.filters.range2) + "-" + str(self.filters.range3) + ":Currency/currency:100",
            str(self.filters.range3) + _("-Above") + ":Currency/currency:100"]

        if args.get("party_type") == "Customer":
            columns += [_("Territory") + ":Link/Territory:80"]
        if args.get("party_type") == "Supplier":
            columns += [_("Supplier Type") + ":Link/Supplier Type:80"]

        columns.append({
            "fieldname": "currency",
            "label": _("Currency"),
            "fieldtype": "Link",
            "options": "Currency",
            "width": 80
        })
        return columns

    def get_data(self, party_naming_by, args):
        """One row per party from the aggregated voucher data.

        BUG FIX: removed a leftover debug loop (`for x in xrange(1, 100):
        print partywise_total.items()`) that spammed stdout for every party.
        """
        data = []
        partywise_total = self.get_partywise_total(party_naming_by, args)

        for party, party_dict in partywise_total.items():
            # NOTE(review): the party id is emitted twice — once for the party
            # link column and once for the Sales Partner column added in
            # get_columns(); confirm the second value is intended.
            row = [party]
            row += [party]

            if party_naming_by == "Naming Series":
                row += [self.get_party_name(args.get("party_type"), party)]

            row += [
                party_dict.invoiced_amt, party_dict.paid_amt, party_dict.outstanding_amt,
                party_dict.range1, party_dict.range2, party_dict.range3, party_dict.range4,
            ]

            if args.get("party_type") == "Customer":
                row += [self.get_territory(party)]
            if args.get("party_type") == "Supplier":
                row += [self.get_supplier_type(party)]

            row.append(party_dict.currency)
            data.append(row)

        return data

    def get_partywise_total(self, party_naming_by, args):
        """Sum the per-voucher amounts and ageing buckets for each party."""
        party_total = frappe._dict()
        for d in self.get_voucherwise_data(party_naming_by, args):
            party_total.setdefault(d.party,
                frappe._dict({
                    "invoiced_amt": 0,
                    "paid_amt": 0,
                    "outstanding_amt": 0,
                    "range1": 0,
                    "range2": 0,
                    "range3": 0,
                    "range4": 0
                })
            )
            for k in party_total[d.party].keys():
                party_total[d.party][k] += d.get(k, 0)

            party_total[d.party].currency = d.currency

        return party_total

    def get_voucherwise_data(self, party_naming_by, args):
        """Run the detailed Accounts Receivable report and re-key each row
        into a dict using the column layout that report produces.

        BUG FIX: removed a leftover debug loop that printed the whole result
        set ten times (`for x in xrange(1, 10): print voucherwise_data`).
        """
        voucherwise_data = ReceivablePayableReport(self.filters).run(args)[1]

        cols = ["posting_date", "party"]

        if party_naming_by == "Naming Series":
            cols += ["party_name"]

        cols += ["voucher_type", "voucher_no", "due_date"]

        if args.get("party_type") == "Supplier":
            cols += ["bill_no", "bill_date"]

        cols += ["invoiced_amt", "paid_amt",
            "outstanding_amt", "age", "range1", "range2", "range3", "range4", "currency"]

        if args.get("party_type") == "Supplier":
            cols += ["supplier_type", "remarks"]
        if args.get("party_type") == "Customer":
            cols += ["territory", "remarks"]

        return self.make_data_dict(cols, voucherwise_data)

    def make_data_dict(self, cols, data):
        """Zip each row of *data* with *cols* into a frappe._dict."""
        data_dict = []
        for d in data:
            data_dict.append(frappe._dict(zip(cols, d)))
        return data_dict
def execute(filters=None):
    """Report entry point invoked by Frappe: fixes the report to Customer
    receivables, with party naming taken from Selling Settings."""
    args = {
        "party_type": "Customer",
        "sales_partner": "Sales Partner",
        "naming_by": ["Selling Settings", "cust_master_name"],
    }
    return AccountsReceivableSummary(filters).run(args)
# from __future__ import unicode_literals
# import frappe
# def execute(filters=None):
# columns = [
# {
# 'fieldname': 'creation_date',
# 'label': 'Date',
# 'fieldtype': 'Date'
# },
# {
# 'fieldname': 'mins',
# 'fieldtype': 'Float',
# 'label': 'Mins to First Response'
# },
# ]
# data = frappe.db.sql('''select date(creation) as creation_date,
# avg(mins_to_first_response) as mins
# from tabOpportunity
# where date(creation) between %s and %s
# and mins_to_first_response > 0
# group by creation_date order by creation_date desc''', (filters.from_date, filters.to_date))
# return columns, data | StarcoderdataPython |
53468 | import os
import sys
import json
accasim = os.path.abspath(os.path.join('../../accasim'))
sys.path.insert(0, accasim)
import unittest
from accasim.base.resource_manager_class import Resources
class ResourcesTests(unittest.TestCase):
    """Smoke test: a Resources object can be constructed from the bundled
    system definition file."""

    def load_sys_config(self):
        # Helper (not a test): parse the JSON system definition and return
        # its (groups, resources) sections.
        fp = 'data/system_def.config'
        data = None
        with open(fp) as f:
            data = json.load(f)
        return data['groups'], data['resources']

    def test_init_resources(self):
        # Construction must not raise; no further assertions here.
        groups, resources = self.load_sys_config()
        resources = Resources(groups, resources)
class ResourcesTotalCheckTests(unittest.TestCase):
    """Checks that the aggregate capacity computed from the RICC system
    definition matches the known totals."""

    def load_sys_config(self):
        # Helper (not a test): parse the RICC JSON definition and return
        # its (groups, resources) sections.
        fp = 'data/RICC.config'
        data = None
        with open(fp) as f:
            data = json.load(f)
        return data['groups'], data['resources']

    def test_init_resources(self):
        groups, resources = self.load_sys_config()
        resources = Resources(groups, resources)
        total_resources = resources.system_capacity('total')
        # Expected totals for the RICC machine definition.
        # NOTE(review): the second assertion message says 'core' but checks
        # 'mem' — the message text looks like a copy-paste slip.
        self.assertTrue(total_resources['core'] == 8384, 'Incorrect core def.')
        self.assertTrue(total_resources['mem'] == 12576000000, 'Incorrect core def.')


if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
87688 | <filename>sql_to_python.py
#sql_to_python: Simple module using Python to pull SQL data
| StarcoderdataPython |
160192 | import pytest
import scryptlib.utils
import scryptlib.contract
from scryptlib.types import Sig, PubKey, PubKeyHash
import bitcoinx
from bitcoinx import SigHash, PrivateKey, pack_byte
# Deterministic test key pair (derived from fixed bytes — NOT secure randomness).
key_priv = PrivateKey.from_arbitrary_bytes(b'test123')
key_pub = key_priv.public_key
pubkey_hash = key_pub.hash160()

# A second key pair that must NOT satisfy the P2PKH contract.
wrong_key_priv = PrivateKey.from_arbitrary_bytes(b'somethingelse')
wrong_key_pub = wrong_key_priv.public_key

# Compile the sCrypt P2PKH contract and instantiate it with our hash160.
contract = './test/res/p2pkh.scrypt'

compiler_result = scryptlib.utils.compile_contract(contract)
desc = compiler_result.to_desc()

P2PKH = scryptlib.contract.build_contract_class(desc)
p2pkh_obj = P2PKH(PubKeyHash(pubkey_hash))

# Dummy spending context plus the sighash the unlock signature must commit to
# (SIGHASH_ALL | FORKID over input 0 spending the contract's locking script).
context = scryptlib.utils.create_dummy_input_context()
sighash_flag = SigHash(SigHash.ALL | SigHash.FORKID)

input_idx = 0
utxo_satoshis = context.utxo.value

sighash = context.tx.signature_hash(input_idx, utxo_satoshis, p2pkh_obj.locking_script, sighash_flag)
def test_verify_correct_key():
    """Unlocking with a signature from the matching private key must verify."""
    sig = key_priv.sign(sighash, hasher=None)
    sig = sig + pack_byte(sighash_flag)  # append the sighash flag byte to the DER signature
    verify_result = p2pkh_obj.unlock(Sig(sig), PubKey(key_pub)).verify(context)
    assert verify_result == True
def test_verify_wrong():
    """Unlocking with a key whose hash differs from the locked pubkey-hash must fail."""
    sig = wrong_key_priv.sign(sighash, hasher=None)
    sig = sig + pack_byte(sighash_flag)
    # The signature is internally consistent with wrong_key_pub, but the
    # contract is locked to key_pub's hash160, so verification must raise.
    with pytest.raises(bitcoinx.VerifyFailed):
        p2pkh_obj.unlock(Sig(sig), PubKey(wrong_key_pub)).verify(context)
| StarcoderdataPython |
1742265 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2019-03-26 17:19
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
    """Auto-generated initial migration for the ratings app.

    Creates the Interaction, OverallRating, Rating and SkipRating models,
    each tied to arbitrary target objects via contenttypes generic keys,
    plus the uniqueness constraints that prevent duplicate ratings per
    (object, user, category, context) combination. Do not edit by hand;
    create a follow-up migration instead.
    """

    initial = True

    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Interaction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(
                    default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(
                    default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('object_id', models.IntegerField(db_index=True, null=True)),
                ('rating', models.FloatField(null=True)),
                ('content_type', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE,
                                                   related_name='interaction_objects', to='contenttypes.ContentType')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='OverallRating',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(
                    default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(
                    default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('rating', models.FloatField(null=True)),
                ('category', models.CharField(blank=True, choices=[('ticket-management-advisor', 'Ticket Management - Advisor'), ('ticket-management-user', 'Ticket Management - User'), (
                    'ticket-advisor', 'Advisor in ticket'), ('answer-advisor', 'Advisor in questions'), ('step-feedback', 'Step Feedback')], max_length=250)),
                ('object_id', models.IntegerField(db_index=True, null=True)),
                ('context_object_id', models.IntegerField(db_index=True, null=True)),
                ('content_type', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE,
                                                   related_name='overall_objects', to='contenttypes.ContentType')),
                ('context_content_type', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE,
                                                           related_name='overall_contexts', to='contenttypes.ContentType')),
            ],
        ),
        migrations.CreateModel(
            name='Rating',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(
                    default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(
                    default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('rating', models.IntegerField()),
                ('comment', models.TextField(blank=True, null=True)),
                ('category', models.CharField(blank=True, choices=[('ticket-management-advisor', 'Ticket Management - Advisor'), ('ticket-management-user', 'Ticket Management - User'), (
                    'ticket-advisor', 'Advisor in ticket'), ('answer-advisor', 'Advisor in questions'), ('step-feedback', 'Step Feedback')], max_length=250)),
                ('object_id', models.IntegerField(db_index=True, null=True)),
                ('context_object_id', models.IntegerField(db_index=True, null=True)),
                ('content_type', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE,
                                                   related_name='rating_objects', to='contenttypes.ContentType')),
                ('context_content_type', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE,
                                                           related_name='rating_contexts', to='contenttypes.ContentType')),
                ('overall_rating', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE,
                                                     related_name='ratings', to='ratings.OverallRating')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='SkipRating',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(
                    default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(
                    default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('object_id', models.IntegerField(db_index=True, null=True)),
                ('comment', models.TextField(blank=True, null=True)),
                ('category', models.CharField(blank=True, choices=[('ticket-management-advisor', 'Ticket Management - Advisor'), ('ticket-management-user', 'Ticket Management - User'), (
                    'ticket-advisor', 'Advisor in ticket'), ('answer-advisor', 'Advisor in questions'), ('step-feedback', 'Step Feedback')], max_length=250)),
                ('context_object_id', models.IntegerField(db_index=True, null=True)),
                ('content_type', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE,
                                                   related_name='skip_rating_objects', to='contenttypes.ContentType')),
                ('context_content_type', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE,
                                                           related_name='skip_rating_contexts', to='contenttypes.ContentType')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.AddField(
            model_name='interaction',
            name='ratings',
            field=models.ManyToManyField(related_name='interactions', to='ratings.OverallRating'),
        ),
        migrations.AddField(
            model_name='interaction',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
                                    related_name='interactions', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterUniqueTogether(
            name='skiprating',
            unique_together=set([('object_id', 'content_type', 'user', 'category',
                                  'context_content_type', 'context_object_id')]),
        ),
        migrations.AlterUniqueTogether(
            name='rating',
            unique_together=set([('object_id', 'content_type', 'user', 'category',
                                  'context_content_type', 'context_object_id')]),
        ),
        migrations.AlterUniqueTogether(
            name='overallrating',
            unique_together=set([('context_object_id', 'context_content_type',
                                  'category', 'object_id', 'content_type')]),
        ),
    ]
| StarcoderdataPython |
118832 | # The following is an implementation of the base encoder from
# the pywallet (https://github.com/jackjack-jj/pywallet), which
# is not subject to any license. Through Simple Wallet, this is
# subject to the following license.
# Copyright (c) 2022 Mystic Technology LLC
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import Optional, Union
from ..utils.conversion import assert_bytes, to_bytes
class Base:
    """Base58 / Base43 encoder-decoder (pywallet-derived, Bitcoin-style).

    Treats the byte string as a big-endian integer, converts it to the
    target base, and preserves leading zero bytes as leading first-alphabet
    characters (the classic Bitcoin leading-zero compression).
    """

    __b58chars = b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
    __b43chars = b'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ$*+-./:'

    @classmethod
    def encode(self, v: bytes, *, base: int) -> str:
        """Encode byte string *v* in base 58 or base 43 and return ASCII text.

        Raises ValueError for any other base.
        """
        # NOTE(review): first parameter is conventionally named `cls` in a
        # classmethod; `self` is bound to the class here, so the name-mangled
        # __b58chars lookups still resolve correctly.
        # Encode v, which is a string of bytes, to base58.
        assert_bytes(v)
        if base not in (58, 43):
            raise ValueError('not supported base: {}'.format(base))
        chars = self.__b58chars
        if base == 43:
            chars = self.__b43chars
        # Interpret v as one big-endian integer.
        long_value = 0
        power_of_base = 1
        # naive but slow variant: long_value += (256**i) * c
        for c in v[::-1]:
            long_value += power_of_base * c
            power_of_base <<= 8
        # Repeated divmod produces digits least-significant first.
        result = bytearray()
        while long_value >= base:
            div, mod = divmod(long_value, base)
            result.append(chars[mod])
            long_value = div
        result.append(chars[long_value])
        # Bitcoin does a little leading-zero-compression:
        # leading 0-bytes in the input become leading-1s
        nPad = 0
        for c in v:
            if c == 0x00:
                nPad += 1
            else:
                break
        result.extend([chars[0]] * nPad)
        result.reverse()
        return result.decode('ascii')

    @classmethod
    def decode(self, v: Union[bytes, str], *, base: int, length: Optional[int] = None) -> Optional[bytes]:
        """Decode base-58/43 text *v* back into bytes.

        If *length* is given and the decoded result has a different length,
        return None. Raises ValueError for an unsupported base and a plain
        Exception for characters outside the alphabet.
        """
        # decode v into a string of len bytes.
        v = to_bytes(v, 'ascii')
        if base not in (58, 43):
            raise ValueError('Error: Not supported base: {}'.format(base))
        chars = self.__b58chars
        if base == 43:
            chars = self.__b43chars
        # Accumulate the big-endian integer value digit by digit.
        long_value = 0
        power_of_base = 1
        for c in v[::-1]:
            digit = chars.find(bytes([c]))
            if digit == -1:
                raise Exception('Error: Forbidden character {} for base {}'.format(c, base))
            # naive but slow variant: long_value += digit * (base**i)
            long_value += digit * power_of_base
            power_of_base *= base
        # Split the integer back into bytes, least-significant first.
        result = bytearray()
        while long_value >= 256:
            div, mod = divmod(long_value, 256)
            result.append(mod)
            long_value = div
        result.append(long_value)
        # Leading first-alphabet characters map back to leading zero bytes.
        nPad = 0
        for c in v:
            if c == chars[0]:
                nPad += 1
            else:
                break
        result.extend(b'\x00' * nPad)
        if length is not None and len(result) != length:
            return None
        result.reverse()
        return bytes(result)
90240 | #!/usr/bin/env python
from butter.clone import unshare, setns
import pytest
@pytest.mark.clone
def test_setns(mock):
    """setns() should complete without raising when the C call returns 0."""
    # NOTE(review): the first patch handle is immediately overwritten; the
    # patch of butter.clone._lib is still applied, but only the _lib.setns
    # mock's return value is configured below.
    m = mock.patch('butter.clone._lib')
    m = mock.patch('butter.clone._lib.setns')
    m.return_value = 0
    setns(fd=5)
| StarcoderdataPython |
39626 | <reponame>mmanzi/gradientdomain-mitsuba
import os, sys, subprocess, copy, re
def get_output(script, args = None, shellenv = None):
    """Run an environment-setup *script* and return the shell's 'set' dump.

    On Windows the script is executed via cmd followed by `set`; on Linux it
    is sourced followed by `set`. *shellenv* is the environment to run under.
    Raises IOError (with stderr contents) if the command exits non-zero.
    """
    if sys.platform == 'win32':
        cmdLine = '"%s" %s & set' % (script, (args if args else ''))
        shell = False
    elif sys.platform.startswith('linux'):
        cmdLine = 'source "%s" %s ; set' % (script, (args if args else ''))
        shell = True
    else:
        raise Exception("Unsuported OS type: " + sys.platform)
    popen = subprocess.Popen(cmdLine, shell=shell, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=shellenv)
    # Use the .stdout and .stderr attributes directly because the
    # .communicate() method uses the threading module on Windows
    # and won't work under Pythons not built with threading.
    stdout = popen.stdout.read()
    if popen.wait() != 0:
        raise IOError(popen.stderr.read())
    output = stdout
    return output
def parse_output(output, keep = None):
    """Parse a shell 'set' dump into a dict of variable name -> value.

    output: text with one NAME=VALUE entry per line; non-matching lines
            are ignored.
    keep:   optional collection of variable names; when given, only those
            variables are returned.
    """
    ret = {}
    # Raw string instead of escaped backslashes; the old re.I flag was a
    # no-op since the pattern contains no cased literals.
    assignment = re.compile(r'(\w*)=(.*)')
    for line in output.splitlines():
        m = assignment.match(line)
        if not m:
            continue
        key = m.group(1)
        # Filter inline instead of duplicating the store logic per branch;
        # the old trailing `if keep is not None: pass` was dead code.
        if keep is None or key in keep:
            ret[key] = m.group(2)
    return ret
def normalize_env(shellenv, keys):
    """Given a dictionary representing a shell environment, add the variables
    from os.environ needed for the processing of .bat files; the keys are
    controlled by the keys argument.

    It also makes sure the environment values are correctly encoded.

    Note: the environment is copied.

    NOTE(review): this is Python-2-era code (`has_key`, 'mbcs' encoding of
    unicode values) — confirm the interpreter version before modernizing.
    """
    normenv = {}
    if shellenv:
        if sys.platform=='win32':
            # Windows shell env values must be mbcs-encoded byte strings.
            for k in shellenv.keys():
                normenv[k] = copy.deepcopy(shellenv[k]).encode('mbcs')
        # Pull the requested keys (e.g. COMSPEC) from the real environment.
        for k in keys:
            if os.environ.has_key(k):
                normenv[k] = os.environ[k]
    return normenv
def get_script_env(env,script,args=None,vars=None):
    '''
    this function returns a dictionary of all the data we want to merge
    or process in some other way.

    env:    SCons environment providing env['ENV'].
    script: environment-setup script to run (e.g. iclvars.bat).
    args:   optional arguments passed to the script.
    vars:   optional collection of variable names to keep; None keeps all.
    '''
    # COMSPEC is required on Windows so the .bat file can be executed.
    if sys.platform=='win32':
        nenv = normalize_env(env['ENV'], ['COMSPEC'])
    else:
        nenv = normalize_env(env['ENV'], [])
    output = get_output(script,args,nenv)
    vars = parse_output(output, vars)
    return vars
def merge_script_vars(env,script,args=None,vars=None):
    '''
    This merges the data retieved from the script in to the Enviroment
    by prepending it.

    script is the name of the script, args is optional arguments to pass
    vars are var we want to retrieve, if None it will retieve everything found

    NOTE(review): uses dict.iteritems(), i.e. Python 2 only.
    '''
    shell_env=get_script_env(env,script,args,vars)
    # Prepend so the script's paths win over existing entries; delete
    # duplicates that already exist further down the path.
    for k, v in shell_env.iteritems():
        env.PrependENVPath(k, v, delete_existing=1)
def generate(env):
    """SCons tool entry point: configure *env* for the Intel C++ compiler.

    Locates an installed Intel Composer/Compiler via the ICPP_* environment
    variables, runs its iclvars.bat for the target architecture and Visual
    Studio release, merges the resulting environment into *env*, and records
    the runtime redistributable path in env['REDIST_PATH'].
    No-op unless env['INTEL_COMPILER'] is True.
    """
    if 'INTEL_COMPILER' not in env or env['INTEL_COMPILER'] != True:
        return

    # Map the SCons target arch to iclvars' argument and redist directory name.
    if env['TARGET_ARCH'] == 'x86':
        arch = 'ia32'
        arch_redist = 'ia32'
    elif env['TARGET_ARCH'] == 'x86_64' or env['TARGET_ARCH'] == 'amd64':
        arch = 'ia32_intel64'
        arch_redist = 'intel64'
    else:
        raise Exception('Unknown architecture ' + env['TARGET_ARCH'])

    if env['MSVC_VERSION'] == '9.0':
        vsrelease = 'vs2008'
    elif env['MSVC_VERSION'] == '10.0':
        vsrelease = 'vs2010'
    else:
        raise Exception('Unknown version of visual studio!')

    # Probe the known install-location variables, newest release first.
    if 'ICPP_COMPOSER2014' in os.environ:
        icpp_path = os.environ.get('ICPP_COMPOSER2014')
    elif 'ICPP_COMPILER14' in os.environ:
        icpp_path = os.environ.get('ICPP_COMPILER14')
    elif 'ICPP_COMPOSER2013' in os.environ:
        icpp_path = os.environ.get('ICPP_COMPOSER2013')
    elif 'ICPP_COMPILER13' in os.environ:
        icpp_path = os.environ.get('ICPP_COMPILER13')
    elif 'ICPP_COMPOSER2011' in os.environ:
        icpp_path = os.environ.get('ICPP_COMPOSER2011')
    elif 'ICPP_COMPILER12' in os.environ:
        icpp_path = os.environ.get('ICPP_COMPILER12')
    else:
        raise Exception('Could not find any of the ICCPP_* environment variables!')

    merge_script_vars(env, os.path.join(icpp_path, 'bin/iclvars.bat'), arch + ' ' + vsrelease)
    env['REDIST_PATH'] = os.path.join(os.path.join(os.path.join(icpp_path, 'redist'), arch_redist), 'compiler')
def exists(env):
    """SCons tool hook: report whether the Intel compiler tool can be used.

    Returns True only when env['INTEL_COMPILER'] is True and at least one
    of the install-location variables that generate() probes is present.
    """
    if 'INTEL_COMPILER' not in env or env['INTEL_COMPILER'] != True:
        return False
    # BUGFIX: previously only ICPP_COMPOSER2011 was checked, so exists()
    # returned False for every other release that generate() supports.
    icpp_vars = (
        'ICPP_COMPOSER2014', 'ICPP_COMPILER14',
        'ICPP_COMPOSER2013', 'ICPP_COMPILER13',
        'ICPP_COMPOSER2011', 'ICPP_COMPILER12',
    )
    return any(var in os.environ for var in icpp_vars)
| StarcoderdataPython |
3216017 | import os
import shutil
import logging
import pandas as pd
import matplotlib
matplotlib.use("agg") # no need for tk
from supervised.automl import AutoML
from frameworks.shared.callee import call_run, result, output_subdir, utils
log = logging.getLogger(os.path.basename(__file__))
def run(dataset, config):
    """AutoML-benchmark entry point for the mljar-supervised framework.

    Trains an AutoML model on the benchmark *dataset* under the limits in
    *config* and returns the framework result record (predictions, truth,
    probabilities, model count, training duration).
    """
    log.info("\n**** mljar-supervised ****\n")

    # Rebuild typed DataFrames from the raw benchmark arrays.
    column_names, _ = zip(*dataset.columns)
    column_types = dict(dataset.columns)
    X_train = pd.DataFrame(dataset.train.X, columns=column_names).astype(
        column_types, copy=False
    )
    X_test = pd.DataFrame(dataset.test.X, columns=column_names).astype(
        column_types, copy=False
    )
    y_train = dataset.train.y.flatten()
    y_test = dataset.test.y.flatten()

    # Translate benchmark problem types to mljar-supervised task names.
    problem_mapping = dict(
        binary="binary_classification",
        multiclass="multiclass_classification",
        regression="regression",
    )
    is_classification = config.type == "classification"
    ml_task = problem_mapping.get(
        dataset.problem_type
    )  # if None the AutoML will guess about the ML task
    results_path = output_subdir("results", config)

    # Framework params prefixed with '_' are benchmark-internal flags,
    # not AutoML constructor arguments.
    training_params = {
        k: v for k, v in config.framework_params.items() if not k.startswith("_")
    }

    automl = AutoML(
        results_path=results_path,
        total_time_limit=config.max_runtime_seconds,
        seed=config.seed,
        ml_task=ml_task,
        **training_params
    )

    with utils.Timer() as training:
        automl.fit(X_train, y_train)

    preds = automl.predict(X_test)

    predictions, probabilities = None, None
    if is_classification:
        # Last column holds the predicted label; the rest are class probabilities.
        predictions = preds["label"].values
        probabilities = preds[preds.columns[:-1]].values
    else:
        predictions = preds["prediction"].values

    # clean the results
    if not config.framework_params.get("_save_artifacts", False):
        shutil.rmtree(results_path, ignore_errors=True)

    return result(
        output_file=config.output_predictions_file,
        predictions=predictions,
        truth=y_test,
        probabilities=probabilities,
        models_count=len(automl._models),
        training_duration=training.duration,
    )
# Benchmark harness entry point: delegates argument parsing to call_run.
if __name__ == "__main__":
    call_run(run)
| StarcoderdataPython |
68825 | import gtk
import gtk.gdk as gdk
import gobject
class Fixed(gtk.Container):
    """A minimal PyGTK container that places children at fixed pixel positions.

    Children are stored as (x, y, widget) triples; the requested size is
    the bounding box of all children. (Python 2 / PyGTK code.)
    """

    def __init__(self):
        gtk.Container.__init__(self)
        self._children = []
        self._changed = False  # set by put(); lets do_size_request skip stale work

    def max_xy(self):
        """Return the (width, height) bounding box enclosing all children."""
        X = 0
        Y = 0
        for x, y, child in self._children:
            w, h = child.size_request()
            X = max(X, x+w)
            Y = max(Y, y+h)
        return X, Y

    def put(self, x, y, widget):
        """Add *widget* as a child at fixed position (x, y)."""
        widget.set_parent(self)
        self._children.append((x, y, widget))
        self._changed = True

    def do_realize(self):
        # The do_realize method is responsible for creating GDK (windowing system)
        # resources. In this example we will create a new gdk.Window which we
        # then draw on

        # First set an internal flag telling that we're realized
        self.set_flags(gtk.REALIZED)

        # Create a new gdk.Window which we can draw on.
        # Also say that we want to receive exposure events by setting
        # the event_mask
        self.window = gdk.Window(
            self.get_parent_window(),
            width=self.allocation.width,
            height=self.allocation.height,
            window_type=gdk.WINDOW_CHILD,
            wclass=gdk.INPUT_OUTPUT,
            event_mask=self.get_events() | gdk.EXPOSURE_MASK | gdk.BUTTON_PRESS_MASK)

        # Associate the gdk.Window with ourselves, Gtk+ needs a reference
        # between the widget and the gdk window
        self.window.set_user_data(self)

        # Attach the style to the gdk.Window, a style contains colors and
        # GC contextes used for drawing
        self.style.attach(self.window)

        # The default color of the background should be what
        # the style (theme engine) tells us.
        self.style.set_background(self.window, gtk.STATE_NORMAL)
        self.window.move_resize(*self.allocation)

    def do_unrealize(self):
        # Break the widget <-> gdk.Window association made in do_realize.
        self.window.set_user_data(None)

    def do_size_allocate(self, allocation):
        # print 'allocate###', allocation
        self.allocation = allocation
        if self.flags() & gtk.REALIZED:
            self.window.move_resize(*allocation)
        # Each child gets exactly its requested size at its fixed position.
        for x, y, child in self._children:
            w, h = child.size_request()
            child.size_allocate(gdk.Rectangle(x, y, w, h))

    def do_size_request(self, requisition):
        # Only recompute the bounding box when children changed since last time.
        if self._changed:
            print "####request", requisition.width, requisition.height
            X, Y = self.max_xy()
            requisition.width = X
            requisition.height = Y
            self._changed = False

    def do_forall(self, internal, callback, data):
        # Container protocol: apply *callback* to every child.
        for _, _, child in self._children:
            callback(child, data)
gobject.type_register(Fixed)
# Manual demo: place two buttons in the Fixed container inside a dialog.
if __name__ == '__main__':
    F = Fixed()
    F.put(50, 50, gtk.Button("Button 1"))
    F.put(150, 90, gtk.Button("Button 2"))

    def foo(widget):
        print "fooo"
        #F.do_size_allocate(F.allocation)

    b = gtk.Button("Click!")
    b.connect("clicked", foo)
    hbox = gtk.HBox()
    hbox.pack_start(gtk.Button("Unused"))
    hbox.pack_start(F, expand=True, fill=True)
    hbox.pack_start(b, expand=False, fill=False)
    d = gtk.Dialog()
    d.vbox.pack_start(hbox)
    d.vbox.pack_start(gtk.Button("Test"))
    d.show_all()
    # run
    d.run()
| StarcoderdataPython |
1646554 | import json
from typing import List
class RelationTypeConstraintStore:
    """In-memory store of per-relation type constraints.

    Each relation name maps to a dict holding a non-empty 'subjects' list
    and a non-empty 'objects' list of allowed entity types.
    """

    def __init__(self):
        self.constraints = {}

    def load_from_json(self, constraint_json_file: str):
        """Populate the store from a JSON file, then validate its contents."""
        with open(constraint_json_file, 'rt') as json_file:
            self.constraints = json.load(json_file)
        self._verify_integrity()

    def get_subject_constraints(self, relation: str) -> List[str]:
        """Return the allowed subject types for *relation*."""
        return self.constraints[relation]["subjects"]

    def get_object_constraints(self, relation: str) -> List[str]:
        """Return the allowed object types for *relation*."""
        return self.constraints[relation]["objects"]

    def _verify_integrity(self):
        """Raise ValueError if any relation lacks subject or object constraints."""
        for relation_name, spec in self.constraints.items():
            # A missing key and an empty list are both invalid.
            if not spec.get('subjects'):
                raise ValueError(f'subject constraints missing for: {relation_name}')
            if not spec.get('objects'):
                raise ValueError(f'object constraints missing for: {relation_name}')
1799326 | <filename>nanoservice/reqrep.py
'''
The MIT License (MIT)
Copyright (c) 2016 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import uuid
import nanomsg
import logging
from .error import DecodeError
from .error import RequestParseError
from .error import AuthenticateError
from .error import AuthenticatorInvalidSignature
from .encoder import MsgPackEncoder
from .core import Endpoint
from .core import Process
class Responder(Endpoint, Process):
    """ A service which responds to requests.

    Listens on a nanomsg REP socket, decodes (method, args, ref) requests,
    dispatches to functions registered via register(), and replies with a
    {'result', 'error', 'ref'} dict.
    """

    # pylint: disable=too-many-arguments
    # pylint: disable=no-member
    def __init__(self, address, encoder=None, authenticator=None,
                 socket=None, bind=True, timeouts=(None, None)):
        # Defaults
        socket = socket or nanomsg.Socket(nanomsg.REP)
        encoder = encoder or MsgPackEncoder()

        super(Responder, self).__init__(
            socket, address, bind, encoder, authenticator, timeouts)
        self.methods = {}       # method name -> callable
        self.descriptions = {}  # method name -> human-readable description

    def execute(self, method, args, ref):
        """ Execute the method with args.

        Never raises: unknown methods and handler exceptions are reported
        through the 'error' field of the response dict instead.
        """
        response = {'result': None, 'error': None, 'ref': ref}
        fun = self.methods.get(method)
        if not fun:
            response['error'] = 'Method `{}` not found'.format(method)
        else:
            try:
                response['result'] = fun(*args)
            except Exception as exception:
                logging.error(exception, exc_info=1)
                response['error'] = str(exception)
        return response

    def register(self, name, fun, description=None):
        """ Register function on this service """
        self.methods[name] = fun
        self.descriptions[name] = description

    @classmethod
    def parse(cls, payload):
        """ Parse client request into (method, args, ref).

        Raises RequestParseError if the payload is not a 3-tuple.
        """
        try:
            method, args, ref = payload
        except Exception as exception:
            raise RequestParseError(exception)
        else:
            return method, args, ref

    # pylint: disable=logging-format-interpolation
    def process(self):
        """ Receive data from socket and process one request.

        Any authentication/decode/parse failure is logged and leaves
        `response` as None; an empty reply is then sent so the REQ/REP
        state machine does not deadlock waiting for an answer.
        """
        response = None
        try:
            payload = self.receive()
            method, args, ref = self.parse(payload)
            response = self.execute(method, args, ref)

        except AuthenticateError as exception:
            logging.error(
                'Service error while authenticating request: {}'
                .format(exception), exc_info=1)

        except AuthenticatorInvalidSignature as exception:
            logging.error(
                'Service error while authenticating request: {}'
                .format(exception), exc_info=1)

        except DecodeError as exception:
            logging.error(
                'Service error while decoding request: {}'
                .format(exception), exc_info=1)

        except RequestParseError as exception:
            logging.error(
                'Service error while parsing request: {}'
                .format(exception), exc_info=1)

        else:
            logging.debug('Service received payload: {}'.format(payload))

        if response:
            self.send(response)
        else:
            self.send('')
class Requester(Endpoint):
    """ A requester client.

    Connects a nanomsg REQ socket to a Responder and issues
    (method, args, ref) calls, matching replies by the generated ref.
    """

    # pylint: disable=too-many-arguments
    # pylint: disable=no-member
    def __init__(self, address, encoder=None, authenticator=None,
                 socket=None, bind=False, timeouts=(None, None)):
        # Defaults
        socket = socket or nanomsg.Socket(nanomsg.REQ)
        encoder = encoder or MsgPackEncoder()

        super(Requester, self).__init__(
            socket, address, bind, encoder, authenticator, timeouts)

    @classmethod
    def build_payload(cls, method, args):
        """ Build the payload to be sent to a `Responder`.

        The ref is a fresh UUID used to correlate request and reply.
        """
        ref = str(uuid.uuid4())
        return (method, args, ref)

    # pylint: disable=logging-format-interpolation
    def call(self, method, *args):
        """ Make a call to a `Responder` and return (result, error).

        Asserts that the reply carries the same ref that was sent.
        """
        payload = self.build_payload(method, args)
        logging.debug('* Client will send payload: {}'.format(payload))
        self.send(payload)
        res = self.receive()
        assert payload[2] == res['ref']
        return res['result'], res['error']
| StarcoderdataPython |
1720691 | <reponame>ExiledNarwal28/cardbot
from random import sample
import inject
from app.cards.factories.card_factories import CardFactory
from app.cards.entities.cards import NORMAL_DECK_LENGTH
from app.cards.entities.decks import Deck
class DeckFactory:
    """Builds shuffled Deck instances from cards produced by CardFactory."""

    # Injected dependency; resolved by the `inject` container at access time.
    card_factory = inject.attr(CardFactory)

    def create(self):
        """Return a Deck of NORMAL_DECK_LENGTH cards in random order.

        sample() over the full 1..NORMAL_DECK_LENGTH range yields a random
        permutation, so every card value appears exactly once.
        """
        cards = []
        values = sample(range(1, NORMAL_DECK_LENGTH + 1), NORMAL_DECK_LENGTH)
        for value in values:
            card = self.card_factory.create(value)
            cards.append(card)
        return Deck(cards)
| StarcoderdataPython |
4821462 | import json
from logging import getLogger
from django.contrib.auth import logout
from django.urls import reverse_lazy
from django.utils.safestring import mark_safe
from django.views.generic import TemplateView
from django.views.generic.base import RedirectView
from es_user.vouch_proxy import VouchProxyJWT
from es_user.models import UserJWT
from es_common.utils import safe_json
LOGGER = getLogger(__name__)
class UserView(TemplateView):
    """
    Show information about the user: the JWT stored on the account, the JWT
    from the vouch-proxy cookie, and the user's claims.
    """
    template_name = 'es_user/user.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)

        # JWT from the user account
        user = self.request.user
        if user.is_authenticated:
            user_jwt = UserJWT.objects.filter(user=user).first()
            if user_jwt:
                context['user_jwt'] = user_jwt.jwt
            else:
                context['user_jwt'] = "not saved"

        # JWT from the request cookie
        context['cookie_jwt'] = VouchProxyJWT(self.request)

        # NOTE(review): `user` may be AnonymousUser here; the broad except
        # keeps the page rendering if claim lookup fails for any reason.
        try:
            context['user_claims'] = UserJWT.get_all_user_claims(user)
            LOGGER.info(context['user_claims'])
        except Exception as e:
            LOGGER.warning(e)

        return context
class LoginView(RedirectView):
    """
    Handle login: already-authenticated users go straight to next_page;
    everyone else is bounced to the vouch-proxy /login endpoint with a
    return URL.
    """

    def get_redirect_url(self, *args, **kwargs):
        next_page = self.request.GET.get('next_page') or reverse_lazy('user-home')
        if self.request.user.is_authenticated:
            return next_page
        else:
            # NOTE(review): next_url is interpolated without URL-encoding;
            # presumably vouch-proxy accepts a raw url= value — confirm
            # before adding quoting.
            next_url = self.request.build_absolute_uri(next_page)
            return '/login?url=%s' % next_url
class LogoutView(RedirectView):
    """
    Handle logout, which has a couple components: end the Django session,
    then redirect through the vouch-proxy /logout endpoint so the SSO
    cookie is cleared too.
    """

    def get_redirect_url(self, *args, **kwargs):
        if self.request.user.is_authenticated:
            logout(self.request)
        next_page = self.request.GET.get('next_page') or reverse_lazy('user-home')
        next_url = self.request.build_absolute_uri(next_page)
        return '/logout?url=%s' % next_url
| StarcoderdataPython |
3275487 | <gh_stars>0
#pylint: disable=missing-module-docstring
from unittest import TestCase
from src.cell import Cell
#pylint: disable=missing-class-docstring
class CellTest(TestCase):
    """Unit tests for Cell: initial state, dig/flag transitions, equality,
    and the winnable() predicate (a cell is winnable when it is an undug
    mine or a dug non-mine)."""

    #pylint: disable=missing-function-docstring

    # --- initial state ---
    def test_cell_starts_not_digged(self):
        cell = Cell()
        self.assertFalse(cell.digged())

    def test_cell_starts_not_flagged(self):
        cell = Cell()
        self.assertFalse(cell.flagged())

    # --- mine flag ---
    def test_has_mine_without_mine(self):
        cell = Cell()
        self.assertFalse(cell.has_mine())

    def test_has_mine_with_mine(self):
        cell = Cell(True)
        self.assertTrue(cell.has_mine())

    # --- digging ---
    def test_digged_not_digged(self):
        cell = Cell()
        self.assertFalse(cell.digged())

    def test_digged_digged(self):
        cell = Cell()
        cell.dig()
        self.assertTrue(cell.digged())

    # --- flagging ---
    def test_flagged_not_flagged(self):
        cell = Cell()
        self.assertFalse(cell.flagged())

    def test_flagged_flagged(self):
        cell = Cell()
        cell.switch_flag()
        self.assertTrue(cell.flagged())

    def test_switch_flag_covered(self):
        cell = Cell()
        cell.switch_flag()
        self.assertTrue(cell.flagged())

    def test_switch_flag_uncovered(self):
        # A dug cell cannot be flagged.
        cell = Cell()
        cell.dig()
        cell.switch_flag()
        self.assertFalse(cell.flagged())

    def test_switch_flag_flagged(self):
        # Toggling twice returns to the unflagged state.
        cell = Cell()
        cell.switch_flag()
        cell.switch_flag()
        self.assertFalse(cell.flagged())

    # --- dig() return value ---
    def test_dig_once_returns_true(self):
        cell = Cell()
        self.assertTrue(cell.dig())

    def test_dig_twice_returns_false(self):
        cell = Cell()
        cell.dig()
        self.assertFalse(cell.dig())

    def test_dig_marks_digged_as_true(self):
        cell = Cell()
        cell.dig()
        self.assertTrue(cell.digged())

    # --- equality: mine, dig and flag state all participate ---
    def test_equality_mine(self):
        cell_1 = Cell(False)
        cell_2 = Cell(False)
        self.assertTrue(cell_1 == cell_2)

    def test_inequality_mine(self):
        cell_1 = Cell(True)
        cell_2 = Cell(False)
        self.assertFalse(cell_1 == cell_2)

    def test_equality_dig(self):
        cell_1 = Cell(True)
        cell_1.dig()
        cell_2 = Cell(True)
        cell_2.dig()
        self.assertTrue(cell_1 == cell_2)

    def test_inequality_dig(self):
        cell_1 = Cell(False)
        cell_1.dig()
        cell_2 = Cell(False)
        self.assertFalse(cell_1 == cell_2)

    def test_equality_flag(self):
        cell_1 = Cell(True)
        cell_1.switch_flag()
        cell_2 = Cell(True)
        cell_2.switch_flag()
        self.assertTrue(cell_1 == cell_2)

    def test_inequality_flag(self):
        cell_1 = Cell(True)
        cell_1.switch_flag()
        cell_2 = Cell(True)
        self.assertFalse(cell_1 == cell_2)

    # --- winnable(): dug non-mines and undug mines are winning states ---
    def test_winnable_no_mine_no_dug(self):
        cell = Cell(False)
        self.assertFalse(cell.winnable())

    def test_winnable_no_mine_dug(self):
        cell = Cell(False)
        cell.dig()
        self.assertTrue(cell.winnable())

    def test_winnable_mine_no_dug(self):
        cell = Cell(True)
        self.assertTrue(cell.winnable())

    def test_winnable_mine_dug(self):
        cell = Cell(True)
        cell.dig()
        self.assertFalse(cell.winnable())
3264552 | <reponame>rossi1/RES<filename>real_estate_api/supplier/permission.py
from rest_framework.permissions import BasePermission
class IsSupplier(BasePermission):
    """DRF permission granting access only to users flagged as suppliers."""

    def has_permission(self, request, view):
        # Relies on the custom user model's is_supplier flag.
        return request.user.is_supplier
3261523 | <reponame>gojek/CureIAM<filename>CureIAM/models/__init__.py
"""A package for models as data store packaged with this project.
"""
| StarcoderdataPython |
3387772 | <gh_stars>10-100
import gym
import numpy as np
from copo.algo_copo.constants import *
from ray.rllib.models import ModelCatalog
from ray.rllib.models.tf.misc import normc_initializer
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.models.utils import get_activation_fn
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.framework import get_activation_fn, try_import_tf
from ray.rllib.utils.tf_ops import make_tf_callable
from ray.rllib.utils.typing import TensorType, ModelConfigDict
tf1, tf, tfv = try_import_tf()
class NeiValueNetworkMixin:
    """Policy mixin exposing neighbour/global value estimators and SVO updates.

    Builds TF-callable wrappers around the model's value heads so GAE
    post-processing can query values outside the graph. Requires
    config['use_gae'] to be enabled.
    """

    def __init__(self, obs_space, action_space, config):
        if config.get("use_gae"):
            if self.config[USE_CENTRALIZED_CRITIC]:
                # Centralized-critic path: each head takes the (centralized)
                # observation directly; dynamic_shape allows variable batch.
                @make_tf_callable(self.get_session(), dynamic_shape=True)
                def cc_v(ob, prev_action=None, prev_reward=None, *state):
                    return self.model.value_function(ob)

                self.get_cc_value = cc_v

                @make_tf_callable(self.get_session(), dynamic_shape=True)
                def nei_v(ob, prev_action=None, prev_reward=None, *state):
                    return self.model.get_nei_value(ob)

                self.get_nei_value = nei_v

                @make_tf_callable(self.get_session(), dynamic_shape=True)
                def global_v(ob, prev_action=None, prev_reward=None, *state):
                    return self.model.get_global_value(ob)

                self.get_global_value = global_v
            else:
                # Decentralized path: run a full single-sample forward pass,
                # then read the corresponding value head.
                @make_tf_callable(self.get_session())
                def nei_value(ob, prev_action, prev_reward, *state):
                    model_out, _ = self.model(
                        {
                            SampleBatch.CUR_OBS: tf.convert_to_tensor([ob]),
                            SampleBatch.PREV_ACTIONS: tf.convert_to_tensor([prev_action]),
                            SampleBatch.PREV_REWARDS: tf.convert_to_tensor([prev_reward]),
                            "is_training": tf.convert_to_tensor(False),
                        }, [tf.convert_to_tensor([s]) for s in state], tf.convert_to_tensor([1])
                    )
                    return self.model.get_nei_value()[0]

                @make_tf_callable(self.get_session())
                def global_value(ob, prev_action, prev_reward, *state):
                    model_out, _ = self.model(
                        {
                            SampleBatch.CUR_OBS: tf.convert_to_tensor([ob]),
                            SampleBatch.PREV_ACTIONS: tf.convert_to_tensor([prev_action]),
                            SampleBatch.PREV_REWARDS: tf.convert_to_tensor([prev_reward]),
                            "is_training": tf.convert_to_tensor(False),
                        }, [tf.convert_to_tensor([s]) for s in state], tf.convert_to_tensor([1])
                    )
                    return self.model.get_global_value()[0]

                self.get_nei_value = nei_value
                self.get_global_value = global_value
        else:
            # This mixin only supports GAE-based advantage estimation.
            raise ValueError()

    def assign_svo(self, svo_param, svo_std_param=None):
        """Write new SVO parameter(s) into the graph via the assign ops.

        With distributional SVO both mean and std must be supplied; otherwise
        only the scalar SVO is updated. Returns the session.run() result.
        """
        if self.config[USE_DISTRIBUTIONAL_SVO]:
            assert svo_std_param is not None
            return self.get_session().run(
                [self._svo_assign_op, self._svo_std_assign_op],
                feed_dict={
                    self._svo_ph: svo_param,
                    self._svo_std_ph: svo_std_param
                }
            )
        else:
            return self.get_session().run(self._svo_assign_op, feed_dict={self._svo_ph: svo_param})
def register_copo_model():
    """Register CoPOModel under the name "copo_model" in RLlib's model catalog."""
    ModelCatalog.register_custom_model("copo_model", CoPOModel)
class CoPOModel(TFModelV2):
    """Fully connected CoPO network (ModelV2 API) with three value heads.

    Builds the policy network plus an "own" value function, a neighbours
    value function and a global value function. When the centralized critic
    is enabled, every value head takes a separate centralized observation
    input instead of the agent's own observation, and the policy network is
    kept in its own Keras model.
    """
    def __init__(
        self, obs_space: gym.spaces.Space, action_space: gym.spaces.Space, num_outputs: int,
        model_config: ModelConfigDict, name: str
    ):
        super(CoPOModel, self).__init__(obs_space, action_space, num_outputs, model_config, name)
        activation = get_activation_fn(model_config.get("fcnet_activation"))
        hiddens = model_config.get("fcnet_hiddens", [])
        no_final_linear = model_config.get("no_final_linear")
        vf_share_layers = model_config.get("vf_share_layers")
        free_log_std = model_config.get("free_log_std")
        # NOTE(review): USE_CENTRALIZED_CRITIC and the
        # "centralized_critic_obs_dim" config entry are defined elsewhere in
        # this file -- confirm there.
        use_centralized_critic = model_config[USE_CENTRALIZED_CRITIC]
        self.use_centralized_critic = use_centralized_critic
        # Generate free-floating bias variables for the second half of
        # the outputs.
        if free_log_std:
            # free_log_std is not supported by this model.
            raise ValueError()
        # We are using obs_flat, so take the flattened shape as input.
        inputs = tf.keras.layers.Input(shape=(int(np.product(obs_space.shape)), ), name="observations")
        if use_centralized_critic:
            cc_inputs = tf.keras.layers.Input(
                shape=(model_config["centralized_critic_obs_dim"], ), name="cc_observations"
            )
        # ===== Build Policy Network =====
        # Last hidden layer output (before logits outputs).
        last_layer = inputs
        # The action distribution outputs.
        logits_out = None
        i = 1
        # Create layers 0 to second-last.
        for size in hiddens[:-1]:
            last_layer = tf.keras.layers.Dense(
                size, name="fc_{}".format(i), activation=activation, kernel_initializer=normc_initializer(1.0)
            )(last_layer)
            i += 1
        # The last layer is adjusted to be of size num_outputs, but it's a
        # layer with activation.
        if no_final_linear and num_outputs:
            # no_final_linear is not supported by this model.
            raise ValueError()
        # Finish the layers with the provided sizes (`hiddens`), plus -
        # iff num_outputs > 0 - a last linear layer of size num_outputs.
        else:
            if len(hiddens) > 0:
                last_layer = tf.keras.layers.Dense(
                    hiddens[-1],
                    name="fc_{}".format(i),
                    activation=activation,
                    kernel_initializer=normc_initializer(1.0)
                )(last_layer)
            if num_outputs:
                logits_out = tf.keras.layers.Dense(
                    num_outputs, name="fc_out", activation=None, kernel_initializer=normc_initializer(0.01)
                )(last_layer)
            # Adjust num_outputs to be the number of nodes in the last layer.
            else:
                # num_outputs must be provided for this model.
                raise ValueError()
        # Concat the log std vars to the end of the state-dependent means.
        if free_log_std and logits_out is not None:
            raise ValueError()
        # ===== Build original value function =====
        last_vf_layer = None
        if not vf_share_layers:
            # Build a parallel set of hidden layers for the value net.
            if use_centralized_critic:
                last_vf_layer = cc_inputs
            else:
                last_vf_layer = inputs
            i = 1
            for size in hiddens:
                last_vf_layer = tf.keras.layers.Dense(
                    size,
                    name="fc_value_{}".format(i),
                    activation=activation,
                    kernel_initializer=normc_initializer(1.0)
                )(last_vf_layer)
                i += 1
        else:
            # Sharing policy/value layers is not supported by this model.
            raise ValueError()
        value_out = tf.keras.layers.Dense(
            1, name="value_out", activation=None, kernel_initializer=normc_initializer(0.01)
        )(last_vf_layer if last_vf_layer is not None else last_layer)
        if use_centralized_critic:
            # Policy and (centralized) value function live in separate models.
            self.base_model = tf.keras.Model(inputs, (logits_out if logits_out is not None else last_layer))
            self.cc_value_network = tf.keras.Model(cc_inputs, value_out)
        else:
            self.base_model = tf.keras.Model(
                inputs, [(logits_out if logits_out is not None else last_layer), value_out]
            )
        # Cached output of the own-value head from the latest forward()
        # call (decentralized case only).
        self._value_out = None
        # ===== Build neighbours value function =====
        if use_centralized_critic:
            last_vf_layer = cc_inputs
        else:
            last_vf_layer = inputs
        i = 1
        for size in hiddens:
            last_vf_layer = tf.keras.layers.Dense(
                size,
                name="fc_value_nei_{}".format(i),
                activation=activation,
                kernel_initializer=normc_initializer(1.0)
            )(last_vf_layer)
            i += 1
        value_out_nei = tf.keras.layers.Dense(
            1, name="value_out_nei", activation=None, kernel_initializer=normc_initializer(0.01)
        )(last_vf_layer if last_vf_layer is not None else last_layer)
        if use_centralized_critic:
            self.nei_value_network = tf.keras.Model(cc_inputs, value_out_nei)
        else:
            self.nei_value_network = tf.keras.Model(inputs, value_out_nei)
        # Cached neighbours-value output from the latest forward() call.
        self._last_nei_value = None
        # ===== Build global value function =====
        if use_centralized_critic:
            last_vf_layer = cc_inputs
        else:
            last_vf_layer = inputs
        i = 1
        for size in hiddens:
            last_vf_layer = tf.keras.layers.Dense(
                size,
                name="fc_value_global_{}".format(i),
                activation=activation,
                kernel_initializer=normc_initializer(1.0)
            )(last_vf_layer)
            i += 1
        value_out_global = tf.keras.layers.Dense(
            1, name="value_out_global", activation=None, kernel_initializer=normc_initializer(0.01)
        )(last_vf_layer if last_vf_layer is not None else last_layer)
        if use_centralized_critic:
            self.global_value_network = tf.keras.Model(cc_inputs, value_out_global)
        else:
            self.global_value_network = tf.keras.Model(inputs, value_out_global)
        # Cached global-value output from the latest forward() call.
        self._last_global_value = None
    def forward(self, input_dict, state, seq_lens):
        """Run the policy net; cache value outputs in the decentralized case."""
        if self.use_centralized_critic:
            # Only forward the policy network and left all value functions later.
            model_out = self.base_model(input_dict["obs_flat"])
        else:
            model_out, self._value_out = self.base_model(input_dict["obs_flat"])
            self._last_nei_value = self.nei_value_network(input_dict["obs_flat"])
            self._last_global_value = self.global_value_network(input_dict["obs_flat"])
        return model_out, state
    def value_function(self, cc_obs=None) -> TensorType:
        """Own value estimate; `cc_obs` is required with the centralized critic."""
        if self.use_centralized_critic:
            assert cc_obs is not None
            return tf.reshape(self.cc_value_network(cc_obs), [-1])
        else:
            return tf.reshape(self._value_out, [-1])
    def get_nei_value(self, cc_obs=None):
        """Neighbours value estimate; `cc_obs` is required with the centralized critic."""
        if self.use_centralized_critic:
            assert cc_obs is not None
            return tf.reshape(self.nei_value_network(cc_obs), [-1])
        else:
            return tf.reshape(self._last_nei_value, [-1])
    def get_global_value(self, cc_obs=None):
        """Global value estimate; `cc_obs` is required with the centralized critic."""
        if self.use_centralized_critic:
            assert cc_obs is not None
            return tf.reshape(self.global_value_network(cc_obs), [-1])
        else:
            return tf.reshape(self._last_global_value, [-1])
| StarcoderdataPython |
3375215 | #!/usr/local/greenplum-db-6.10.0/ext/python/bin/python
# coding=utf-8
from .deepwalk import DeepWalk
from .line import LINE
| StarcoderdataPython |
1685957 | <filename>zbpy/indexedfieldentity.py<gh_stars>0
from .zbprotocol_pb2 import TableIndexField, TableIndexFields
class IndexedField():
    """Description of a single indexed field on a table.

    Wraps the field name, its ordering/index type and an optional language
    code, and knows how to serialize itself to the wire-protocol message.
    """

    def __init__(self, field_name, index_type, lang_code=''):
        """Create an IndexedField.

        Parameters:
            field_name: string
            index_type: zbprotocol_pb2.QueryOrdering
            lang_code: string (optional)

        Returns:
            IndexedField
        """
        self.field_name = field_name
        self.index_type = index_type
        self.lang_code = lang_code

    def set_language_code(self, code):
        """Set the language code used when indexing this field.

        Parameters:
            code: string
        """
        self.lang_code = code

    def to_protocol(self):
        """Serialize this field description to its protobuf message.

        Returns:
            TableIndexField
        """
        return TableIndexField(field=self.field_name,
                               ordering=self.index_type,
                               languageCode=self.lang_code)
def indexed_fields_to_protocol(ifs):
    """Convert an iterable of IndexedField objects into a TableIndexFields message.

    Parameters:
        ifs: iterable of IndexedField

    Returns:
        TableIndexFields wrapping one TableIndexField per input element
    """
    # Serialize each field and wrap the list in the container message
    # (comprehension replaces the previous manual append loop).
    return TableIndexFields(
        fields=[field.to_protocol() for field in ifs]
    )
| StarcoderdataPython |
4827516 | # -*- coding: utf-8 -*-
from typing import TYPE_CHECKING, Any, Callable, Dict, Generic, TypeVar
if TYPE_CHECKING:
from . import Declaration
T = TypeVar("T", bound=Callable[..., Any])
class FormatHandler(Generic[T]):
    """Registry mapping format names to handler callables.

    Handlers are added with the :meth:`register` decorator and invoked
    through :meth:`handle`, which dispatches a declaration to the handler
    registered for the requested format.
    """

    __slots__ = "_types"

    _types: Dict[str, T]

    def __init__(self):
        self._types = {}

    def register(self, fmt: str):
        """Return a decorator that registers its target as the handler for `fmt`."""
        def wrap(func: T):
            self._types[fmt] = func
            return func

        return wrap

    def handle(
        self, declaration: "Declaration", fmt: str, *args: Any, **kwargs: Any
    ):
        """Invoke the handler registered for `fmt` on `declaration`.

        Raises:
            TypeError: if no handler is registered for `fmt`.
        """
        try:
            implementation = self._types[fmt]
        except KeyError:
            raise TypeError(f"Cannot generate {fmt} annotation")
        return implementation(declaration, *args, **kwargs)
| StarcoderdataPython |
1732471 | from libra.discovery_set import DiscoverySet
def test_discovery_set():
    """The change-event key must be 40 bytes long with the expected content."""
    event_key = DiscoverySet.change_event_key()
    assert len(event_key) == 40
    expected = [2] + [0] * 36 + [13, 21, 192]
    assert event_key == expected
| StarcoderdataPython |
1696050 | # Generated by Django 3.1.5 on 2021-04-22 13:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the programme_curriculum app.

    Auto-generated by Django 3.1.5 (see the header comment). Creates the
    Course, Curriculum, Programme, Semester, Discipline, CourseSlot and
    Batch models together with their relations and unique-together
    constraints. Do not hand-edit the operations below; create a follow-up
    migration instead.
    """

    initial = True

    dependencies = [
    ]

    # Operations below are exactly as generated by `makemigrations`.
    operations = [
        migrations.CreateModel(
            name='Course',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=10, unique=True)),
                ('name', models.CharField(max_length=100, unique=True)),
                ('credit', models.PositiveIntegerField(default=0)),
                ('lecture_hours', models.PositiveIntegerField(null=True)),
                ('tutorial_hours', models.PositiveIntegerField(null=True)),
                ('pratical_hours', models.PositiveIntegerField(null=True)),
                ('discussion_hours', models.PositiveIntegerField(null=True)),
                ('project_hours', models.PositiveIntegerField(null=True)),
                ('pre_requisits', models.TextField(null=True)),
                ('syllabus', models.TextField()),
                ('evaluation_schema', models.TextField()),
                ('ref_books', models.TextField()),
            ],
            options={
                'unique_together': {('code', 'name')},
            },
        ),
        migrations.CreateModel(
            name='Curriculum',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('version', models.PositiveIntegerField(default=1)),
                ('working_curriculum', models.BooleanField(default=True)),
                ('no_of_semester', models.PositiveIntegerField(default=1)),
            ],
        ),
        migrations.CreateModel(
            name='Programme',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('category', models.CharField(choices=[('UG', 'Undergraduate'), ('PG', 'Postgraduate'), ('PHD', 'Doctor of Philosophy')], max_length=3)),
                ('name', models.CharField(max_length=70, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Semester',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('semester_no', models.PositiveIntegerField()),
                ('curriculum', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='programme_curriculum.curriculum')),
            ],
            options={
                'unique_together': {('curriculum', 'semester_no')},
            },
        ),
        migrations.CreateModel(
            name='Discipline',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, unique=True)),
                ('programmes', models.ManyToManyField(to='programme_curriculum.Programme')),
            ],
        ),
        migrations.AddField(
            model_name='curriculum',
            name='programme',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='programme_curriculum.programme'),
        ),
        migrations.CreateModel(
            name='CourseSlot',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('type', models.CharField(choices=[('Professional Core', 'Professional Core'), ('Professional Elective', 'Professional Elective'), ('Professional Lab', 'Professional Lab'), ('Engineering Science', 'Engineering Science'), ('Natural Science', 'Natural Science'), ('Humanities', 'Humanities'), ('Design', 'Design'), ('Manufacturing', 'Manufacturing'), ('Management Science', 'Management Science')], max_length=70)),
                ('course_slot_info', models.TextField(null=True)),
                ('courses', models.ManyToManyField(to='programme_curriculum.Course')),
                ('semester', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='programme_curriculum.semester')),
            ],
        ),
        migrations.AlterUniqueTogether(
            name='curriculum',
            unique_together={('name', 'version')},
        ),
        migrations.CreateModel(
            name='Batch',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, unique=True)),
                ('year', models.PositiveIntegerField(default=2021)),
                ('curriculum', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='programme_curriculum.curriculum')),
                ('discipline', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='programme_curriculum.discipline')),
            ],
            options={
                'unique_together': {('name', 'discipline', 'year')},
            },
        ),
    ]
| StarcoderdataPython |
1684747 | """A Stack object for incremental sampling
"""
import argparse
import timeit
from typing import Dict, List, Tuple
from graph_tool import Graph
from graph_tool.inference import BlockState
from graph_tool.inference import minimize_blockmodel_dl
from evaluation import Evaluation
from sample import Sample
from samplestate import SampleState
from util import finetune_assignment
from util import load_graph
from util import partition_from_sample
class SampleStack(object):
    """Holds the sequence of graph samples drawn from one full graph.

    NOTE: despite the name (and the internal list being called a stack),
    samples are removed from the FRONT via ``self.stack.pop(0)``, i.e. in
    the order they were created.
    """
    def __init__(self, args: argparse.Namespace) -> None:
        """Creates a SampleStack object.

        Parameters
        ---------
        args : argparse.Namespace
            the command-line arguments provided by the user
        """
        # Load graph
        # Create stack of samples
        # Use List as stack
        self.t_load_start = timeit.default_timer()
        self.full_graph, self.true_block_assignment = load_graph(args)
        self.t_load_end = timeit.default_timer()
        self.stack = list() # type: List[Tuple[Graph, Sample]]
        self.create_sample_stack(args)
        self.t_sample_end = timeit.default_timer()
    # End of __init__()
    def create_sample_stack(self, args: argparse.Namespace):
        """Iteratively performs sampling to create the stack of samples.

        Parameters
        ----------
        args : argparse.Namespace
            the command-line arguments provided by the user
        """
        # Iteratively perform sampling; each sample after the first is
        # conditioned on the state of the previous one.
        for iteration in range(args.sample_iterations):
            if iteration == 0:
                sampled_graph, sample = self.sample(self.full_graph, args)
            else:
                sampled_graph, sample = self.sample(self.full_graph, args, sample.state)
            self.stack.append((sampled_graph, sample))
    # End of create_sample_stack()
    def sample(self, graph: Graph, args: argparse.Namespace, prev_state: SampleState = None) -> Tuple[Graph, Sample]:
        """Sample a set of vertices from the graph.

        Parameters
        ----------
        graph : Graph
            unused; kept for signature compatibility. The sample is always
            drawn from ``self.full_graph`` (see the body below).
        args : Namespace
            the parsed command-line arguments
        prev_state : SampleState
            if prev_state is not None, sample will be conditioned on the previously selected vertices

        Returns
        ------
        sampled_graph : Graph
            the sampled graph created from the sampled Graph vertices
        sample : Sample
            the sample object containing the vertex and block mappings
        """
        # Per-iteration sample size: an equal share of the requested
        # percentage of the full graph's vertices.
        sample_size = int((self.full_graph.num_vertices() * (args.sample_size / 100)) / args.sample_iterations)
        if prev_state is None:
            prev_state = SampleState(sample_size)
        sample_object = Sample.create_sample(self.full_graph, self.true_block_assignment, args, prev_state)
        return sample_object.graph, sample_object
    # End of sample()
    def _push(self):
        # Add a subsample to the stack -- not implemented; samples are only
        # appended by create_sample_stack().
        raise NotImplementedError()
    # End of _push()
    def _pop(self) -> Tuple[Graph, Sample]:
        # Propagate a subsample's results up the stack.
        # Pops from the FRONT of the list (oldest sample first).
        return self.stack.pop(0)
    # End of _pop()
    def unstack(self, args: argparse.Namespace, sampled_graph_partition: BlockState = None,
                evaluation: Evaluation = None) -> Tuple[Graph, BlockState, Dict, Dict, Evaluation]:
        """Performs SBP on the first (innermost) sample. Merges said sample with the next in the stack, and performs
        SBP on the combined results. Repeats the process until all samples have been partitioned.

        Parameters
        ---------
        args : argparse.Namespace
            the command-line arguments supplied by the user
        sampled_graph_partition : BlockState
            the current partitioned state of the sampled graph. Default = None
        evaluation : Evaluation
            the current state of the evaluation of the algorithm. Default = None

        Returns
        -------
        sampled_graph : Graph
            the Graph object describing the combined samples
        sampled_graph_partition : BlockState
            the partition results of the combined samples
        vertex_mapping : Dict[int, int]
            the mapping of the vertices from the combined sample to the full graph
        block_mapping : Dict[int, int]
            the mapping of the communities/blocks from the combined sample to the full graph
        """
        # Propagate results back through the stack
        sampled_graph, sample = self._pop()
        min_num_blocks = -1
        # denominator = 2
        # if args.sample_iterations > 1:
        #     min_num_blocks = int(sampled_graph.num_nodes / denominator)
        #     min_num_blocks = 0
        if evaluation is None:
            evaluation = Evaluation(args, sampled_graph)
        print("Subgraph: V = {} E = {}".format(sampled_graph.num_vertices(), sampled_graph.num_edges()))
        t0 = timeit.default_timer()
        combined_partition = minimize_blockmodel_dl(sampled_graph,
                                                    shrink_args={'parallel': True}, verbose=args.verbose,
                                                    mcmc_equilibrate_args={'verbose': args.verbose, 'epsilon': 1e-4})
        evaluation.sampled_graph_partition_time += (timeit.default_timer() - t0)
        combined_sampled_graph = sampled_graph
        # Fold each remaining sample into the combined result, one at a time.
        while len(self.stack) > 0:
            sampled_graph, next_sample = self._pop()
            t0 = timeit.default_timer()
            sample_partition = minimize_blockmodel_dl(sampled_graph,
                                                      shrink_args={'parallel': True}, verbose=args.verbose,
                                                      mcmc_equilibrate_args={'verbose': args.verbose, 'epsilon': 1e-4})
            evaluation.sampled_graph_partition_time += (timeit.default_timer() - t0)
            t1 = timeit.default_timer()
            # TODO: fix this to allow multi-sample strategies
            combined_partition, combined_sampled_graph, sample = self.combine_partition_with_sample(
                combined_partition, sample_partition, sample, next_sample, args
            )
            t2 = timeit.default_timer()
            # TODO: change to evaluation.merge_sample time?
            evaluation.propagate_membership += (t2 - t1)
        print("=====Performing final (combined) sample partitioning=====")
        if min_num_blocks > 0 or (args.sample_iterations > 1):
            # Re-partition the merged graph from the combined starting point.
            combined_partition.num_blocks_to_merge = 0
            sampled_graph_partition = minimize_blockmodel_dl(combined_sampled_graph,
                                                             shrink_args={'parallel': True}, verbose=args.verbose,
                                                             mcmc_equilibrate_args={'verbose': False, 'epsilon': 1e-4})
        else:
            sampled_graph_partition = combined_partition
        return (
            combined_sampled_graph, sampled_graph_partition, sample.vertex_mapping, sample.true_blocks_mapping,
            evaluation
        )
    # End of unstack()
    def extrapolate_sample_partition(self, sampled_graph_partition: BlockState, vertex_mapping: Dict[int, int],
                                     args: argparse.Namespace,
                                     evaluation: Evaluation) -> Tuple[Graph, BlockState, Evaluation]:
        """Extrapolates the partitioning results from the sample to the full graph.

        This is done by first assigning to every unsampled vertex, the community to which it's most strongly
        connected. Then, a fine-tuning step (MCMC updates using a modified Metropolis-Hasting algorithm) is run
        on the result.

        Parameters
        ---------
        sampled_graph_partition : BlockState
            the current partitioned state of the sampled graph
        vertex_mapping : Dict[int, int]
            the mapping of sample vertices to full vertices
        args : argparse.Namespace
            the command-line arguments supplied by the user
        evaluation : Evaluation
            the current state of the evaluation of the algorithm

        Returns
        -------
        full_graph : Graph
            the graph object representing the entire (unsampled) graph
        full_graph_partition : BlockState
            the partition state of the full graph after extrapolation and fine-tuning
        evaluation : Evaluation
            the evaluation results of the algorithm
        """
        t1 = timeit.default_timer()
        full_graph_partition = partition_from_sample(sampled_graph_partition, self.full_graph, vertex_mapping)
        t2 = timeit.default_timer()
        full_graph_partition = finetune_assignment(full_graph_partition, args)
        t3 = timeit.default_timer()
        # Record timing breakdown on the evaluation object.
        evaluation.loading = self.t_load_end - self.t_load_start
        evaluation.sampling = self.t_sample_end - self.t_load_end
        evaluation.propagate_membership += (t2 - t1)
        evaluation.finetune_membership += (t3 - t2)
        return self.full_graph, full_graph_partition, evaluation
    # End of extrapolate_sample_partition()
    def tail(self) -> Tuple[Graph, Sample]:
        # Get innermost sample (front of the list) without removing it.
        return self.stack[0]
    # End of tail()
# End of SampleStack()
| StarcoderdataPython |
1797371 |
# major libraries import
from numpy import array, concatenate
#project libraries import
from Regulator import Regulator
#from TTi import PStsx3510P
#from Agilent import A34401_temp
class PIDHeater(Regulator):
    """
    Provides P-I-D temperature regulation using a 10 A power source
    and a platinum resistor temperature sensor.
    """

    class PIDregul:
        """Holder for the PID gains, time constants and running terms."""
        Kp = 0.1    # proportional gain
        Ti = 0.0    # integral time currently in effect (None disables the I term)
        Tii = 0.0   # configured integral time, restored when the valve is back in range
        Td = 0.0    # derivative time (None disables the D term)
        P = 0.0
        I = 0.0     # accumulated integral contribution
        D = 0.0     # last derivative contribution
        error = None      # current setpoint error
        pastError = None  # previous setpoint error (used by the D term)

    def __init__(self, Valve, Sensor, Kp=0.1, Ti=None, Td=None, looptime=None):
        """Initialize the regulator, switch the valve output on at 0.

        Parameters: Valve/Sensor instruments, PID gains and the regulator
        loop time in seconds (defaults to 1 s).
        """
        if looptime is None:
            # BUG FIX: this used to assign a dead local ``loopTime``, so the
            # Regulator base class was initialized with ``None``.
            looptime = 1.0  # default regulator loop time: 1 s
        Regulator.__init__(self, Valve, Sensor, looptime)
        self.valve.Set(0)
        self.valve.set_output_on()
        self.Readings.update(self.sensor.Readings)  # include the readings of the sensor
        self.Readings.update(self.valve.Readings)   # valve readings are unnecessary
        self.valveError = 0
        self.senseError = 0
        self.pid = self.PIDregul()
        self.set_PID_params(Kp, Ti, Td, looptime)

    def Measure(self):
        """Refresh self.Readings and return a composite error code.

        The code packs the individual error sources:
        sensor error + 100 * valve error + 1000 * setpoint-invalid flag.
        """
        if self.setpoint.ready:
            timestamp = self.timer.makeTimestamp()
            setpointOldness = timestamp - self.setpoint.timestamp
            sigma = self.setpoint.sigma / self.setpoint.loopCounter
            self.Readings['SPage(s)'] = array([setpointOldness])
            self.Readings['sigma()'] = array([sigma])
            setpointError = 0
        else:
            self.Readings['SPage(s)'] = array([0.0])
            self.Readings['sigma()'] = array([0.0])
            setpointError = 1  # 1 = setpoint invalidated
        self.Readings.update(self.sensor.Readings)  # include the readings of the sensor
        self.Readings.update(self.valve.Readings)   # and valve
        return self.senseError + 100 * self.valveError + 1000 * setpointError

    def _calculateNew(self):
        """One PID update: compute the new control signal into self.fb.signal."""
        self.pid.pastError = self.pid.error if self.pid.error is not None else 0
        self.pid.error = - self.setpoint.deviation
        # Anti-windup: freeze the integral term while the valve saturates.
        if self.valve.out_of_bounds:
            self.pid.Ti = None
        else:
            self.pid.Ti = self.pid.Tii
        if self.pid.Ti is not None:
            self.pid.I = self.pid.I + self.fb.measLoopTime / self.pid.Ti * self.pid.error
        else:
            self.pid.I = 0
        if self.pid.Td is not None:
            self.pid.D = (self.pid.error - self.pid.pastError) / self.pid.Td / self.fb.measLoopTime
        else:
            self.pid.D = 0
        self.fb.signal = self.pid.Kp * (self.pid.error + self.pid.I + self.pid.D)

    def _adjust(self):
        """Apply the freshly computed control signal to the valve."""
        self.valve.Set(self.fb.signal)

    def _measure_sensor(self):
        """Read sensor (and valve status); cache and return the temperature."""
        self.senseError = self.sensor.Measure()  # get the instrument reading
        self.valveError = self.valve.Measure()
        # BUG FIX: the first reading was fetched with ``keys[0]`` on a dict
        # keys view, which is not subscriptable on Python 3; use an iterator.
        first_key = next(iter(self.sensor.Readings))
        value = self.sensor.Readings[first_key]  # first field is the sensor reading
        self.setpoint.actualValue = value
        return value

    def set_PID_params(self, Kp, Ti=None, Td=None, looptime=None):
        """Configure the PID gains/time constants and reset the integrator."""
        self.pid.Kp = Kp
        self.pid.Ti = Ti
        self.pid.Tii = Ti
        self.pid.Td = Td
        self.fb.loopTime = looptime if looptime is not None else 1.0
        self.resetI()

    def resetI(self):
        """Zero the accumulated integral term."""
        self.pid.I = 0.0
| StarcoderdataPython |
3382482 | import argparse
import os
import torch
import logging
from init_tool import init_all
from config_parser import create_config
from train_tool import train
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
    datefmt='%m/%d/%Y %H:%M:%S',
    level=logging.INFO,
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', '-c', default="config/default.config", help="specific config file", required=False)
    parser.add_argument('--gpu', '-g', default='0', help="gpu id list")
    parser.add_argument('--checkpoint', help="checkpoint file path")
    parser.add_argument('--do_test', help="do test while training or not", default=False, action="store_true")
    args = parser.parse_args()

    # Expose the requested devices; after CUDA_VISIBLE_DEVICES remapping the
    # visible devices are numbered 0..n-1, so the logical id list is a range.
    gpu_list = []
    if args.gpu:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
        gpu_list = list(range(len(args.gpu.split(","))))

    config = create_config(args.config)

    if not torch.cuda.is_available() and len(gpu_list) > 0:
        logger.error("CUDA is not available but specific gpu id")
        raise NotImplementedError
    else:
        logger.info("CUDA available")

    parameters = init_all(config, gpu_list, args.checkpoint, "train")
    train(parameters, config, gpu_list, args.do_test)
| StarcoderdataPython |
1651321 | <filename>src/original/McGill_Can/vfa.py
import numpy as np
from numpy.linalg import norm
def despot(signal, alpha, TR):
    """Linear (DESPOT1) M0/T1 fit of variable-flip-angle SPGR data.

    Ref: Deoni S. C. L., Peters T. M., & Rutt B. K. (2005). MRM 53(1),
    237-241. https://doi.org/10.1002/mrm.20314
    Based on Matlab code by Ives Levesque, McGill University.

    Parameters
    ----------
    signal : ndarray
        VFA magnitude data; the last axis indexes the flip angles.
    alpha : array_like
        Flip angles in radians.
    TR : float
        Repetition time (same unit as the returned T1).

    Returns
    -------
    M0, T1 : equilibrium magnetization and longitudinal relaxation time.
    """
    # The SPGR equation is linear in (S/tan(a), S/sin(a)):
    #     y = E1 * x + M0 * (1 - E1),  with E1 = exp(-TR / T1),
    # so a least-squares line fit recovers both parameters.
    num_angles = len(alpha)
    abscissa = signal / np.tan(alpha)
    ordinate = signal / np.sin(alpha)
    sum_x = np.sum(abscissa, axis=-1)
    sum_y = np.sum(ordinate, axis=-1)
    slope_num = np.sum(abscissa * ordinate, axis=-1) - sum_x * sum_y / num_angles
    slope_den = np.sum(abscissa * abscissa, axis=-1) - sum_x ** 2 / num_angles
    slope = slope_num / slope_den
    intercept = np.mean(ordinate, axis=-1) - slope * np.mean(abscissa, axis=-1)
    # Map (intercept, slope) back to the physical parameters.
    M0 = intercept / (1 - slope)
    T1 = -TR / np.log(slope)
    return M0, T1
def novifast(signal, alpha, TR, initialvalues=(5000, 1500), maxiter=10, tol=1e-6, doiterative=True):
    """Estimate M0 and T1 maps from variable-flip-angle SPGR data with NOVIFAST.

    Ref: Ramos-Llorden G. et al., "NOVIFAST: a fast algorithm for accurate
    and precise VFA MRI T1 mapping", IEEE Trans. Med. Imag.,
    doi:10.1109/TMI.2018.2833288

    Parameters
    ----------
    signal : ndarray
        VFA magnitude data; the last axis indexes the flip angles.
    alpha : array_like
        Flip angles in radians, one per entry of signal's last axis.
    TR : float
        Repetition time (same unit as the returned T1).
    initialvalues : sequence of two floats
        Initial (M0, T1) guess seeding the iteration. (Default changed from
        a mutable list to a tuple; values are unchanged.)
    maxiter : int
        Maximum number of iterations when ``doiterative`` is True.
    tol : float
        Relative convergence tolerance on the c2 coefficient.
    doiterative : bool
        If False, perform a single (non-iterative) update step.

    Returns
    -------
    M0, T1 : ndarray
        Parameter maps with the spatial shape of ``signal`` (shape (1,) for
        a single voxel).
    """
    spatialdims = signal.shape[:-1]
    if not spatialdims:
        spatialdims = [1]  # single-voxel input: return 1-element maps
    numvox = int(np.prod(spatialdims))
    numangles = signal.shape[-1]
    # Work on a flat (voxels x angles) view of the data.
    y = signal.reshape(-1, numangles)
    sinfa = np.broadcast_to(np.sin(alpha), (numvox, numangles))
    cosfa = np.broadcast_to(np.cos(alpha), (numvox, numangles))
    initialM0, initialT1 = initialvalues
    # solA and solB are c1 and c2 in the paper, parameterizing the SPGR model
    #     y = c1*sin(a) / (1 - c2*cos(a)),  c1 = M0*(1 - E1),  c2 = E1,
    # with E1 = exp(-TR/T1).
    # BUG FIX: the c1 seed used to read initialM0 * (1 * np.exp(-TR/T1)),
    # a typo for (1 - np.exp(-TR/T1)); the wrong seed skewed the starting
    # point and gave a wrong answer when doiterative=False.
    solA = np.repeat(initialM0 * (1 - np.exp(-TR / initialT1)), numvox)
    solB = np.repeat(np.exp(-TR / initialT1), numvox)
    k = 0
    done = False
    while not done:
        solB_prev = np.copy(solB)
        # Broadcast the per-voxel coefficients across the flip-angle axis.
        # (Replaces the deprecated np.asmatrix(...).T construction.)
        solA = np.broadcast_to(solA.reshape(-1, 1), (numvox, numangles))
        solB = np.broadcast_to(solB.reshape(-1, 1), (numvox, numangles))
        # Based on equations 24 to 27 in the paper.
        denominator = 1 - cosfa * solB
        Z = y / denominator
        A = cosfa * Z
        B = sinfa / denominator
        Abar = cosfa * B * solA / denominator
        # Terms of the per-voxel 2x2 linear system in Eq. 28 of the paper.
        BB = np.sum(B * B, axis=1)
        BA = np.sum(B * A, axis=1)
        BZ = np.sum(B * Z, axis=1)
        AAbar = np.sum(A * Abar, axis=1)
        BAbar = np.sum(B * Abar, axis=1)
        ZAbar = np.sum(Z * Abar, axis=1)
        # Solve the system with Cramer's rule.
        determinant = BB * AAbar - BAbar * BA
        solA = (BZ * AAbar - ZAbar * BA) / determinant
        solB = (BB * ZAbar - BAbar * BZ) / determinant
        k += 1
        if not doiterative:
            done = True
        else:
            # Stop on relative change of c2 or when the budget is spent.
            err = norm(solB - solB_prev) / norm(solB)
            if err < tol or k >= maxiter:
                done = True
    # Convert (c1, c2) back to the physical parameters (M0, T1).
    M0 = solA / (1 - solB)
    T1 = -TR / np.log(solB)
    M0 = M0.reshape(spatialdims)
    T1 = T1.reshape(spatialdims)
    return M0, T1
| StarcoderdataPython |
98568 | <filename>v2.5.7/toontown/estate/DistributedTargetAI.py<gh_stars>1-10
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
import CannonGlobals, random
class DistributedTargetAI(DistributedObjectAI):
    """AI-side estate target used by the cannon/pinball activity.

    Tracks the current game's power level and timer, restarts games after a
    random pause, and hands out a toon-up reward to participating toons in
    the same zone when a game ends.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedTargetAI')

    def __init__(self, air):
        DistributedObjectAI.__init__(self, air)
        self.enabled = 0    # 1 while a game is running
        self.highscore = 0  # best pinball score seen so far
        self.scoreDict = {} # avId -> latest score * bonus
        self.__newGame()

    def announceGenerate(self):
        DistributedObjectAI.announceGenerate(self)
        # The first game starts 10 s after the object is generated.
        taskMgr.doMethodLater(10, self.__startNewGame, self.taskName('newGame'))

    def __newGame(self):
        # Reset per-game state; power doubles the reward per hit (setResult).
        self.power = 1
        self.time = CannonGlobals.CANNON_TIMEOUT

    def getPosition(self):
        return (0, 0, 40)

    def getState(self):
        # (enabled flag, current reward value, seconds remaining)
        return (self.enabled, 2 ** self.power, self.time)

    def d_updateState(self):
        self.sendUpdate('setState', self.getState())

    def d_setReward(self, reward):
        self.sendUpdate('setReward', [reward])

    def setResult(self, avId):
        """Called on a target hit: raise the power and shorten the timer."""
        if avId and self.enabled:
            self.power += 1
            self.time = int(CannonGlobals.CANNON_TIMEOUT / self.power)
            taskMgr.remove(self.taskName('gameover'))
            taskMgr.doMethodLater(self.time, self.__gameOver, self.taskName('gameover'))
            self.d_updateState()

    def __gameOver(self, task):
        """End the game, reward toons still in this zone, schedule the next game."""
        self.enabled = 0
        self.time = 0
        self.d_updateState()
        # Pause 0.5 to 2.5 minutes (half-minute steps) before the next game.
        minutes = random.randrange(0, 3)
        half = random.randrange(0, 2)
        if half or minutes == 0:
            minutes += 0.5
        taskMgr.doMethodLater(minutes * 60, self.__startNewGame, self.taskName('newGame'))
        for avId in self.scoreDict:
            av = self.air.doId2do.get(avId)
            if av:
                if av.zoneId == self.zoneId:
                    av.toonUp(2 ** self.power)
        return task.done

    def __startNewGame(self, task):
        self.enabled = 1
        self.__newGame()
        self.d_updateState()
        taskMgr.doMethodLater(self.time, self.__gameOver, self.taskName('gameover'))
        return task.done

    def setBonus(self, bonus):
        # Intentionally a no-op here; the bonus is presumably folded in via
        # setCurPinballScore -- confirm against the client-side class.
        pass

    def setCurPinballScore(self, avId, score, bonus):
        """Record an avatar's pinball score and broadcast a new high score."""
        av = self.air.doId2do.get(avId)
        if not av:
            return
        S = score * bonus
        self.scoreDict[avId] = S
        if S > self.highscore:
            self.highscore = S
            self.d_updateHighscore(av, S)

    def d_updateHighscore(self, av, score):
        self.sendUpdate('setPinballHiScorer', [av.getName()])
        self.sendUpdate('setPinballHiScore', [score])

    def delete(self):
        # Cancel any pending tasks before the object goes away.
        # (BUG FIX: a stray "| StarcoderdataPython" token fused onto the last
        # line would have raised NameError at runtime; removed.)
        taskMgr.remove(self.taskName('newGame'))
        taskMgr.remove(self.taskName('gameover'))
4803346 | <reponame>Rohitm619/Softuni-Python-Basic
# Greets SoftUni on stdout.
# (BUG FIX: a stray "| StarcoderdataPython" token fused onto this line would
# have raised NameError at runtime; removed. Also normalized `print (` to
# the conventional `print(`.)
print("Hello SoftUni")
1718457 | <gh_stars>1-10
#The MIT License
#
#Copyright (c) 2017 DYNI machine learning & bioacoustics team - Univ. Toulon
#
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import logging
from os.path import join
from librosa.core.audio import load as load_audio
import numpy as np
from dynibatch.utils.exceptions import DynibatchError
from dynibatch.features.extractors.segment_feature import SegmentFeatureExtractor
logger = logging.getLogger(__name__)
class AudioChunkExtractor(SegmentFeatureExtractor):
    """Attaches to every segment of a segment container the raw audio chunk it covers.

    A trailing chunk shorter than the segment size is dropped, unless it is
    the only chunk, in which case it is zero-padded up to the segment size.
    """

    def __init__(self, audio_root, sample_rate):
        """Create an audio chunk extractor.

        Args:
            audio_root (str): audio files root path
            sample_rate (int): sample rate shared by every file under
                audio_root (they must all match)
        """
        super().__init__()
        self._audio_root = audio_root
        self._sample_rate = sample_rate

    @property
    def name(self):
        # Feature name under which chunks are stored: the module's short name.
        return self.__module__.split('.')[-1]

    def execute(self, segment_container):
        """Store the matching audio chunk in every segment of the container.

        Args:
            segment_container (SegmentContainer)
        """
        path = join(self._audio_root, segment_container.audio_path)
        audio, _ = load_audio(path, sr=self._sample_rate)
        num_available = len(audio)
        for segment in segment_container.segments:
            duration = segment.end_time - segment.start_time
            num_samples = int(np.rint(duration * self._sample_rate))
            first = int(segment.start_time * self._sample_rate)
            if first + num_samples <= num_available:
                segment.features[self.name] = audio[first:first + num_samples]
            elif first == 0:
                # The whole file is shorter than one segment: zero-pad it.
                padded = np.zeros((num_samples,))
                padded[:num_available] = audio
                segment.features[self.name] = padded
            # Otherwise the chunk overruns the end of the file: skip it.
| StarcoderdataPython |
53293 | <reponame>kdart/pycopia
# python
# This file is generated by a program (mib2py). Any edits will be lost.
from pycopia.aid import Enum
import pycopia.SMI.Basetypes
Range = pycopia.SMI.Basetypes.Range
Ranges = pycopia.SMI.Basetypes.Ranges
from pycopia.SMI.Objects import ColumnObject, MacroObject, NotificationObject, RowObject, ScalarObject, NodeObject, ModuleObject, GroupObject
# imports
from SNMPv2_SMI import MODULE_IDENTITY, OBJECT_TYPE, snmpModules
from SNMPv2_CONF import MODULE_COMPLIANCE, OBJECT_GROUP
from SNMPv2_TC import RowStatus, StorageType
from SNMP_TARGET_MIB import SnmpTagValue, snmpTargetBasicGroup, snmpTargetResponseGroup
from SNMP_FRAMEWORK_MIB import SnmpEngineID, SnmpAdminString
# NOTE: everything below is machine-generated by mib2py (see header comment);
# manual edits will be overwritten on regeneration. Structure mirrors the
# SNMP-PROXY-MIB definition from RFC 3413.
class SNMP_PROXY_MIB(ModuleObject):
    path = '/usr/share/mibs/ietf/SNMP-PROXY-MIB'
    name = 'SNMP-PROXY-MIB'
    language = 2
    description = 'This MIB module defines MIB objects which provide\nmechanisms to remotely configure the parameters\nused by a proxy forwarding application.\n\nCopyright (C) The Internet Society (2002). This\nversion of this MIB module is part of RFC 3413;\nsee the RFC itself for full legal notices.'

# nodes
class snmpProxyMIB(NodeObject):
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 14])
    name = 'snmpProxyMIB'

class snmpProxyObjects(NodeObject):
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 14, 1])
    name = 'snmpProxyObjects'

class snmpProxyConformance(NodeObject):
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 14, 3])
    name = 'snmpProxyConformance'

class snmpProxyCompliances(NodeObject):
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 14, 3, 1])
    name = 'snmpProxyCompliances'

class snmpProxyGroups(NodeObject):
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 14, 3, 2])
    name = 'snmpProxyGroups'

# macros
# types
# scalars
# columns
# NOTE(review): 'access' / 'status' are numeric SMI codes whose semantics are
# defined by pycopia.SMI -- confirm before interpreting them here.
class snmpProxyName(ColumnObject):
    access = 2
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 14, 1, 2, 1, 1])
    syntaxobject = SnmpAdminString

class snmpProxyType(ColumnObject):
    status = 1
    access = 5
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 14, 1, 2, 1, 2])
    syntaxobject = pycopia.SMI.Basetypes.Enumeration
    enumerations = [Enum(1, 'read'), Enum(2, 'write'), Enum(3, 'trap'), Enum(4, 'inform')]

class snmpProxyContextEngineID(ColumnObject):
    access = 5
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 14, 1, 2, 1, 3])
    syntaxobject = SnmpEngineID

class snmpProxyContextName(ColumnObject):
    access = 5
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 14, 1, 2, 1, 4])
    syntaxobject = SnmpAdminString

class snmpProxyTargetParamsIn(ColumnObject):
    access = 5
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 14, 1, 2, 1, 5])
    syntaxobject = SnmpAdminString

class snmpProxySingleTargetOut(ColumnObject):
    access = 5
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 14, 1, 2, 1, 6])
    syntaxobject = SnmpAdminString

class snmpProxyMultipleTargetOut(ColumnObject):
    access = 5
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 14, 1, 2, 1, 7])
    syntaxobject = SnmpTagValue

class snmpProxyStorageType(ColumnObject):
    access = 5
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 14, 1, 2, 1, 8])
    syntaxobject = pycopia.SMI.Basetypes.StorageType

class snmpProxyRowStatus(ColumnObject):
    access = 5
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 14, 1, 2, 1, 9])
    syntaxobject = pycopia.SMI.Basetypes.RowStatus

# rows
class snmpProxyEntry(RowObject):
    status = 1
    index = pycopia.SMI.Objects.IndexObjects([snmpProxyName], True)
    create = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 14, 1, 2, 1])
    access = 2
    rowstatus = snmpProxyRowStatus
    columns = {'snmpProxyName': snmpProxyName, 'snmpProxyType': snmpProxyType, 'snmpProxyContextEngineID': snmpProxyContextEngineID, 'snmpProxyContextName': snmpProxyContextName, 'snmpProxyTargetParamsIn': snmpProxyTargetParamsIn, 'snmpProxySingleTargetOut': snmpProxySingleTargetOut, 'snmpProxyMultipleTargetOut': snmpProxyMultipleTargetOut, 'snmpProxyStorageType': snmpProxyStorageType, 'snmpProxyRowStatus': snmpProxyRowStatus}

# notifications (traps)
# groups
class snmpProxyGroup(GroupObject):
    access = 2
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 14, 3, 2, 3])
    group = [snmpProxyType, snmpProxyContextEngineID, snmpProxyContextName, snmpProxyTargetParamsIn, snmpProxySingleTargetOut, snmpProxyMultipleTargetOut, snmpProxyStorageType, snmpProxyRowStatus]

# capabilities
# special additions
# Add to master OIDMAP.
from pycopia import SMI
SMI.update_oidmap(__name__)
| StarcoderdataPython |
89320 | <filename>src/assignments/main_assignment7.py<gh_stars>0
from src.assignments.assignment7 import sum_list_values
'''
Create a function named process_list that calls the sum_list_values function.
Prints the list values and the sum of the element in the list as follows:
joe 10 15 20 30 40 sum: 115
process_list(['joe', 10, 15, 20, 30, 40])
Create a main function.
In the function loop as long as user wants to add another list.
Prompt the user for name and append to the list.
Prompt the user for number of numeric values in the list.
Iterate the number of times the user enters and prompt end-user for n numeric values.
Call the main function
--------------------
joe 10 15 20 30 40
bill 23 16 19 22
sue 8 22 17 14 32 17 24 21 2 9 11 17
grace 12 28 21 45 26 10
john 14 32 25 16 89
'''
def process_list(list1):
    """Print the list's values space-separated, followed by their sum.

    Expected format (per the assignment spec in the module header):
        joe 10 15 20 30 40 sum: 115
    """
    total = sum_list_values(list1)
    # Unpack the list so values print space-separated instead of as a
    # bracketed list repr (previous output: "['joe', 10, ...] sum:  115").
    print(*list1, 'sum:', total)
def main():
    """Repeatedly read a name plus n numbers from the user and process them.

    Loops as long as the user answers 'y'; each iteration builds a fresh
    list of [name, number, ...] and passes it to process_list.
    """
    keep_going = 'y'
    while keep_going == 'y':
        # Fresh list per iteration; previously the list was created once
        # outside the loop, so every run re-printed all earlier entries.
        new_list = []
        name = input('Print the name you wish to append: ')
        new_list.append(name)
        num = int(input('How many numbers need to be entered? '))
        for _ in range(num):
            new_list.append(int(input('Enter numeric values for the list: ')))
        process_list(new_list)
        keep_going = input('Enter y to keep going')


if __name__ == '__main__':
    # Guarded so importing this module no longer triggers the interactive loop.
    main()
| StarcoderdataPython |
3205944 | <reponame>skateman/insights-core<filename>insights/specs/datasources/ipcs.py
"""
Custom datasources to get the semid of all the inter-processes.
"""
from insights.core.context import HostContext
from insights.core.plugins import datasource
from insights.specs import Specs
from insights.core.dr import SkipComponent
@datasource(Specs.ipcs_s, HostContext)
def semid(broker):
    """
    Datasource returning the semid of every inter-process semaphore set
    whose owner is one of root, apache or oracle.

    Note:
        This datasource may be executed using the following command:

        ``insights cat --no-header ipcs_s_i``

    Sample output::

        [
            '65570', '98353', '98354'
        ]

    Returns:
        list: A list of the semid of all the inter-processes.

    Raises:
        SkipComponent: when no matching semaphore entries are found.
    """
    allowed_owners = ('root', 'apache', 'oracle')
    semids = set()
    for line in broker[Specs.ipcs_s].content:
        fields = line.split()
        # Expected `ipcs -s` layout (5 fields):
        #   key        semid  owner   perms  nsems
        #   0x00000000 65536  apache  600    1
        if len(fields) == 5 and fields[1].isdigit() and fields[2] in allowed_owners:
            semids.add(fields[1])
    if not semids:
        raise SkipComponent
    return list(semids)
| StarcoderdataPython |
28434 | <gh_stars>0
#
# Copyright (c) 2013 Docker, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo.utils import importutils
import six
from heat.common import exception
from heat.common import template_format
from heat.engine import resource
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.tests.common import HeatTestCase
from heat.tests import utils
from testtools import skipIf
from ..resources import docker_container # noqa
from .fake_docker_client import FakeDockerClient # noqa
docker = importutils.try_import('docker')
template = '''
{
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Test template",
"Parameters": {},
"Resources": {
"Blog": {
"Type": "DockerInc::Docker::Container",
"Properties": {
"image": "samalba/wordpress",
"env": [
"FOO=bar"
]
}
}
}
}
'''
class DockerContainerTest(HeatTestCase):
    """Unit tests for the DockerInc::Docker::Container Heat resource plugin.

    Uses mox-style stubbing (self.m) with FakeDockerClient standing in for
    the real docker-py client.
    """

    def setUp(self):
        super(DockerContainerTest, self).setUp()
        # Register the plugin's resource types with the Heat engine so the
        # module-level template can be instantiated.
        for res_name, res_class in docker_container.resource_mapping().items():
            resource._register_class(res_name, res_class)
        self.addCleanup(self.m.VerifyAll)

    def create_container(self, resource_name):
        # Helper: parse the shared template, create the named resource backed
        # by a FakeDockerClient, and assert it reaches CREATE/COMPLETE.
        t = template_format.parse(template)
        stack = utils.parse_stack(t)
        resource = docker_container.DockerContainer(
            resource_name,
            stack.t.resource_definitions(stack)[resource_name], stack)
        self.m.StubOutWithMock(resource, 'get_client')
        resource.get_client().MultipleTimes().AndReturn(FakeDockerClient())
        self.assertIsNone(resource.validate())
        self.m.ReplayAll()
        scheduler.TaskRunner(resource.create)()
        self.assertEqual((resource.CREATE, resource.COMPLETE),
                         resource.state)
        return resource

    def get_container_state(self, resource):
        # Returns the fake client's "State" dict for the created container.
        client = resource.get_client()
        return client.inspect_container(resource.resource_id)['State']

    def test_resource_create(self):
        container = self.create_container('Blog')
        self.assertTrue(container.resource_id)
        running = self.get_container_state(container)['Running']
        self.assertIs(True, running)
        client = container.get_client()
        self.assertEqual(['samalba/wordpress'], client.pulled_images)
        self.assertIsNone(client.container_create[0]['name'])

    def test_create_with_name(self):
        t = template_format.parse(template)
        stack = utils.parse_stack(t)
        definition = stack.t.resource_definitions(stack)['Blog']
        definition['Properties']['name'] = 'super-blog'
        resource = docker_container.DockerContainer(
            'Blog', definition, stack)
        self.m.StubOutWithMock(resource, 'get_client')
        resource.get_client().MultipleTimes().AndReturn(FakeDockerClient())
        self.assertIsNone(resource.validate())
        self.m.ReplayAll()
        scheduler.TaskRunner(resource.create)()
        self.assertEqual((resource.CREATE, resource.COMPLETE),
                         resource.state)
        client = resource.get_client()
        self.assertEqual(['samalba/wordpress'], client.pulled_images)
        self.assertEqual('super-blog', client.container_create[0]['name'])

    @mock.patch.object(docker_container.DockerContainer, 'get_client')
    def test_create_failed(self, test_client):
        # A container exiting with a non-zero code must surface its logs in
        # the raised ResourceInError.
        mock_client = mock.Mock()
        mock_client.inspect_container.return_value = {
            "State": {
                "ExitCode": -1
            }
        }
        mock_client.logs.return_value = "Container startup failed"
        test_client.return_value = mock_client
        mock_stack = mock.Mock()
        mock_stack.db_resource_get.return_value = None
        res_def = mock.Mock(spec=rsrc_defn.ResourceDefinition)
        docker_res = docker_container.DockerContainer("test", res_def,
                                                      mock_stack)
        exc = self.assertRaises(resource.ResourceInError,
                                docker_res.check_create_complete,
                                'foo')
        self.assertIn("Container startup failed", six.text_type(exc))

    def test_start_with_bindings_and_links(self):
        # port_bindings and links from the template must be forwarded to the
        # docker client's start call.
        t = template_format.parse(template)
        stack = utils.parse_stack(t)
        definition = stack.t.resource_definitions(stack)['Blog']
        definition['Properties']['port_bindings'] = {
            '80/tcp': [{'HostPort': '80'}]}
        definition['Properties']['links'] = {'db': 'mysql'}
        resource = docker_container.DockerContainer(
            'Blog', definition, stack)
        self.m.StubOutWithMock(resource, 'get_client')
        resource.get_client().MultipleTimes().AndReturn(FakeDockerClient())
        self.assertIsNone(resource.validate())
        self.m.ReplayAll()
        scheduler.TaskRunner(resource.create)()
        self.assertEqual((resource.CREATE, resource.COMPLETE),
                         resource.state)
        client = resource.get_client()
        self.assertEqual(['samalba/wordpress'], client.pulled_images)
        self.assertEqual({'db': 'mysql'}, client.container_start[0]['links'])
        self.assertEqual(
            {'80/tcp': [{'HostPort': '80'}]},
            client.container_start[0]['port_bindings'])

    def test_resource_attributes(self):
        container = self.create_container('Blog')
        # Test network info attributes
        self.assertEqual('172.17.42.1', container.FnGetAtt('network_gateway'))
        self.assertEqual('172.17.0.3', container.FnGetAtt('network_ip'))
        self.assertEqual('1080', container.FnGetAtt('network_tcp_ports'))
        self.assertEqual('', container.FnGetAtt('network_udp_ports'))
        # Test logs attributes
        self.assertEqual('---logs_begin---', container.FnGetAtt('logs_head'))
        self.assertEqual('---logs_end---', container.FnGetAtt('logs_tail'))
        # Test a non existing attribute
        self.assertRaises(exception.InvalidTemplateAttribute,
                          container.FnGetAtt, 'invalid_attribute')

    def test_resource_delete(self):
        container = self.create_container('Blog')
        scheduler.TaskRunner(container.delete)()
        self.assertEqual((container.DELETE, container.COMPLETE),
                         container.state)
        running = self.get_container_state(container)['Running']
        self.assertIs(False, running)

    def test_resource_already_deleted(self):
        # Deleting twice must be idempotent: the second delete is a no-op.
        container = self.create_container('Blog')
        scheduler.TaskRunner(container.delete)()
        running = self.get_container_state(container)['Running']
        self.assertIs(False, running)
        scheduler.TaskRunner(container.delete)()
        self.m.VerifyAll()

    @skipIf(docker is None, 'docker-py not available')
    def test_resource_delete_exception(self):
        # A 404 from docker during kill/inspect must not fail the delete.
        response = mock.MagicMock()
        response.status_code = 404
        response.content = 'some content'
        container = self.create_container('Blog')
        self.m.StubOutWithMock(container.get_client(), 'kill')
        container.get_client().kill(container.resource_id).AndRaise(
            docker.errors.APIError('Not found', response))
        self.m.StubOutWithMock(container, '_get_container_status')
        container._get_container_status(container.resource_id).AndRaise(
            docker.errors.APIError('Not found', response))
        self.m.ReplayAll()
        scheduler.TaskRunner(container.delete)()
        self.m.VerifyAll()

    def test_resource_suspend_resume(self):
        container = self.create_container('Blog')
        # Test suspend
        scheduler.TaskRunner(container.suspend)()
        self.assertEqual((container.SUSPEND, container.COMPLETE),
                         container.state)
        running = self.get_container_state(container)['Running']
        self.assertIs(False, running)
        # Test resume
        scheduler.TaskRunner(container.resume)()
        self.assertEqual((container.RESUME, container.COMPLETE),
                         container.state)
        running = self.get_container_state(container)['Running']
        self.assertIs(True, running)
| StarcoderdataPython |
1631611 | '''A library of Python timeslot functions and terms for use within an ASP
program. When reasoning with time it is often unnecessary (and expensive) to
reason at the minute (or smaller) granularity. Instead it is often useful to
reason in multi-minute time blocks, such as 15 minute blocks.
This library provides a flexible mechanism to define timeslots and the
provides functions for converting times to timeslots. It also provides ASP
callable functions. The limitation is that the time granurality is minute
based and the must divide a day evenly (e.g., 15 minute blocks).
Note: functions that are ASP callable have a prefix ``cl_``.
.. code-block:: none
date(@cl_date_range("2018-01-01", "2018-01-10")).
This will generate a number of ``date/1`` facts, each containing a date
encoded string between the desired two dates.
'''
#------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------
import datetime
import math
from ..orm import ComplexTerm, IntegerField, StringField,\
ConstantField, make_function_asp_callable, make_method_asp_callable
#------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------
class TimeField(StringField):
    '''A Clorm TimeField that converts to and from a Python datetime.time
    object. Encodes time as a string in "HH:MM" format.
    '''
    # Clingo -> Python: anchor the "HH:MM" string to a dummy date so
    # strptime can parse it, then keep only the time component.
    def _cltopy(timestr):
        dtstr = "2000-01-01 {}".format(timestr)
        return datetime.datetime.strptime(dtstr,"%Y-%m-%d %H:%M").time()
    cltopy = _cltopy
    # Python -> Clingo: render the time back to "HH:MM".
    pytocl = lambda tm: tm.strftime("%H:%M")
#------------------------------------------------------------------------------
# Granularity for a timeslot
#------------------------------------------------------------------------------
class Granularity(object):
    """A fixed time-slot length, in whole minutes, that evenly divides a day.

    Raises ValueError at construction if the requested length does not
    divide 24 hours exactly.
    """
    MINUTES_PER_HOUR = 60
    MINUTES_PER_DAY = 24 * MINUTES_PER_HOUR

    def __init__(self, hours=0, minutes=0):
        total = hours * Granularity.MINUTES_PER_HOUR + minutes
        span = datetime.timedelta(minutes=total)
        if Granularity.MINUTES_PER_DAY % total != 0:
            raise ValueError(
                "Granularity of {} does not evenly divide 24 hours".format(span))
        self._minutes = total
        self._timedelta = span

    def num_per_day(self):
        # Number of slots in 24 hours (integral by the __init__ check).
        return Granularity.MINUTES_PER_DAY // self._minutes

    def minutes(self):
        return self._minutes

    def timedelta(self):
        return self._timedelta

    def num_to_minutes(self, num):
        return num * self._minutes

    def num_to_timedelta(self, num):
        return datetime.timedelta(minutes=num * self._minutes)

    def minutes_to_num(self, minutes):
        return minutes / self._minutes

    def timedelta_to_num(self, delta):
        '''Converts timedelta to num intervals (returns a float).'''
        return (delta.total_seconds() / 60.0) / self._minutes

    # --------------------------------------------------------------------------
    # ASP-callable wrappers around the methods above.
    # --------------------------------------------------------------------------
    cl_num_per_day = make_method_asp_callable(IntegerField, num_per_day)
    cl_minutes = make_method_asp_callable(IntegerField, minutes)
    cl_num_to_minutes = make_method_asp_callable(IntegerField, IntegerField, num_to_minutes)
    cl_minutes_to_num = make_method_asp_callable(IntegerField, IntegerField, minutes_to_num)
#------------------------------------------------------------------------------
# TimeSlot is a tuple containing an index and a time object.
#------------------------------------------------------------------------------
class TimeSlot(ComplexTerm):
    '''An enumerated complex term for encoding time slots.

    A pair of (idx, start): idx is the zero-based slot index within the
    day; start is the slot's start time (encoded "HH:MM" via TimeField).
    '''
    idx = IntegerField()
    start = TimeField()
    # Encode as a plain ASP tuple rather than a named function term.
    class Meta: is_tuple=True
#------------------------------------------------------------------------------
# An enumerated date range class
#------------------------------------------------------------------------------
class Range(object):
    '''Generates and indexes the fixed set of time slots covering one day
    at a given granularity.
    '''
    # Midnight: the anchor from which all slot start times are computed.
    ZERO_TIME=datetime.time(hour=0,minute=0,second=0)

    def __init__(self, granularity):
        """Precompute every TimeSlot of the day for *granularity*.

        Args:
            granularity (Granularity): slot length; must evenly divide 24h.
        """
        self._granularity = granularity
        num_timeslots = self._granularity.num_per_day()
        self._starttime_to_timeslot = {}
        self._timeslots = []
        # Walk from midnight in granularity-sized steps, recording each slot
        # both by index and by start time.
        currdt = datetime.datetime.combine(datetime.date.today(), Range.ZERO_TIME)
        for idx in range(0, num_timeslots):
            ts = TimeSlot(idx=idx, start=currdt.time())
            currdt += self._granularity.timedelta()
            self._timeslots.append(ts)
            self._starttime_to_timeslot[ts.start] = ts

    # --------------------------------------------------------------------------
    #
    # --------------------------------------------------------------------------
    @property
    def granularity(self):
        # The Granularity this range was built with.
        return self._granularity

    # --------------------------------------------------------------------------
    #
    # --------------------------------------------------------------------------
    def num_timeslots(self):
        # Total number of slots in the day.
        return self._granularity.num_per_day()

    def range(self):
        # Defensive copy so callers cannot mutate the internal list.
        return list(self._timeslots)

    def timeslot(self, idx):
        # Slot by zero-based index.
        return self._timeslots[idx]

    # --------------------------------------------------------------------------
    #
    # --------------------------------------------------------------------------
    def _timeslot_partial_idx(self, time):
        # Fractional slot index of *time*, measured from midnight.
        ct = datetime.datetime.combine(datetime.date.today(), time)
        zt = datetime.datetime.combine(datetime.date.today(), Range.ZERO_TIME)
        delta = ct - zt
        delta_mins = delta.total_seconds()/60
        return delta_mins/self._granularity.minutes()

    def timeslot_round(self, time):
        # NOTE(review): for times near the end of the day, round/ceil can
        # yield an index equal to num_timeslots() and raise IndexError --
        # confirm whether wrapping or clamping is the intended behavior.
        idx = round(self._timeslot_partial_idx(time))
        return self.timeslot(idx)

    def timeslot_ceil(self, time):
        # Slot whose start time is at or after *time* (see NOTE above).
        idx = math.ceil(self._timeslot_partial_idx(time))
        return self.timeslot(idx)

    def timeslot_floor(self, time):
        # Slot containing *time* (start at or before *time*).
        idx = math.floor(self._timeslot_partial_idx(time))
        return self.timeslot(idx)

    # --------------------------------------------------------------------------
    # ASP-callable wrappers around the methods above.
    # --------------------------------------------------------------------------
    cl_range = make_method_asp_callable([TimeSlot.Field], range)
    cl_num_timeslots = make_method_asp_callable(IntegerField, num_timeslots)
    cl_timeslot = make_method_asp_callable(IntegerField, TimeSlot.Field, timeslot)
    cl_timeslot_round = make_method_asp_callable(TimeField, TimeSlot.Field, timeslot_round)
    cl_timeslot_ceil = make_method_asp_callable(TimeField, TimeSlot.Field, timeslot_ceil)
    cl_timeslot_floor = make_method_asp_callable(TimeField, TimeSlot.Field, timeslot_floor)
#------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Generate some wrapper functions
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# main
#------------------------------------------------------------------------------
# Library module: refuse direct execution.
if __name__ == "__main__":
    raise RuntimeError('Cannot run modules')
| StarcoderdataPython |
3332969 | <filename>hue_control/hue_light.py<gh_stars>0
################################################################################################################################
# *** Copyright Notice ***
#
# "Price Based Local Power Distribution Management System (Local Power Distribution Manager) v1.0"
# Copyright (c) 2016, The Regents of the University of California, through Lawrence Berkeley National Laboratory
# (subject to receipt of any required approvals from the U.S. Dept. of Energy). All rights reserved.
#
# If you have questions about your rights to use or distribute this software, please contact
# Berkeley Lab's Innovation & Partnerships Office at <EMAIL>.
################################################################################################################################
from hue_control import HueBridge
# Controls a single light within the Hue system
class HueLight:
    """Controls a single Hue light through a HueBridge."""

    def __init__(self, bridge, light='1'):
        """Bind this controller to *light* on *bridge*.

        Falls back to a default HueBridge when *bridge* is not a HueBridge.
        """
        if not isinstance(bridge, HueBridge):
            print('No bridge provided, assuming default bridge')
            bridge = HueBridge()
        self.bridge = bridge
        self.light = light

    def get_info(self):
        """Return a dict holding info about this light."""
        return self.bridge.get_light(self.light)

    def rename(self, name):
        """Rename this light in the Hue system."""
        self.bridge.update_light_attributes(self.light, {'name': name})

    def on(self, brightness=200, color=160):
        """Turn the light on at the given brightness and hue.

        A brightness of 0 is sent as 'on': False.
        """
        state = {'on': brightness != 0, 'hue': color, 'bri': brightness}
        self.bridge.update_light_state(self.light, state)

    def update_state(self, state):
        """Push an arbitrary state dict to this light."""
        self.bridge.update_light_state(self.light, state)

    def off(self):
        """Turn the light off."""
        self.bridge.update_light_state(self.light, {'on': False})
| StarcoderdataPython |
164411 | <gh_stars>0
##############################################################################
#
# Copyright (c) 2001 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
""" Dublin Core support for content types.
"""
from AccessControl.SecurityInfo import ClassSecurityInfo
from AccessControl.SecurityManagement import getSecurityManager
from Acquisition import aq_base
from AccessControl.class_init import InitializeClass
from App.special_dtml import DTMLFile
from DateTime.DateTime import DateTime
from OFS.PropertyManager import PropertyManager
from zope.component import queryUtility
from zope.interface import implements
from Products.CMFCore.interfaces import ICatalogableDublinCore
from Products.CMFCore.interfaces import IDublinCore
from Products.CMFCore.interfaces import IMetadataTool
from Products.CMFCore.interfaces import IMutableDublinCore
from Products.CMFDefault.permissions import ModifyPortalContent
from Products.CMFDefault.permissions import View
from Products.CMFDefault.utils import _dtmldir
from Products.CMFDefault.utils import semi_split
from Products.CMFDefault.utils import tuplize
# presumably a unique sentinel default compared by identity ("is _marker");
# verify usage elsewhere in this module before changing.
_marker=[]
# For http://www.zope.org/Collectors/CMF/325
# We only really need this once, at startup.
_zone = DateTime().timezone()
class DefaultDublinCoreImpl( PropertyManager ):
""" Mix-in class which provides Dublin Core methods.
"""
implements(IDublinCore, ICatalogableDublinCore, IMutableDublinCore)
security = ClassSecurityInfo()
def __init__( self
, title=''
, subject=()
, description=''
, contributors=()
, effective_date=None
, expiration_date=None
, format='text/html'
, language=''
, rights=''
):
now = DateTime()
self.creation_date = now
self.modification_date = now
self.creators = ()
self._editMetadata( title
, subject
, description
, contributors
, effective_date
, expiration_date
, format
, language
, rights
)
#
# Set-modification-date-related methods.
# In DefaultDublinCoreImpl for lack of a better place.
#
# Class variable default for an upgrade.
modification_date = None
security.declarePrivate('notifyModified')
def notifyModified(self):
""" Take appropriate action after the resource has been modified.
Update creators and modification_date.
"""
self.addCreator()
self.setModificationDate()
security.declareProtected(ModifyPortalContent, 'addCreator')
def addCreator(self, creator=None):
""" Add creator to Dublin Core creators.
"""
if creator is None:
user = getSecurityManager().getUser()
creator = user and user.getId()
# call self.listCreators() to make sure self.creators exists
if creator and not creator in self.listCreators():
self.creators = self.creators + (creator, )
security.declareProtected(ModifyPortalContent, 'setModificationDate')
def setModificationDate(self, modification_date=None):
""" Set the date when the resource was last modified.
When called without an argument, sets the date to now.
"""
if modification_date is None:
self.modification_date = DateTime()
else:
self.modification_date = self._datify(modification_date)
#
# DublinCore interface query methods
#
security.declareProtected(View, 'Title')
def Title( self ):
""" Dublin Core Title element - resource name.
"""
return self.title
security.declareProtected(View, 'listCreators')
def listCreators(self):
""" List Dublin Core Creator elements - resource authors.
"""
if not hasattr(aq_base(self), 'creators'):
# for content created with CMF versions before 1.5
owner_tuple = self.getOwnerTuple()
if owner_tuple:
self.creators = (owner_tuple[1],)
else:
self.creators = ()
return self.creators
security.declareProtected(View, 'Creator')
def Creator(self):
""" Dublin Core Creator element - resource author.
"""
creators = self.listCreators()
return creators and creators[0] or ''
security.declareProtected(View, 'Subject')
def Subject( self ):
""" Dublin Core Subject element - resource keywords.
"""
return getattr( self, 'subject', () ) # compensate for *old* content
security.declareProtected(View, 'Description')
def Description( self ):
""" Dublin Core Description element - resource summary.
"""
return self.description
security.declareProtected(View, 'Publisher')
def Publisher(self):
""" Dublin Core Publisher element - resource publisher.
"""
tool = queryUtility(IMetadataTool)
if tool is not None:
return tool.getPublisher()
return 'No publisher'
security.declareProtected(View, 'listContributors')
def listContributors(self):
""" Dublin Core Contributor elements - resource collaborators.
"""
return self.contributors
security.declareProtected(View, 'Contributors')
def Contributors(self):
""" Deprecated alias of listContributors.
"""
return self.listContributors()
security.declareProtected(View, 'Date')
def Date( self, zone=None ):
""" Dublin Core Date element - default date.
"""
if zone is None:
zone = _zone
# Return effective_date if set, modification date otherwise
date = getattr(self, 'effective_date', None )
if date is None:
date = self.modified()
return date.toZone(zone).ISO()
security.declareProtected(View, 'CreationDate')
def CreationDate( self, zone=None ):
""" Dublin Core Date element - date resource created.
"""
if zone is None:
zone = _zone
# return unknown if never set properly
if self.creation_date:
return self.creation_date.toZone(zone).ISO()
else:
return 'Unknown'
security.declareProtected(View, 'EffectiveDate')
def EffectiveDate( self, zone=None ):
""" Dublin Core Date element - date resource becomes effective.
"""
if zone is None:
zone = _zone
ed = getattr( self, 'effective_date', None )
return ed and ed.toZone(zone).ISO() or 'None'
security.declareProtected(View, 'ExpirationDate')
def ExpirationDate( self, zone=None ):
""" Dublin Core Date element - date resource expires.
"""
if zone is None:
zone = _zone
ed = getattr( self, 'expiration_date', None )
return ed and ed.toZone(zone).ISO() or 'None'
security.declareProtected(View, 'ModificationDate')
def ModificationDate( self, zone=None ):
""" Dublin Core Date element - date resource last modified.
"""
if zone is None:
zone = _zone
return self.modified().toZone(zone).ISO()
security.declareProtected(View, 'Type')
def Type( self ):
""" Dublin Core Type element - resource type.
"""
ti = self.getTypeInfo()
return ti is not None and ti.Title() or 'Unknown'
security.declareProtected(View, 'Format')
def Format( self ):
""" Dublin Core Format element - resource format.
"""
return self.format
security.declareProtected(View, 'Identifier')
def Identifier( self ):
""" Dublin Core Identifier element - resource ID.
"""
# XXX: fixme using 'portal_metadata' (we need to prepend the
# right prefix to self.getPhysicalPath().
return self.absolute_url()
security.declareProtected(View, 'Language')
def Language( self ):
""" Dublin Core Language element - resource language.
"""
return self.language
security.declareProtected(View, 'Rights')
def Rights( self ):
""" Dublin Core Rights element - resource copyright.
"""
return self.rights
#
# DublinCore utility methods
#
def content_type( self ):
""" WebDAV needs this to do the Right Thing (TM).
"""
return self.Format()
__FLOOR_DATE = DateTime( 1970, 0 ) # always effective
security.declareProtected(View, 'isEffective')
def isEffective( self, date ):
""" Is the date within the resource's effective range?
"""
pastEffective = ( self.effective_date is None
or self.effective_date <= date )
beforeExpiration = ( self.expiration_date is None
or self.expiration_date >= date )
return pastEffective and beforeExpiration
#
# CatalogableDublinCore methods
#
security.declareProtected(View, 'created')
def created( self ):
""" Dublin Core Date element - date resource created.
"""
# allow for non-existent creation_date, existed always
date = getattr( self, 'creation_date', None )
return date is None and self.__FLOOR_DATE or date
security.declareProtected(View, 'effective')
def effective( self ):
""" Dublin Core Date element - date resource becomes effective.
"""
marker = []
date = getattr( self, 'effective_date', marker )
if date is marker:
date = getattr( self, 'creation_date', None )
return date is None and self.__FLOOR_DATE or date
__CEILING_DATE = DateTime( 2500, 0 ) # never expires
security.declareProtected(View, 'expires')
def expires( self ):
""" Dublin Core Date element - date resource expires.
"""
date = getattr( self, 'expiration_date', None )
return date is None and self.__CEILING_DATE or date
security.declareProtected(View, 'modified')
def modified( self ):
""" Dublin Core Date element - date resource last modified.
"""
date = self.modification_date
if date is None:
# Upgrade.
date = self.bobobase_modification_time()
self.modification_date = date
return date
security.declareProtected(View, 'getMetadataHeaders')
def getMetadataHeaders( self ):
""" Return RFC-822-style headers.
"""
hdrlist = []
hdrlist.append( ( 'Title', self.Title() ) )
hdrlist.append( ( 'Subject', ', '.join( self.Subject() ) ) )
hdrlist.append( ( 'Publisher', self.Publisher() ) )
hdrlist.append( ( 'Description', self.Description() ) )
hdrlist.append( ( 'Contributors', '; '.join( self.Contributors() ) ) )
hdrlist.append( ( 'Effective_date', self.EffectiveDate() ) )
hdrlist.append( ( 'Expiration_date', self.ExpirationDate() ) )
hdrlist.append( ( 'Type', self.getPortalTypeName() or 'Unknown' ) )
hdrlist.append( ( 'Format', self.Format() ) )
hdrlist.append( ( 'Language', self.Language() ) )
hdrlist.append( ( 'Rights', self.Rights() ) )
return hdrlist
#
# MutableDublinCore methods
#
security.declarePrivate( '_datify' )
def _datify( self, attrib ):
if attrib == 'None':
attrib = None
elif not isinstance( attrib, DateTime ):
if attrib is not None:
attrib = DateTime( attrib )
return attrib
    security.declareProtected(ModifyPortalContent, 'setTitle')
    def setTitle( self, title ):
        """ Set Dublin Core Title element - resource name.
        """
        self.title = title
    security.declareProtected(ModifyPortalContent, 'setCreators')
    def setCreators(self, creators):
        """ Set Dublin Core Creator elements - resource authors.
        """
        # tuplize normalizes a string or sequence into a tuple of values.
        self.creators = tuplize('creators', creators)
    security.declareProtected(ModifyPortalContent, 'setSubject')
    def setSubject( self, subject ):
        """ Set Dublin Core Subject element - resource keywords.
        """
        self.subject = tuplize( 'subject', subject )
    security.declareProtected(ModifyPortalContent, 'setDescription')
    def setDescription( self, description ):
        """ Set Dublin Core Description element - resource summary.
        """
        self.description = description
    security.declareProtected(ModifyPortalContent, 'setContributors')
    def setContributors( self, contributors ):
        """ Set Dublin Core Contributor elements - resource collaborators.
        """
        # XXX: fixme
        # semi_split presumably splits a semicolon-separated string -- defined
        # elsewhere in this module; verify before changing.
        self.contributors = tuplize('contributors', contributors, semi_split)
    security.declareProtected(ModifyPortalContent, 'setEffectiveDate')
    def setEffectiveDate( self, effective_date ):
        """ Set Dublin Core Date element - date resource becomes effective.
        """
        # _datify accepts a DateTime, a parseable date string, None, or 'None'.
        self.effective_date = self._datify( effective_date )
    security.declareProtected(ModifyPortalContent, 'setExpirationDate')
    def setExpirationDate( self, expiration_date ):
        """ Set Dublin Core Date element - date resource expires.
        """
        self.expiration_date = self._datify( expiration_date )
    security.declareProtected(ModifyPortalContent, 'setFormat')
    def setFormat( self, format ):
        """ Set Dublin Core Format element - resource format.
        """
        self.format = format
    security.declareProtected(ModifyPortalContent, 'setLanguage')
    def setLanguage( self, language ):
        """ Set Dublin Core Language element - resource language.
        """
        self.language = language
    security.declareProtected(ModifyPortalContent, 'setRights')
    def setRights( self, rights ):
        """ Set Dublin Core Rights element - resource copyright.
        """
        self.rights = rights
#
# Utility methods
#
security.declarePrivate('_editMetadata')
def _editMetadata(self,
title=_marker,
subject=_marker,
description=_marker,
contributors=_marker,
effective_date=_marker,
expiration_date=_marker,
format=_marker,
language=_marker,
rights=_marker,
**kw):
""" Update the editable metadata for this resource.
"""
if title is not _marker:
self.setTitle( title )
if subject is not _marker:
self.setSubject( subject )
if description is not _marker:
self.setDescription( description )
if contributors is not _marker:
self.setContributors( contributors )
if effective_date is not _marker:
self.setEffectiveDate( effective_date )
if expiration_date is not _marker:
self.setExpirationDate( expiration_date )
if format is not _marker:
self.setFormat( format )
if language is not _marker:
self.setLanguage( language )
if rights is not _marker:
self.setRights( rights )
#
# ZMI methods
#
security.declareProtected(ModifyPortalContent, 'manage_metadata')
manage_metadata = DTMLFile( 'zmi_metadata', _dtmldir )
security.declareProtected(ModifyPortalContent, 'manage_editMetadata')
def manage_editMetadata( self
, title
, subject
, description
, contributors
, effective_date
, expiration_date
, format
, language
, rights
, REQUEST
):
""" Update metadata from the ZMI.
"""
self._editMetadata( title, subject, description, contributors
, effective_date, expiration_date
, format, language, rights
)
REQUEST[ 'RESPONSE' ].redirect( self.absolute_url()
+ '/manage_metadata'
+ '?manage_tabs_message=Metadata+updated.' )
security.declareProtected(ModifyPortalContent, 'editMetadata')
def editMetadata(self
, title=''
, subject=()
, description=''
, contributors=()
, effective_date=None
, expiration_date=None
, format='text/html'
, language='en-US'
, rights=''
):
"""
Need to add check for webDAV locked resource for TTW methods.
"""
# as per bug #69, we cant assume they use the webdav
# locking interface, and fail gracefully if they dont
if hasattr(self, 'failIfLocked'):
self.failIfLocked()
self._editMetadata(title=title
, subject=subject
, description=description
, contributors=contributors
, effective_date=effective_date
, expiration_date=expiration_date
, format=format
, language=language
, rights=rights
)
self.reindexObject()
InitializeClass(DefaultDublinCoreImpl)
| StarcoderdataPython |
138637 | <reponame>cupcicm/substra<filename>tests/sdk/test_describe.py
import pytest
import substra
from .. import datastore
from .utils import mock_requests, mock_requests_response
@pytest.mark.parametrize(
    'asset_name', ['dataset', 'algo', 'objective']
)
def test_describe_asset(asset_name, client, mocker):
    # First GET returns the asset itself, second GET returns its description.
    fixture = getattr(datastore, asset_name.upper())
    responses = [
        mock_requests_response(fixture),
        mock_requests_response('foo'),
    ]
    patched_get = mocker.patch('substra.sdk.rest_client.requests.get',
                               side_effect=responses)
    describe = getattr(client, f'describe_{asset_name}')
    assert describe("magic-key") == 'foo'
    assert patched_get.is_called()
@pytest.mark.parametrize(
    'asset_name', ['dataset', 'algo', 'objective']
)
def test_describe_asset_not_found(asset_name, client, mocker):
    # A 404 on the asset lookup must surface as NotFound after one request.
    patched_get = mock_requests(mocker, "get", status=404)
    with pytest.raises(substra.sdk.exceptions.NotFound):
        getattr(client, f'describe_{asset_name}')('foo')
    assert patched_get.call_count == 1
@pytest.mark.parametrize(
    'asset_name', ['dataset', 'algo', 'objective']
)
def test_describe_description_not_found(asset_name, client, mocker):
    # Asset lookup succeeds but the description endpoint 404s; the client
    # should raise after both requests were made.
    fixture = getattr(datastore, asset_name.upper())
    side_effects = [
        mock_requests_response(fixture),
        mock_requests_response('foo', 404),
    ]
    patched_get = mocker.patch('substra.sdk.rest_client.requests.get',
                               side_effect=side_effects)
    describe = getattr(client, f'describe_{asset_name}')
    with pytest.raises(substra.sdk.exceptions.NotFound):
        describe("key")
    assert patched_get.call_count == 2
| StarcoderdataPython |
3264305 | <reponame>betodealmeida/nefelibata
from pathlib import Path
import pytest
from nefelibata.builders.index import IndexBuilder
from nefelibata.post import Post
__author__ = "<NAME>"
__copyright__ = "<NAME>"
__license__ = "mit"
# Minimal Jinja template standing in for the theme's real index template:
# one post title per line, then the "next" (archive) link.
test_template = """
{%- for post in posts -%}
{{ post.title }}
{% endfor -%}
{{ next }}
"""
class MockPost:
    """Stand-in for a blog post carrying only the fields the template reads."""

    def __init__(self, title: str, date: str):
        self.title, self.date = title, date
def test_process_site(mocker, fs):
    """Index builder paginates posts newest-first, posts-to-show per page."""
    # fs is presumably pyfakefs's fake-filesystem fixture -- all paths below
    # exist only in memory.
    config = {
        "url": "https://blog.example.com/",
        "language": "en",
        "theme": "test-theme",
        "posts-to-show": 2,
    }
    root = Path("/path/to/blog")
    fs.create_dir(root)
    fs.create_dir(root / "build")
    fs.create_dir(root / "templates/test-theme")
    with open(root / "templates/test-theme/index.html", "w") as fp:
        fp.write(test_template)
    posts = [
        MockPost("one", "2020-01-01"),
        MockPost("two", "2020-01-02"),
        MockPost("three", "2020-01-03"),
    ]
    # Builder discovers posts via get_posts(); feed it our mocks directly.
    mocker.patch("nefelibata.builders.index.get_posts", return_value=posts)
    builder = IndexBuilder(root, config)
    builder.process_site()
    # Two posts per page: newest two on index, oldest on archive1.
    assert (root / "build/index.html").exists()
    assert (root / "build/archive1.html").exists()
    with open(root / "build/index.html") as fp:
        contents = fp.read()
    assert contents == "three\ntwo\narchive1.html"
    with open(root / "build/archive1.html") as fp:
        contents = fp.read()
    # Last page: no further archive, so the template's {{ next }} is None.
    assert contents == "one\nNone"
| StarcoderdataPython |
1682718 | <gh_stars>1-10
import sys
import time
import argparse
import textwrap
from art import tprint
from .dribbble_user import *
__version__ = "0.0.1"
# Wall-clock start time; main() reports total scraping duration against this.
t1 = time.perf_counter()
def main(argv=None):
    """CLI entry point for dribbble-py.

    Parses the command line (``argv`` defaults to ``sys.argv``), then scrapes
    the requested Dribbble user and writes the result to a JSON file.

    Fixes over the original:
    - ``argv`` is now actually honored (``parse_args()`` previously always
      read ``sys.argv`` regardless of the parameter).
    - An empty ``-j`` value no longer leaves ``json_file`` undefined; it
      falls back to ``<username>.json``.
    - The two metadata/no-metadata branches, which duplicated the whole
      scrape/export/timing logic, are merged.
    """
    argv = sys.argv if argv is None else argv
    argparser = argparse.ArgumentParser(
        prog="drbl_py",
        formatter_class=argparse.RawTextHelpFormatter,
        description=textwrap.dedent(
            """
        Dribbble-py 0.0.1\n
        Program to scrape dribbble user information\n
        """
        ),
        epilog="""
    Example usage
    -------------\n
    Download info about a user.\n
    $ drbl_py -u JohnDoe\n
    Download info about a user to a custom JSON file.\n
    $ drbl_py -u JohnDoe -j John\n
    """,
    )
    # User Arguments
    # ---
    argparser.add_argument(
        "-u",
        "--username",
        help=textwrap.dedent(
            """Enter username to scrape.\n
            """
        ),
        dest="username",
    )
    argparser.add_argument(
        "-m",
        "--get-metadata",
        help=textwrap.dedent(
            """Get metadata about every user shot.\nTakes longer to scrape.\nDefault = No metadata about user shots
            """
        ),
        action="store_true",
    )
    argparser.add_argument(
        "-j",
        "--json-file",
        help=textwrap.dedent(
            """Name of output JSON filename.\nDefault = username.json\n
            """
        ),
        dest="json_file",
    )
    argparser.add_argument("--version", action="version", version="%(prog)s 0.0.1")
    args = argparser.parse_args(argv[1:])
    if not args.username:
        return
    # Default output file is "<username>.json"; an empty -j also falls back.
    json_file = (args.json_file or args.username) + ".json"
    tprint("DRIBBBLE-PY")
    print("version {}".format(__version__))
    try:
        dribbble_user = DribbbleUser(args.username, json_file)
        dribbble_user.check_user()
        if args.get_metadata:
            # Slower path: also scrapes per-shot metadata.
            dribbble_user.run_nursery_with_metadata_scraper()
        else:
            dribbble_user.run_nursery_without_metadata_scraper()
        dribbble_user.export_to_json()
        t2 = time.perf_counter()
        print(f"\nScraping took {t2-t1:0.2f} second(s)...\n")
    except KeyboardInterrupt:
        print("Exiting dribbble-py...\n")
        sys.exit(0)
| StarcoderdataPython |
import warnings

from numba.errors import NumbaDeprecationWarning, \
    NumbaPendingDeprecationWarning, NumbaPerformanceWarning

# Silence noisy numba warning categories *before* importing the solver
# modules below, so their import-time numba usage stays quiet.
# (The original first line carried fused dataset junk that broke the syntax.)
for _category in (NumbaDeprecationWarning,
                  NumbaPendingDeprecationWarning,
                  NumbaPerformanceWarning):
    warnings.simplefilter('ignore', category=_category)

from .core import solve as cd_solve
from .mosek import l0mosek
from .gurobi import l0gurobi
| StarcoderdataPython |
4816989 | # Copyright (c) 2019 Intel Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Test if correct CPUs are isolated """
import os
from re import findall
import pytest
import testinfra.utils.ansible_runner
from common import ansible_vars, check_skip_dpdk_tests
TESTINFRA_HOSTS = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ["MOLECULE_INVENTORY_FILE"]
).get_hosts("all")
@pytest.fixture(scope="module")
def isolated_cores_sysfs(host):
    """ Get CPU IDs from target which are isolated and return CPU IDs as
    integers in a list """
    ex_msg = "Failed to get isolated CPUs from sysfs with output {out}"
    isol_sysfs = get_file_output(host, "/sys/devices/system/cpu/isolated")
    if not isol_sysfs:
        return []
    cores = []
    # The sysfs value is a comma-separated mix of single IDs and "lo-hi" ranges.
    for chunk in isol_sysfs.split(","):
        if "-" in chunk:
            low, high = chunk.split("-")
            if not (low.isdigit() and high.isdigit()):
                raise Exception(ex_msg.format(out=isol_sysfs))
            cores.extend(range(int(low), int(high) + 1))
        elif chunk.isdigit():
            cores.append(int(chunk))
        else:
            raise Exception(ex_msg.format(out=isol_sysfs))
    return cores
def get_file_output(host, path):
    """ Get contents of a file from target at location defined by argument
    'path' and return as string """
    # File lookup needs elevated privileges on the target.
    with host.sudo():
        remote = host.file(path)
    if not remote or not remote.exists:
        raise Exception("Unable to find file at path '{path}'"
                        .format(path=path))
    return remote.content_string.strip()
@pytest.fixture(scope="module")
def pmd_core_mask(host):
    """ Get PMD CPU hex mask from target and return as string with leading
    '0x' stripped """
    hex_value = None
    if not host.exists("ovs-vsctl"):
        raise Exception("Failed to find ovs-vsctl in system PATH")
    with host.sudo():
        # Probe other_config first so a missing key raises a clear error
        # instead of ovs-vsctl failing on the direct column lookup.
        stdout = host.check_output("ovs-vsctl get Open_vSwitch . "
                                   "'other_config'")
        if "pmd-cpu-mask" not in stdout:
            raise Exception("Failed to find 'pmd-cpu-mask' in Open_vSwitch "
                            "column other_config")
        # ovs-vsctl quotes the value; strip surrounding double quotes.
        hex_value = host.check_output("ovs-vsctl get Open_vSwitch . "
                                      "'other_config':pmd-cpu-mask").strip('"')
    if hex_value.startswith("0x"):
        hex_value = hex_value[2:]
    return hex_value
@pytest.fixture(scope="module")
def pmd_core_numbers_from_mask(pmd_core_mask):
    """ Convert CPU hex mask to a list of CPU core IDs in a list """
    bits = bin(int(pmd_core_mask, 16))[2:]
    # Bit i (counting from the least significant bit) set => core i is a PMD core.
    return [i for i, bit in enumerate(reversed(bits)) if bit == "1"]
@pytest.fixture(scope="module")
def pmd_core_numbers_from_appctl(host):
    """ Get PMD CPU IDs from target and return a list of CPU IDs """
    with host.sudo():
        if not host.exists("ovs-appctl"):
            # Fixed message: this branch checks for ovs-appctl, but the
            # original error text claimed ovs-vsctl was missing.
            raise Exception("Failed to find ovs-appctl in system PATH")
        stdout = host.check_output("ovs-appctl dpif-netdev/pmd-rxq-show")
    # Each PMD thread reports a "core_id <n>" line.
    matches = findall(r"core_id (\d+)", stdout)
    return [int(match) for match in matches]
# The test functions below use the fixture "check_skip_dpdk_tests" to decide if
# the tests should be executed. If the Ansible variable "skip_ovs_dpdk_config"
# is set to True, ovs-dpdk will not be configured on the target host, making
# execution of these tests redundant. Hence, they will be skipped.
@pytest.mark.usefixtures("check_skip_dpdk_tests")
def test_isolated_cpus(isolated_cores_sysfs, pmd_core_numbers_from_mask):
    """ Test if PMD cores are isolated """
    # Message parenthesized: the original's second string literal was a
    # separate no-op statement, truncating the assert message to "Kernel ".
    assert isolated_cores_sysfs == pmd_core_numbers_from_mask, (
        "Kernel isolated CPU's does not match PMD core mask")
@pytest.mark.usefixtures("check_skip_dpdk_tests")
def test_pmd_cores_match_mask(pmd_core_numbers_from_mask,
                              pmd_core_numbers_from_appctl):
    """ Test if PMD CPU mask applied to OVS by Ansible Role has been applied
    to DPDK's PMD """
    assert pmd_core_numbers_from_mask == pmd_core_numbers_from_appctl, (
        "PMD core mask from OVS does not match actual PMD pinned cores")
| StarcoderdataPython |
1635870 | """
# Data Structures and Algorithms - Part B
# Created by <NAME> (16021424)
"""
class Colours:
    """ANSI terminal escape sequences for coloured/styled console output.

    (The original UNDERLINE line carried fused dataset junk that broke the
    file's syntax.)
    """
    HEADER = '\033[95m'    # bright magenta
    OKBLUE = '\033[94m'    # bright blue
    OKGREEN = '\033[92m'   # bright green
    WARNING = '\033[93m'   # bright yellow
    GRAY = '\033[1;30m'    # bold dark gray
    FAIL = '\033[91m'      # bright red
    ENDC = '\033[0m'       # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
141539 | # Tic-Tac-Toe
import random
def drawBoard(board):
    # Render the 3x3 board; "board" is a list of 10 strings (index 0 unused),
    # printed top row first with '-+-+-' separators between rows.
    for n, (a, b, c) in enumerate(((7, 8, 9), (4, 5, 6), (1, 2, 3))):
        print(board[a] + '|' + board[b] + '|' + board[c])
        if n != 2:
            print('-+-+-')
def inputPlayerLetter():
    # Keep prompting until the player picks X or O (case-insensitive).
    letter = ''
    while letter not in ("X", "O"):
        print("Do you want to be X or O")
        letter = input().upper()
    # First element is the player's letter, second the computer's.
    return ["X", "O"] if letter == "X" else ["O", "X"]
def whoGoesFirst():
    # Coin flip for the opening turn.
    return "computer" if random.randint(0, 1) == 0 else "player"
def makeMove(board, letter, move):
    # Place letter on the board in-place.  The original used '==', a no-op
    # comparison, so moves were never actually recorded.
    board[move] = letter
def isWinner(bo, le):
    # Returns True if the letter `le` has three in a row on board `bo`.
    # Fixes: the middle column (8,5,2) was missing and the right column
    # (9,6,3) was listed twice in the original.
    return ((bo[7] == le and bo[8] == le and bo[9] == le) or  # top row
            (bo[4] == le and bo[5] == le and bo[6] == le) or  # middle row
            (bo[1] == le and bo[2] == le and bo[3] == le) or  # bottom row
            (bo[7] == le and bo[4] == le and bo[1] == le) or  # left column
            (bo[8] == le and bo[5] == le and bo[2] == le) or  # middle column
            (bo[9] == le and bo[6] == le and bo[3] == le) or  # right column
            (bo[7] == le and bo[5] == le and bo[3] == le) or  # diagonal
            (bo[9] == le and bo[5] == le and bo[1] == le))    # diagonal
def getBoardCopy(board):
    # Shallow copy so trial moves never touch the real board.
    return list(board)
def isSpaceFree(board, move):
    # A square is free while it still holds the single-space placeholder.
    free = (board[move] == ' ')
    return free
def getPlayerMove(board):
    # Keep asking until the player names a free square between 1 and 9.
    valid = '1 2 3 4 5 6 7 8 9'.split()
    move = ' '
    while move not in valid or not isSpaceFree(board, int(move)):
        print("Whats your next move? (1-9)")
        move = input()
    return int(move)
def chooseRandomMoveFromList(board, movesList):
    # Pick a random free square from the candidates, or None if all taken.
    possibleMoves = [m for m in movesList if isSpaceFree(board, m)]
    if possibleMoves:
        return random.choice(possibleMoves)
    return None
def getComputerMove(board, computerLetter):
    """Return the computer's move (1-9) using a fixed-priority strategy.

    Fixes over the original: `playerLetter` was "assigned" with `==`
    (a no-op comparison leaving it undefined), both look-ahead loops
    placed the computer's letter but tested the *player's* win, and the
    center check called the undefined name `spaceIsFree`.
    """
    if computerLetter == "X":
        playerLetter = "O"
    else:
        playerLetter = "X"
    # 1. Take a winning move if one exists.
    for i in range(1, 10):
        boardCopy = getBoardCopy(board)
        if isSpaceFree(boardCopy, i):
            makeMove(boardCopy, computerLetter, i)
            if isWinner(boardCopy, computerLetter):
                return i
    # 2. Block the player's winning move.
    for i in range(1, 10):
        boardCopy = getBoardCopy(board)
        if isSpaceFree(boardCopy, i):
            makeMove(boardCopy, playerLetter, i)
            if isWinner(boardCopy, playerLetter):
                return i
    # 3. Take a corner if one is free.
    move = chooseRandomMoveFromList(board, [1, 3, 7, 9])
    if move is not None:
        return move
    # 4. Take the center if it's free.
    if isSpaceFree(board, 5):
        return 5
    # 5. Fall back to a side square.
    return chooseRandomMoveFromList(board, [2, 4, 6, 8])
def isBoardFull(board):
    # The board is full when no square 1-9 remains free.
    return not any(isSpaceFree(board, i) for i in range(1, 10))
print("Welcome to Tic-Tac-Toe!")
# Outer loop: one full game per iteration, until the player declines a rematch.
while True:
    # reset the board
    theBoard = [' '] * 10
    playerLetter, computerLetter = inputPlayerLetter()
    turn = whoGoesFirst()
    print("The " + turn + " will go first.")
    gameIsPlaying = True
    # Inner loop: alternate turns until a win or a full board.
    while gameIsPlaying:
        if turn == 'player':
            # Player's turn
            drawBoard(theBoard)
            move = getPlayerMove(theBoard)
            makeMove(theBoard, playerLetter, move)
            if isWinner(theBoard, playerLetter):
                drawBoard(theBoard)
                print("Hooray! You have won the game!")
                gameIsPlaying = False
            else:
                if isBoardFull(theBoard):
                    drawBoard(theBoard)
                    print("The game is a tie!")
                    break
                else:
                    turn = "computer"
        else:
            # Computer's turn
            move = getComputerMove(theBoard, computerLetter)
            makeMove(theBoard, computerLetter, move)
            if isWinner(theBoard, computerLetter):
                drawBoard(theBoard)
                print("The computer has beaten you! You lose.")
                gameIsPlaying = False
            else:
                if isBoardFull(theBoard):
                    drawBoard(theBoard)
                    print("The game is a tie!")
                    break
                else:
                    turn = "player"
    # Any answer not starting with 'y' quits.
    print("Do you want to play again? (yes or no)")
    if not input().lower().startswith('y'):
        break
| StarcoderdataPython |
3360892 | import json
def generate_json(split, data_dir="../../dataset/final_data/commongen", out_dir="."):
    """Pair up commongen source/target lines for *split* into a JSON-lines file.

    Reads ``commongen.<split>.src_alpha.txt`` and ``commongen.<split>.tgt.txt``
    from *data_dir* and writes ``commongen.<split>.json`` to *out_dir*, one
    ``{"src": ..., "tgt": ...}`` object per line with no trailing newline.
    The directory parameters default to the original hard-coded locations,
    so existing callers are unaffected.
    """
    source_path = data_dir + "/commongen." + split + ".src_alpha.txt"
    target_path = data_dir + "/commongen." + split + ".tgt.txt"
    out_path = out_dir + "/commongen." + split + ".json"
    with open(source_path) as source, open(target_path) as target, open(out_path, "w") as output:
        source_lines = source.readlines()
        target_lines = target.readlines()
        # The two files must be parallel, line for line.
        assert len(source_lines) == len(target_lines)
        for i, (src, tgt) in enumerate(zip(source_lines, target_lines)):
            json.dump({"src": src.strip(), "tgt": tgt.strip()}, output)
            if i != len(source_lines) - 1:
                output.write("\n")
# Build the JSON-lines files for every dataset split.
# (The original last line carried fused dataset junk that broke the syntax.)
generate_json("train")
generate_json("test")
generate_json("dev")
3265163 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# author: bigfoolliu
"""
Given the root node of a binary search tree (BST) and a value, find the node
in the BST whose value equals the given value and return the subtree rooted
at that node.  If the node does not exist, return NULL.

For example, given the binary search tree:

        4
       / \
      2   7
     / \
    1   3

and the value: 2

you should return this subtree:

      2
     / \
    1   3

In the example above, if the target value were 5, since no node has value 5,
NULL should be returned.

Source: LeetCode problem 700, "Search in a Binary Search Tree"
Link: https://leetcode-cn.com/problems/search-in-a-binary-search-tree
Copyright belongs to LeetCode; contact them for commercial reuse and
attribute the source for non-commercial reuse.
"""
import doctest
# Definition for a binary tree node.
class TreeNode:
    # Plain binary tree node: integer payload plus left/right child links.
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
    def searchBST(self, root: "TreeNode", val: int) -> "TreeNode":
        """Return the subtree rooted at the node whose value equals ``val``.

        Standard BST descent: go left for smaller targets, right for larger
        ones.  Returns None when no node matches (LeetCode's NULL).  The
        original body was an unimplemented ``pass`` stub.
        """
        node = root
        while node is not None and node.val != val:
            node = node.left if val < node.val else node.right
        return node
# Run this module's doctests when executed directly (none are defined yet).
if __name__ == '__main__':
    doctest.testmod()
| StarcoderdataPython |
68755 | from csv import DictReader
from functools import lru_cache
from itertools import groupby
from pathlib import Path
from typing import TextIO
import click
import h5py
from skelshop.corpus import index_corpus_desc
from skelshop.face.consts import DEFAULT_METRIC
from skelshop.iden.idsegs import ref_arg
from skelshop.utils.click import PathPath
PENALTY_WEIGHT = 1e6
@lru_cache(maxsize=128)
def get_sparse_reader(face_path: str):
    """Open (and memoize) a sparse face-embedding reader for an HDF5 file.

    NOTE(review): the LRU cache keeps up to 128 HDF5 files open for the
    lifetime of the process; evicted entries are not explicitly closed.
    """
    from skelshop.face.io import SparseFaceReader

    # Open read-only explicitly: older h5py versions default to append
    # mode, which can create or write-lock the file.
    h5_file = h5py.File(face_path, "r")
    return SparseFaceReader(h5_file)
@click.command()
@ref_arg
@click.argument("protos", type=click.File("r"))
@click.argument("corpus_desc", type=PathPath(exists=True))
@click.argument("assign_out", type=click.File("w"))
@click.option("--thresh", type=float, default=float("inf"))
@click.option("--corpus-base", type=PathPath(exists=True))
def idclus(
    ref,
    protos: TextIO,
    corpus_desc: Path,
    assign_out: TextIO,
    thresh: float,
    corpus_base: Path,
):
    """
    Identifies clusters by comparing against a reference and forcing a match
    """
    import numpy as np
    corpus = index_corpus_desc(corpus_desc, corpus_base)
    reader = DictReader(protos)
    # Per prototype cluster: stacked embeddings, group size, and a label.
    proto_embeddings = []
    proto_group_sizes = []
    clus_idxs = []
    # NOTE(review): itertools.groupby only groups *consecutive* rows, so the
    # protos CSV is assumed to be sorted by clus_idx -- verify upstream.
    for clus_idx, clus_grp in groupby(reader, lambda row: row["clus_idx"]):
        num_protos = 0
        for proto in clus_grp:
            faces = corpus[int(proto["video_idx"])]["faces"]
            # Cached per faces-file; see get_sparse_reader above.
            face_reader = get_sparse_reader(faces)
            proto_embeddings.append(
                face_reader[(int(proto["frame_num"]), int(proto["pers_id"]))]["embed"]
            )
            num_protos += 1
        proto_group_sizes.append(num_protos)
        clus_idxs.append("c" + clus_idx)
    proto_embeddings_np = np.vstack(proto_embeddings)
    # Emit a two-column CSV mapping reference labels to cluster labels.
    assign_out.write("label,clus\n")
    ref_labels = list(ref.labels())
    for ref_idx, clus in ref.assignment(
        DEFAULT_METRIC, thresh, proto_embeddings_np, proto_group_sizes
    ):
        assign_out.write("{},{}\n".format(ref_labels[ref_idx], clus_idxs[clus]))
1681188 | import streamlit as st
from build import build_model
from build import Scatterplot
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import plotly.graph_objects as go
import plotly_express as px
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_diabetes, load_boston,load_digits
# NOTE(review): CURRENT_THEME does not appear to be read anywhere below.
CURRENT_THEME = "dark"
# CSS injected to hide Streamlit's default footer.
hide_streamlit_style = """
<style>
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
title_container = st.sidebar.container()
image = 'tymlo.png'
with title_container:
    st.sidebar.title("Explainable AI Toolkit")
    st.sidebar.image(image)
#---------------------------------#
# Sidebar - Collects user input features into dataframe
# NOTE(review): st.sidebar.header is used as a context manager here and
# below; confirm this behaves as intended for grouping widgets.
with st.sidebar.header('1. Upload your CSV data'):
    uploaded_file = st.sidebar.file_uploader("Upload your input CSV file",accept_multiple_files=False, type=["csv"],key='1')
    st.sidebar.markdown("""
[Example CSV input file](https://raw.githubusercontent.com/dataprofessor/data/master/delaney_solubility_with_descriptors.csv)
""")
if uploaded_file is not None:
    df = pd.read_csv(uploaded_file)
    st.sidebar.header('Select target')
    target = st.sidebar.selectbox('Columns:', df.columns)
    submit=st.sidebar.button("Select")
with st.sidebar.header(' Select Model'):
    model = st.sidebar.radio('',('Logistic Regression','Random Forest Regressor','Linear Regression','Decision Tree Regressor','Support Vector Regression','Create customised Model'))#, 'KNeighborsClassifier','GaussianNB','DecisionTreeClassifier','SVC'
#---------------------------------#
# Main panel
# Displays the dataset
st.subheader('1. Dataset')
if uploaded_file is not None:
    #df = pd.read_csv(uploaded_file)
    st.markdown('**1.1. Glimpse of dataset**')
    st.write(df)
    #st.sidebar.header('3.Select target')
    #target = st.sidebar.selectbox('Columns:', df.columns)
    #submit=st.sidebar.button("Select")
    #if submit:
    #st.write(target)
    if submit:
        build_model(df,target,model)
    #Scatterplot(df, target)
else:
    #Scatterplot(df, target)
    # Boston housing dataset
    # Fallback demo: no upload, so load the bundled example CSV with
    # 'card' as the target column.
    data = pd.read_csv('Aer_test.csv')
    X = data.drop(['card'], axis=1)
    Y = data['card']
    #st.write(Y.head())
    #st.write(X.head())
    df = pd.concat( [X,Y], axis=1 )
    st.markdown('This dataset is used as the example.')
    st.write(df.head(5))
    build_model(df,'',model)
# Sidebar - Specify parameter settings
| StarcoderdataPython |
3382792 | <reponame>KPMcKenna/GBMaker
import warnings
from pymatgen.core import Structure
def formatwarning(message, category, *args, **kwargs):
    """Minimal warning formatter producing ``<CategoryName>\\n<message>\\n``.

    Installed as ``warnings.formatwarning`` below, so it must tolerate the
    extra ``(filename, lineno, line)`` arguments the warnings machinery
    supplies -- including ``line`` passed by keyword, which the original's
    bare ``*_`` rejected.  Also fixes the ``catagory`` parameter-name typo.
    """
    return f"{category.__name__}\n{message}\n"
warnings.formatwarning = formatwarning
class Warnings:
    """Project warning helpers, emitted via the custom formatwarning above."""
    @classmethod
    def UnitCell(cls, unit_cell: Structure):
        # Warn that the supplied pymatgen Structure is being used even though
        # it is not the expected conventional unit cell; does not raise.
        warnings.warn(f"Non-conventional unit cell supplied, using:\n{unit_cell}")
        return
| StarcoderdataPython |
1768984 | import math
import sys
def main(filepath):
    """Read 'size;comma-separated-grid' lines from *filepath* and print
    each grid's validity.

    Made Python-2/3 compatible: the original used the Python-2-only
    ``print`` statement, and a bare ``map`` (lazy in Python 3) for the
    grid, which SodukuGrid needs to index.
    """
    with open(filepath, 'r') as f:
        for line in f.readlines():
            if line:
                line = line.strip()
                line = line.split(';')
                n = int(line[0])
                # list() keeps the grid indexable under Python 3.
                grid = list(map(int, line[1].split(',')))
                s = SodukuGrid(n, grid)
                print(s.isValid())
class SodukuGrid:
    """An n x n Sudoku solution stored as a flat, row-major list of ints.

    Ported to Python 3: the original used ``xrange`` (NameError on py3)
    and compared ``sorted(...) == range(...)``, which is always False on
    py3 because ``range`` is no longer a list.  The validation logic,
    including the region-offset arithmetic, is unchanged.
    """

    def __init__(self, size, grid):
        # grid is assumed to be an indexable sequence of size*size ints.
        self.size = size
        self.grid = grid

    def isValid(self):
        """True iff every row, column and region is a permutation of 1..size."""
        rows_are_valid = self._rowsAreValid()
        cols_are_valid = self._columnsAreValid()
        regions_are_valid = self._regionsAreValid()
        return rows_are_valid and cols_are_valid and regions_are_valid

    def _rowsAreValid(self):
        num_range = list(range(1, self.size + 1))
        for start in range(0, self.size * self.size, self.size):
            if sorted(self.grid[start:start + self.size]) != num_range:
                return False
        return True

    def _columnsAreValid(self):
        num_range = list(range(1, self.size + 1))
        for i in range(self.size):
            col_nums = [self.grid[i + j * self.size] for j in range(self.size)]
            if sorted(col_nums) != num_range:
                return False
        return True

    def _regionsAreValid(self):
        num_range = list(range(1, self.size + 1))
        region_size = int(math.sqrt(self.size))
        # Offset that jumps from the end of one band of regions to the next.
        region_spacer = region_size ** 3 - self.size
        for i in range(self.size):
            region_nums = []
            for j in range(region_size):
                start = (i * region_size) + (j * self.size) + (i // region_size * region_spacer)
                region_nums += self.grid[start:start + region_size]
            if sorted(region_nums) != num_range:
                return False
        return True
if __name__ == '__main__':
main(sys.argv[1])
| StarcoderdataPython |
1779940 | from __future__ import annotations
from typing import Dict, Union, cast
from deprecation import deprecated
from httpx import Response, Timeout
from .. import __version__
from ..base_client import (
DEFAULT_POSTGREST_CLIENT_HEADERS,
DEFAULT_POSTGREST_CLIENT_TIMEOUT,
BasePostgrestClient,
)
from ..utils import AsyncClient
from .request_builder import AsyncRequestBuilder
class AsyncPostgrestClient(BasePostgrestClient):
    """PostgREST client."""
    def __init__(
        self,
        base_url: str,
        *,
        schema: str = "public",
        headers: Dict[str, str] = DEFAULT_POSTGREST_CLIENT_HEADERS,
        timeout: Union[int, float, Timeout] = DEFAULT_POSTGREST_CLIENT_TIMEOUT,
    ) -> None:
        BasePostgrestClient.__init__(
            self,
            base_url,
            schema=schema,
            headers=headers,
            timeout=timeout,
        )
        # typing-only: the base class created the session via create_session,
        # so at runtime it is already an AsyncClient.
        self.session = cast(AsyncClient, self.session)
    def create_session(
        self,
        base_url: str,
        headers: Dict[str, str],
        timeout: Union[int, float, Timeout],
    ) -> AsyncClient:
        # Hook used by the base class to build the underlying HTTP session.
        return AsyncClient(
            base_url=base_url,
            headers=headers,
            timeout=timeout,
        )
    async def __aenter__(self) -> AsyncPostgrestClient:
        return self
    async def __aexit__(self, exc_type, exc, tb) -> None:
        await self.aclose()
    async def aclose(self) -> None:
        """Close the underlying HTTP session."""
        await self.session.aclose()
    def from_(self, table: str) -> AsyncRequestBuilder:
        """Perform a table operation."""
        # Give each request builder its own session cloned from the current
        # one (base_url, headers, timeout, auth) so builder-local header
        # changes don't leak back into this client.
        base_url = str(self.session.base_url)
        headers = dict(self.session.headers.items())
        session = self.create_session(base_url, headers, self.session.timeout)
        session.auth = self.session.auth
        return AsyncRequestBuilder(session, f"/{table}")
    def table(self, table: str) -> AsyncRequestBuilder:
        """Alias to self.from_()."""
        return self.from_(table)
    @deprecated("0.2.0", "1.0.0", __version__, "Use self.from_() instead")
    def from_table(self, table: str) -> AsyncRequestBuilder:
        """Alias to self.from_()."""
        return self.from_(table)
    async def rpc(self, func: str, params: dict) -> Response:
        """Perform a stored procedure call."""
        path = f"/rpc/{func}"
        return await self.session.post(path, json=params)
| StarcoderdataPython |
4815997 | <filename>utils/MathUtils.py
# 数学算法
from _pydecimal import Context, ROUND_HALF_UP
class MathUtils(object):
    """Assorted static numeric/conversion helpers."""

    @staticmethod
    def absValue(value):
        """Absolute value."""
        return abs(value)

    @staticmethod
    def toBin(value):
        """Decimal int -> binary string, e.g. 5 -> '0b101'."""
        return bin(value)

    @staticmethod
    def toOct(value):
        """Decimal int -> octal string."""
        return oct(value)

    @staticmethod
    def toHex(value):
        """Decimal int -> hexadecimal string."""
        return hex(value)

    @staticmethod
    def toASCII(value):
        """Code point -> character, e.g. 65 -> 'A', 97 -> 'a'."""
        return chr(value)

    @staticmethod
    def fromASCII(value):
        """Character -> code point."""
        return ord(value)

    @staticmethod
    def powValue(x, y, z=None):
        """x**y, reduced modulo z when z is given (pow(x, y) % z)."""
        return pow(x, y, z)

    @staticmethod
    def divmodValue(value, divisor):
        """Return the (quotient, remainder) pair."""
        return divmod(value, divisor)

    @staticmethod
    def roundValue(value, n=None):
        """Half-up rounding via decimal.

        With n=None the historical behaviour is kept: round to three
        significant digits.  With n given, round to n decimal places --
        the original accepted n but silently ignored it.
        """
        # Import from the public decimal module; the file-level import uses
        # the private pure-Python _pydecimal backport.
        from decimal import Context, Decimal, ROUND_HALF_UP
        if n is None:
            return Context(prec=3, rounding=ROUND_HALF_UP).create_decimal(str(value))
        return Decimal(str(value)).quantize(Decimal(10) ** -n,
                                            rounding=ROUND_HALF_UP)

    @staticmethod
    def sumValues(values):
        """Sum of a list or tuple."""
        return sum(values)

    @staticmethod
    def average_score(lst):
        """Mean after dropping the single lowest and highest scores, 2 dp.

        Unlike the original, the caller's list is no longer sorted in place.
        """
        trimmed = sorted(lst)[1:-1]
        return round(sum(trimmed) / len(trimmed), 2)
| StarcoderdataPython |
# October 2018
'''
cifero.sheets
Modules syll and translit use cifero.sheets.sheetsdict in their functions.
'''
################################################################################
# default cipher sheets
# better not change these
# these aren't linked to the main program.
# Each sheet maps the same 36 consonant slots and 21 vowel slots; empty
# strings are placeholders for unused slots.  The four sheets line up
# position-for-position so symbols can be transliterated between them.
ipa_sheet = {
'title': 'IPA',
'consonants':
[
'p','b','',
't','d','',
'k','g','',
'θ','ð','',
'f','v','',
's','z','ʦ',
'ʃ','ʒ','',
'ʧ','ʤ','',
'h','x','ɲ',
'n','m','ŋ',
'l','ɹ','r',
'ʔ','j','w'
],
'vowels':
[
'ɑ','a','',
'ɪ','i','',
'ʊ','u','',
'ɛ','e','',
'o','ɔ','',
'ə','ʌ','',
'æ','',''
]
}
# Single-character key: digits/letters/symbols standing in for IPA sounds.
key_sheet = {
'title' : 'Key',
'consonants':
[
'9','b','',
'1','d','',
'7','g','',
'f','t','',
'8','v','',
'0','z','c',
'q','p','',
'6','j','',
'h','k','m',
'2','3','n',
'5','4','r',
'l','y','w'
],
'vowels':
[
'a','&','',
'i','#','',
'u','$','',
'e','%','',
'o','@','',
'x','=','',
's','',''
]
}
# Readable ASCII spellings; multi-letter sounds are wrapped in parentheses.
base_sheet = {
'title' : 'Base',
'consonants':
[
'p','b','',
't','d','',
'k','g','',
'(th)','(dth)','',
'f','v','',
's','z','(ts)',
'(sh)','(jh)','',
'(ch)','j','',
'h','(kh)','(ny)',
'n','m','(ng)',
'l','r','(rr)',
'(-)','y','w'
],
'vowels':
[
'a','(aa)','',
'i','(ii)','',
'u','(oo)','',
'e','(ee)','',
'o','(aw)','',
'(uh)','(ah)','',
'(ea)','',''
]
}
# Cipher alphabet: one base letter plus optional 'f'/'n' modifier suffix.
cipher_sheet = {
'title' : 'Cipher',
'consonants':
[
'b','bf','',
'd','df','',
'g','gf','',
't','tf','',
'p','pf','',
'c','cf','cn',
'k','kf','',
'q','qf','',
'h','hf','hn',
'm','mf','mn',
'r','rf','rn',
'w','wf','wn'
],
'vowels' :
[
'l','lf','',
'j','jf','',
'y','yf','',
'z','zf','',
'v','vf','',
'x','xf','',
's','',''
]
}
# note that 'marks' and 'symbols' are somewhat arbitrarily classified by their
# relative position in a word. This is strict and inputs often don't conform
# To be safe, just strip punctuation before transliterating.
# list of punctuation marks. These must be attached to the end of a word
# \ undefined
marks = (
':', ';', ',', '.', '!', '?',
'[', ']', '(', ')', '{', '}',
'"',"'",'<','>'
)
# list of symbols. These must stand alone in a sentence
# _ ^ | undefined
# Edit Oct '18: &#$%@= are now used for default key (capitalization problems)
symbols = (
'*','+','-','/','~'
)
def fl(list1):
    '''Return *list1* with empty strings (and other falsy items) dropped.'''
    return [item for item in list1 if item]
# Consonant "classes" sliced out of the IPA sheet by fixed index ranges;
# fl() strips the empty placeholder slots.
# cons in IPA, 'class t'
reg1_c = fl(ipa_sheet['consonants'][:15] + ipa_sheet['consonants'][21:24])
# cons in IPA, 'class s'
reg2_c = fl(ipa_sheet['consonants'][15:17] + ipa_sheet['consonants'][18:21])
# cons in IPA, 'class h'
irreg1_c = fl(ipa_sheet['consonants'][24:26])
# cons in IPA, 'class n'
irreg2_c = fl(ipa_sheet['consonants'][26:30])
# cons in IPA, 'class r'
irreg3_c = fl(ipa_sheet['consonants'][30:33])
# pseudo-vowels in IPA
# NOTE(review): indices [34:] pick out 'j' and 'w' (the glides) — confirm
# the intended slice against the sheet layout.
pseudo_v = fl(ipa_sheet['consonants'][34:])
# Leftovers not covered by the classes above (indices 17 and 33).
other_c = fl([ipa_sheet['consonants'][17]] + [ipa_sheet['consonants'][33]])
def init_vcc():
    '''Build every valid consonant cluster from the class pairings.'''
    # Preserve the historical ordering: st clusters, then sn, sr, tr.
    pairings = (
        (reg2_c, reg1_c),
        (reg2_c, irreg2_c),
        (reg2_c, irreg3_c),
        (reg1_c, irreg3_c),
    )
    clusters = []
    for firsts, seconds in pairings:
        clusters.extend(first + second for first in firsts for second in seconds)
    return clusters
################################################################################
# Registry of the default sheets, keyed by each sheet's 'title' field;
# consumed by the syll and translit modules.
sheetsdict = {ipa_sheet['title'] : ipa_sheet,
key_sheet['title']: key_sheet,
base_sheet['title'] : base_sheet,
cipher_sheet['title'] : cipher_sheet}
################################################################################ | StarcoderdataPython |
102514 | """
Tests find_lcs_optimized function
"""
import timeit
import unittest
from lab_2.main import find_lcs_length, find_lcs_length_optimized
class FindLcsOptimizedTest(unittest.TestCase):
    """
    Checks for find_lcs_optimized function
    """

    def test_find_lcs_length_optimized_works_faster(self):
        """
        Tests that find_lcs_length_optimized runs faster than the
        plain find_lcs_length implementation on the same inputs.
        """
        first_sentence = ('the', 'dog', 'is', 'running', 'here')
        second_sentence = ('a', 'boy', 'plays', 'with', 'ball')
        threshold = 0.3

        def elapsed(func):
            # Wall-clock time of a single call on the fixed sentences.
            started = timeit.default_timer()
            func(first_sentence, second_sentence, threshold)
            return timeit.default_timer() - started

        # Time the baseline first, then the optimized variant (same
        # order as before), and require a strict speed-up.
        baseline_time = elapsed(find_lcs_length)
        optimized_time = elapsed(find_lcs_length_optimized)
        self.assertGreater(baseline_time, optimized_time)
| StarcoderdataPython |
# datasets/test/soy_small/importer.py
'''Fetcher for the Soybean (Small) dataset'''
import numpy as np
import pandas as pd
## Config ----------------------------------------------------------------------
# Source CSV on the UCI Machine Learning Repository (no header row).
URL = 'https://archive.ics.uci.edu/ml/machine-learning-databases/soybean/soybean-small.data'
# numpy savetxt format string: plain integers.
INT_FMT = '%1i'
# Class labels; kept sorted so np.searchsorted maps them to indices 0..3.
class_map = ['D1', 'D2', 'D3', 'D4']
## Run -------------------------------------------------------------------------
# Read data from UCI (column 35 holds the Dn class label).
ds = pd.read_csv(URL, header=None)
# Convert the Dn labels to 0..3 and move to another dataframe
# (np.searchsorted requires class_map to be sorted, which it is).
labels = np.searchsorted(class_map, ds.loc[:,35].values)
ds = ds.drop(35, axis=1)
# Save files: labels.csv (one label per row) and data.csv (features only).
np.savetxt('labels.csv', labels, fmt=INT_FMT)
np.savetxt('data.csv', ds, fmt=INT_FMT, delimiter=',')
| StarcoderdataPython |
# core/migrations/0014_note_period.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-30 10:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the nullable "period" field to core.Note.
    # NOTE(review): max_length is not a valid option for SmallIntegerField
    # (it appears ineffective for an integer column — confirm); left
    # untouched because applied migrations are historical records and
    # should not be edited.
    dependencies = [
        ('core', '0013_auto_20170430_0830'),
    ]
    operations = [
        migrations.AddField(
            model_name='note',
            name='period',
            field=models.SmallIntegerField(max_length=1, null=True, verbose_name='Period'),
        ),
    ]
| StarcoderdataPython |
import pandas as pd
import sqlalchemy as sql
import sqlalchemy.sql.functions as db_func
import sqlalchemy.sql.expression as db_expr
from sqlalchemy.orm import aliased
from sqlalchemy.types import ARRAY, INT, VARCHAR, FLOAT
from schools3.config.data import db_tables
from schools3.config.data.features import features_config
from schools3.config.data.features import snapshot_features_config
from schools3.data.features.features_table import FeaturesTable
from schools3.data.features.processors.composite_feature_processor import CompositeFeatureProcessor
from schools3.data.features.processors.impute_null_processor import ImputeNullProcessor
from schools3.data.features.processors.categorical_feature_processor import CategoricalFeatureProcessor
from schools3.data.features.processors.replace_nullish_processor import ReplaceNullishProcessor
# table of features derived from `clean.all_snapshots` in the database
# Not all features we use that are based on `clean.all_snapshots` are computed in this file
# However, this table contains many features that can be directly read off or are simple to compute
class SnapshotFeatures(FeaturesTable):
    """Features derived directly from `clean.all_snapshots`.

    Builds one row per (student_lookup, school_year, grade) with
    demographics, discipline/absence counts (current plus two prior
    years), age, transfer count, and cumulative discipline incidents.
    """

    def __init__(self):
        # Output schema of the features table.
        cols = [
            sql.Column('gender', VARCHAR),
            sql.Column('ethnicity', VARCHAR),
            sql.Column('school_name', VARCHAR),
            sql.Column('district', VARCHAR),
            sql.Column('disability', VARCHAR),
            sql.Column('disadvantagement', VARCHAR),
            sql.Column('economic_disadvantagement', INT),
            sql.Column('academic_disadvantagement', INT),
            sql.Column('limited_english', VARCHAR),
            sql.Column('discipline_incidents', INT),
            sql.Column('pre_1_year_discipline_incidents', INT),
            sql.Column('pre_2_year_discipline_incidents', INT),
            sql.Column('days_absent', FLOAT),
            sql.Column('pre_1_year_days_absent', FLOAT),
            sql.Column('pre_2_year_days_absent', FLOAT),
            sql.Column('age', INT),
            sql.Column('num_transfers', INT),
            sql.Column('cumul_discipline_incidents', INT),
        ]
        # Post-processing pipeline applied in order: normalize null-ish
        # values, impute configured fill values (with flags), then encode
        # categorical columns.
        feature_processor = CompositeFeatureProcessor([
            ReplaceNullishProcessor(
                column_list=snapshot_features_config.replace_nullish_columns
            ),
            ImputeNullProcessor(
                col_val_dict=snapshot_features_config.fill_values,
                col_flag_set=snapshot_features_config.impute_flag_columns
            ),
            CategoricalFeatureProcessor(
                column_list=snapshot_features_config.categorical_columns
            )
        ])
        super(SnapshotFeatures, self).__init__(
            table_name='snapshot_features',
            feature_cols=cols,
            categorical_cols=snapshot_features_config.categorical_columns,
            post_features_processor=feature_processor
        )

    def get_data_query(self):
        """Return the SQLAlchemy query that materializes the features."""
        all_snapshots = db_tables.clean_all_snapshots_table
        student_lookup = all_snapshots.c.student_lookup
        school_year = all_snapshots.c.school_year
        grade = all_snapshots.c.grade
        gender = all_snapshots.c.gender
        ethnicity = all_snapshots.c.ethnicity
        school_name = all_snapshots.c.school_name
        district = all_snapshots.c.district
        disability = all_snapshots.c.disability
        disadvantagement = all_snapshots.c.disadvantagement
        limited_english = all_snapshots.c.limited_english
        discipline_incidents = all_snapshots.c.discipline_incidents
        days_absent = all_snapshots.c.days_absent
        birth_date = all_snapshots.c.birth_date
        # Approximate age: school year minus the 4-digit birth year
        # (birth_date cast to text, first 4 characters).
        age = school_year - sql.cast(sql.func.substr(sql.cast(birth_date, VARCHAR), 1, 4), INT)
        # Split the combined disadvantagement flag into two binary columns;
        # unexpected values map to NULL.
        economic_disadvantagement = sql.case(
            [(all_snapshots.c.disadvantagement.in_(['economic', 'both']), 1),
             (all_snapshots.c.disadvantagement.in_(['academic', 'none']), 0)],
             else_ = None)
        academic_disadvantagement = sql.case(
            [(all_snapshots.c.disadvantagement.in_(['academic', 'both']), 1),
             (all_snapshots.c.disadvantagement.in_(['economic', 'none']), 0)],
             else_ = None)
        # get a single row for each (student_lookup, school_year, grade) pair
        snapshots = sql.select([
            student_lookup,
            school_year,
            grade,
            gender,
            ethnicity,
            school_name,
            district,
            disability,
            disadvantagement,
            economic_disadvantagement.label('economic_disadvantagement'),
            academic_disadvantagement.label('academic_disadvantagement'),
            limited_english,
            discipline_incidents,
            days_absent,
            age.label('age'),
        ]).\
        distinct(student_lookup, school_year, grade).\
        where(
            student_lookup != None,
        ).\
        order_by(
            student_lookup,
            sql.desc(school_year)
        ).cte('snapshots_temp_a')
        # join on previous years to get features from the past for same student_lookup
        snapshots = self.join_history_feats(snapshots)
        return snapshots

    def join_history_feats(self, snapshots):
        """Join each snapshot row to the student's history.

        Aliases: a = current row; b = all earlier (or same) years, used for
        transfer counts and cumulative discipline; c = exactly one year /
        one grade earlier; d = exactly two years / two grades earlier.
        """
        a = aliased(snapshots, name='snapshot_history_a')
        b = aliased(snapshots, name='snapshot_history_b')
        c = aliased(snapshots, name='snapshot_history_c')
        d = aliased(snapshots, name='snapshot_history_d')
        joined = sql.join(
            left=a, right=b,
            onclause=db_expr.and_(
                a.c.student_lookup == b.c.student_lookup,
                a.c.school_year >= b.c.school_year,
                b.c.grade >= features_config.min_grade
            ),
            isouter=True
        )
        joined = sql.join(
            left=joined, right=c,
            onclause=db_expr.and_(
                joined.c[a.name+'_student_lookup'] == c.c.student_lookup,
                joined.c[a.name+'_school_year'] == c.c.school_year + 1,
                joined.c[a.name+'_grade'] == c.c.grade + 1
            ),
            isouter=True
        )
        joined = sql.join(
            left=joined, right=d,
            onclause=db_expr.and_(
                joined.c[a.name+'_student_lookup'] == d.c.student_lookup,
                joined.c[a.name+'_school_year'] == d.c.school_year + 2,
                joined.c[a.name+'_grade'] == d.c.grade + 2
            ),
            isouter=True
        )
        # Distinct schools seen so far minus one; clamped at zero so rows
        # with no history (NULL-only joins) do not go negative.
        num_transfers = db_func.count(sql.distinct(joined.c[b.name+'_school_name'])) - 1
        num_transfers = sql.case([(num_transfers < 0, 0)], else_=num_transfers) # special case for nulls
        cumul_discipline_incidents = db_func.sum(joined.c[b.name+'_discipline_incidents'])
        joined_a_cols = [
            joined.c[a.name+'_student_lookup'],
            joined.c[a.name+'_school_year'],
            joined.c[a.name+'_grade'],
            joined.c[a.name+'_gender'],
            joined.c[a.name+'_ethnicity'],
            joined.c[a.name+'_school_name'],
            joined.c[a.name+'_district'],
            joined.c[a.name+'_disability'],
            joined.c[a.name+'_disadvantagement'],
            joined.c[a.name+'_economic_disadvantagement'],
            joined.c[a.name+'_academic_disadvantagement'],
            joined.c[a.name+'_limited_english'],
            joined.c[a.name+'_discipline_incidents'],
            joined.c[c.name+'_discipline_incidents'].label('pre_1_year_discipline_incidents'),
            joined.c[d.name+'_discipline_incidents'].label('pre_2_year_discipline_incidents'),
            joined.c[a.name+'_days_absent'],
            joined.c[c.name+'_days_absent'].label('pre_1_year_days_absent'),
            joined.c[d.name+'_days_absent'].label('pre_2_year_days_absent'),
            joined.c[a.name+'_age'],
        ]
        # Only high-school rows (grade >= 9) are emitted; aggregates are
        # grouped over the non-aggregated current-row columns.
        return sql.select(
            joined_a_cols +
            [
                num_transfers.label('num_transfers'),
                cumul_discipline_incidents.label('cumul_discipline_incidents')
            ]
        ).\
        select_from(joined).\
        where(
            joined.c[a.name+'_grade'] >= 9
        ).\
        group_by(
            *joined_a_cols
        ).\
        order_by(
            joined.c[a.name+'_student_lookup'],
            sql.desc(joined.c[a.name+'_school_year'])
        )
def count_parameters(model):
    """Return the number of trainable parameters in *model*.

    Bug fix: the original tested ``param.requires_grad_`` — the in-place
    setter *method*, which is always truthy — so frozen parameters were
    counted too.  The attribute is ``requires_grad``.
    """
    return sum(param.numel() for param in model.parameters() if param.requires_grad)
class AttrDict(dict):
    """A dict whose entries can also be read and written as attributes."""

    def __setattr__(self, name, value):
        # Attribute assignment stores straight into the mapping.
        self[name] = value

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails; missing keys
        # therefore raise KeyError, exactly like the original.
        return self[name]
| StarcoderdataPython |
1748757 | """Casambi implementation."""
import logging
import time
import random
import re
from typing import Tuple
from pprint import pformat
from asyncio import TimeoutError, sleep
from aiohttp import client_exceptions
from .errors import (
AiocasambiException,
LoginRequired,
ResponseError,
RateLimit,
CasambiAPIServerError,
)
from .websocket import (
WSClient,
)
from .consts import (
SIGNAL_CONNECTION_STATE,
SIGNAL_DATA,
STATE_RUNNING,
SIGNAL_UNIT_PULL_UPDATE,
MAX_NETWORK_IDS,
MAX_RETRIES,
)
from .units import Units
from .unit import Unit
from .scenes import Scenes
LOGGER = logging.getLogger(__name__)
class Controller:
"""Casambi controller."""
def __init__(
self,
*,
email: str,
api_key: str,
websession,
user_password: str = None,
network_password: str = None,
sslcontext=None,
callback=None,
network_timeout: int = 300,
):
self.email = email
self.user_password = <PASSWORD>
self.network_password = <PASSWORD>
self.api_key = api_key
self.network_timeout = network_timeout
self.session = websession
self.sslcontext = sslcontext
self.callback = callback
self.rest_url = "https://door.casambi.com/v1"
self.headers = {
"Content-type": "application/json",
"X-Casambi-Key": self.api_key,
}
self.websocket = {}
self._session_ids = {}
self._network_ids = set()
self.units = {}
self.scenes = {}
self._wire_id_to_network_id = {}
self._reconnecting = False
self._last_websocket_ping = time.time()
    def set_session_id(self, *, session_id: str) -> None:
        """Set session id"""
        # The session id rides along on every REST call as a header.
        self.headers["X-Casambi-Session"] = session_id
def get_units(self) -> list:
result = []
"""Getter for getting units."""
for network_id in self._network_ids:
for unit in self.units[network_id].get_units():
result.append(unit)
return result
def get_scenes(self) -> list:
"""Getter for getting scenes."""
return self.scenes.get_scenes()
async def create_session(self) -> None:
LOGGER.debug("Create session called!")
for i in range(0, MAX_RETRIES):
"""Create Casambi session."""
try:
if self.user_password:
LOGGER.debug("Creating user session")
await self.create_user_session()
return
except TimeoutError:
LOGGER.debug(
"caught asyncio.TimeoutError when trying to create user session, trying again"
)
await sleep(self.network_timeout)
continue
try:
if self.network_password:
LOGGER.debug("Creating network session")
await self.create_network_session()
return
except TimeoutError:
LOGGER.debug(
"caught asyncio.TimeoutError when trying to create network session, trying again"
)
await sleep(self.network_timeout)
continue
err_msg = "create_session failed to setup session!"
LOGGER.error(err_msg)
raise AiocasambiException(err_msg)
async def create_user_session(self) -> None:
"""
Creating user session.
Expected response:
{
"sessionId": "hJK65SenmlL2354y.P822D76HufewNSloo780PvU-78DwdmnMA8exzIo9.mmNWD23whEqbPOsl11hjjWo03___",
"sites": {
"Rg5alx4BF41lSU2jK4r7T0Q7X0i00mQ": {
"name": "Playground",
"address": "",
"role": "ADMIN",
"networks": {
"VcrTwqLZJ26UYMXxTClmpfZxELcrPUAa": {
"id": "VcrTwqLZJ26UYMXxTClmpfZxELcrPUAa",
"address": "a00f251f77cc",
"name": "Dev Network",
"type": "OPEN",
"grade": "EVOLUTION",
"role": "ADMIN"
}
}
}
},
"networks": {
"VcrTwqLZJ26UYMXxTClmpfZxELcrPUAa": {
"id": "VcrTwqLZJ26UYMXxTClmpfZxELcrPUAa",
"address": "a00f251f77cc",
"name": "Dev Network",
"type": "OPEN",
"grade": "EVOLUTION",
"role": "ADMIN"
}
}
}
"""
url = f"{self.rest_url}/users/session"
LOGGER.debug("create_user_session called")
headers = {"Content-type": "application/json", "X-Casambi-Key": self.api_key}
auth = {
"email": self.email,
"password": <PASSWORD>,
}
LOGGER.debug(
f"create_user_session headers: {pformat(headers)} auth: {pformat(auth)}"
)
data = None
try:
data = await self.request("post", url=url, json=auth, headers=headers)
except LoginRequired as err:
LOGGER.error("create_user_session caught LoginRequired exception")
raise err
LOGGER.debug(f"create_user_session data from request {data}")
self.set_session_id(session_id=data["sessionId"])
for network_key in data["networks"].keys():
self._network_ids.add(data["networks"][network_key]["id"])
if "sessionId" in data["networks"][network_key]:
self._session_ids[network_key] = data["networks"][network_key][
"sessionId"
]
else:
self._session_ids[network_key] = data["sessionId"]
LOGGER.debug(
f"network_ids: {pformat(self._network_ids)} session_ids: {pformat(self._session_ids)}"
)
    async def create_network_session(self) -> None:
        """
        Creating network session.
        Expected response:
        {
            'VcrTwqLZJ26UYMXxTClmpfZxELcrPUAa': {
                'address': 'ff69cc2fdf00',
                'grade': 'CLASSIC',
                'id': 'VcrTwqLZJ26UYMXxTClmpfZxELcrPUAa',
                'mac': 'ff69cc2fdf00',
                'name': 'Dev Network',
                'sessionId': '5ARffxyrpwJYy7Hf1xxx-HmF18Agmff39kSKDxxBxxxWkUg59SU9pii.9jBVi6PEyfq9Y9gokiel0yfljGmJQg__',
                'type': 'PROTECTED'
            },
            'TYqGffRLwKrArqkOQVtXcw1ffgdLIjkU': {
                'address': 'ffcaaaacbb51',
                'grade': 'EVOLUTION',
                'id': 'TYqGffRLwKrArqkOQVtXcw1ffgdLIjkU',
                'mac': 'ffcaaaacbb51',
                'name': 'Dev Network',
                'sessionId': 'KDRmwOqerOsTyrr0x9HLrGFe1nknEk3oRoT-Kz3DJ.wx97MTXQXC.ZbWwqt9ze0KwC6h3GCTlPsUemX8uvK5Ow__',
                'type': 'PROTECTED'}
        }
        """
        LOGGER.debug("create_network_session called")
        url = f"{self.rest_url}/networks/session"
        headers = {"Content-type": "application/json", "X-Casambi-Key": self.api_key}
        auth = {
            "email": self.email,
            "password": self.network_password,
        }
        LOGGER.debug(f"create_network_session headers: {headers} auth: {auth}")
        data = None
        try:
            data = await self.request("post", url=url, json=auth, headers=headers)
        except LoginRequired as err:
            LOGGER.error("create_network_session: caught LoginRequired exception")
            raise err
        LOGGER.debug(f"create_network_session: data from request {pformat(data)}")
        # Each network in the response carries its own session id.
        for network_id in data.keys():
            self._network_ids.add(data[network_id]["id"])
            self._session_ids[network_id] = data[network_id]["sessionId"]
        LOGGER.debug(
            f"create_network_session: network_ids: {pformat(self._network_ids)} session_ids: {pformat(self._session_ids)}"
        )
    async def get_network_information(self) -> dict:
        """Creating network information."""
        # GET https://door.casambi.com/v1/networks/{id}
        # Fetches /networks/{id} per known network; networks whose session
        # is rejected are dropped from the tracked set afterwards.
        result = {}
        failed_network_ids = []
        LOGGER.debug("get_network_information called")
        if not self._network_ids or len(self._network_ids) == 0:
            raise AiocasambiException("Network ids not set")
        for network_id in self._network_ids:
            # Each network uses its own session id header.
            self.set_session_id(session_id=self._session_ids[network_id])
            url = f"{self.rest_url}/networks/{network_id}"
            dbg_msg = f"get_network_information request <url: {url} "
            dbg_msg += f"headers= {self.headers}>"
            LOGGER.debug(dbg_msg)
            data = None
            try:
                data = await self.request("get", url=url, headers=self.headers)
            except LoginRequired:
                LOGGER.error(
                    f"get_network_information caught LoginRequired exception for network_id: {network_id}"
                )
                failed_network_ids.append(network_id)
                continue
            LOGGER.debug(f"get_network_information response: {pformat(data)}")
            result[network_id] = data
        if len(result) == 0:
            raise AiocasambiException(
                "get_network_information Failed to get any network information!"
            )
        # Deferred removal: can't mutate the set while iterating it above.
        for failed_network_id in failed_network_ids:
            self.__remove_network_id(network_id=failed_network_id)
        return result
    async def get_network_state(self) -> dict:
        """Get network state."""
        # GET https://door.casambi.com/v1/networks/{networkId}/state
        # For each network: fetch its state (retrying on timeouts), feed it
        # into the Units container and notify the callback with the pull
        # update signal.  Networks with rejected sessions are dropped.
        result = []
        failed_network_ids = []
        if not self._network_ids or len(self._network_ids) == 0:
            raise AiocasambiException("Network ids not set")
        LOGGER.debug(f"get_network_state called units: {pformat(self.units)}")
        for network_id in self._network_ids:
            failed_network_request = False
            self.set_session_id(session_id=self._session_ids[network_id])
            url = f"{self.rest_url}/networks/{network_id}/state"
            LOGGER.debug(
                f"get_network_state request url: {url} headers= {self.headers}"
            )
            data = None
            for i in range(0, MAX_RETRIES):
                try:
                    data = await self.request("get", url=url, headers=self.headers)
                except LoginRequired:
                    LOGGER.error(
                        f"get_network_state caught LoginRequired exception for network_id: {network_id}"
                    )
                    failed_network_ids.append(network_id)
                    failed_network_request = True
                    break
                except TimeoutError:
                    LOGGER.debug(
                        "caught asyncio.TimeoutError when initialize tried to fetch network information, trying again"
                    )
                    await sleep(self.network_timeout)
                    continue
                # Success!
                break
            if failed_network_request:
                continue
            if not data:
                error_msg = "get_network_state failed to get network state!"
                LOGGER.error(error_msg)
                raise AiocasambiException(error_msg)
            LOGGER.debug(f"get_network_state response: {data}")
            self.units[network_id].process_network_state(data)
            self.callback(
                SIGNAL_UNIT_PULL_UPDATE, self.units[network_id].get_units_unique_ids()
            )
            result.append(data)
        if len(result) == 0:
            raise AiocasambiException("get_network_state failed to get any state!")
        for failed_network_id in failed_network_ids:
            self.__remove_network_id(network_id=failed_network_id)
        return result
    async def init_unit_state_controls(self, *, network_id: str) -> None:
        """
        Getter for getting the unit state from Casambis cloud api
        """
        # GET https://door.casambi.com/v1/networks/{id}
        # Unique unit ids have the form "<network_id>-<unit_id>".
        # NOTE(review): the network_id parameter is re-bound inside the loop
        # from the regex match — presumably always equal to the argument;
        # confirm against Units.get_units_unique_ids().
        unit_regexp = re.compile(r"(?P<network_id>[a-zA-Z0-9]+)-(?P<unit_id>\d+)$")
        unique_ids = self.units[network_id].get_units_unique_ids()
        LOGGER.debug(f"init_unit_state_controls unique_ids: {pformat(unique_ids)}")
        for unique_unit_id in unique_ids:
            match = unit_regexp.match(unique_unit_id)
            network_id = match.group("network_id")
            unit_id = match.group("unit_id")
            data = None
            for i in range(0, MAX_RETRIES):
                try:
                    data = await self.get_unit_state_controls(
                        unit_id=unit_id, network_id=network_id
                    )
                except TimeoutError:
                    LOGGER.debug(
                        "caught asyncio.TimeoutError when initialize tried to fetch network information, trying again"
                    )
                    await sleep(self.network_timeout)
                    continue
                # Success!
                break
            if not data:
                error_msg = f"init_unit_state_controls failed to get unit state for unit: {unique_unit_id}"
                LOGGER.error(error_msg)
                raise AiocasambiException(error_msg)
            self.units[network_id].set_controls(unit_id=unit_id, data=data)
    def get_unit(self, *, unit_id: int, network_id: str) -> Unit:
        """
        Get specific unit
        """
        # Thin delegation to the per-network Units container.
        return self.units[network_id].get_unit(unit_id=unit_id)
    def get_unit_value(self, *, unit_id: int, network_id: str) -> int:
        """
        Get the unit value
        """
        return self.units[network_id].get_unit_value(unit_id=unit_id)
    def get_unit_distribution(self, *, unit_id: int, network_id: str) -> int:
        """
        Get the unit distribution
        """
        return self.units[network_id].get_unit_distribution(unit_id=unit_id)
    async def get_unit_state(self, *, unit_id: int, network_id: str) -> dict:
        """
        Getter for getting the unit state from Casambis cloud api
        """
        # GET https://door.casambi.com/v1/networks/{id}
        if not self._network_ids or len(self._network_ids) == 0:
            raise AiocasambiException("Network ids not set")
        # Switch the shared headers to this network's session before the call.
        session_id = self._session_ids[network_id]
        self.set_session_id(session_id=session_id)
        url = "https://door.casambi.com/v1/networks/"
        url += f"{network_id}/units/{unit_id}/state"
        LOGGER.debug(
            f"get_unit_state called, unit_id: {unit_id}, network_id: {network_id} session_id: {session_id}"
        )
        data = None
        try:
            data = await self.request("get", url=url, headers=self.headers)
        except LoginRequired as err:
            LOGGER.error("get_unit_state caught LoginRequired exception")
            raise err
        return data
    async def get_unit_state_controls(self, *, unit_id: int, network_id: str) -> list:
        """
        Get unit controls for unit
        {
            'activeSceneId': 0,
            'address': '26925689c64c',
            'condition': 0,
            'controls': [[{'type': 'Dimmer', 'value': 0.0},
                        {'level': 0.49736842105263157,
                        'max': 6000,
                        'min': 2200,
                        'type': 'CCT',
                        'value': 4090.0}]],
            'dimLevel': 0.0,
            'firmwareVersion': '26.24',
            'fixtureId': 14235,
            'groupId': 0,
            'id': 13,
            'image': 'mbUdKbLz5g3VsVNJIgTYboHa8ce9YfSK',
            'name': 'Arbetslampa',
            'on': True,
            'online': True,
            'position': 9,
            'priority': 3,
            'status': 'ok',
            'type': 'Luminaire'
        }
        """
        # Only the 'controls' part of the state is of interest here;
        # an empty list signals a unit without controls in its state.
        data = await self.get_unit_state(unit_id=unit_id, network_id=network_id)
        if "controls" in data:
            return data["controls"]
        return []
    async def initialize(self) -> None:
        """Initialiser"""
        # Fetch network information (with timeout retries), build the
        # per-network Units/Scenes containers, pull the initial network
        # state and then the per-unit control state.
        LOGGER.debug("initialize called")
        network_information = None
        for i in range(0, MAX_RETRIES):
            try:
                network_information = await self.get_network_information()
                break
            except TimeoutError:
                LOGGER.debug(
                    "caught asyncio.TimeoutError when initialize tried to fetch network information, trying again"
                )
                await sleep(self.network_timeout)
                continue
            # Success!
            # NOTE(review): this break is unreachable — both branches above
            # already break or continue.
            break
        if not network_information:
            error_msg = "initialize failed to fetch network information"
            LOGGER.error(error_msg)
            raise AiocasambiException(error_msg)
        for network_id, data in network_information.items():
            # wire_id 0 is a placeholder until start_websocket assigns one.
            self.units[network_id] = Units(
                data["units"],
                controller=self,
                network_id=network_id,
                wire_id=0,
            )
            self.scenes[network_id] = Scenes(
                data["scenes"],
                controller=self,
                network_id=network_id,
                wire_id=0,
            )
        LOGGER.debug(f"initialize network__information: {pformat(network_information)}")
        # Get initial network state
        await self.get_network_state()
        LOGGER.debug(
            f"initialize getting unit state for all units in network_ids: {pformat(self._network_ids)}"
        )
        for network_id in self._network_ids:
            await self.init_unit_state_controls(network_id=network_id)
        return
    async def start_websockets(self) -> None:
        """
        Start websocket for all networks
        """
        LOGGER.debug("start_websockets called")
        for network_id in self._network_ids:
            LOGGER.debug(f"start_websockets starting network_id: {network_id}")
            await self.start_websocket(network_id=network_id)
async def start_websocket(self, *, network_id: str) -> None:
"""
Start websession and websocket to Casambi.
"""
LOGGER.debug(f"start_websocket called network_id: {network_id}")
wire_id = random.randint(1, MAX_NETWORK_IDS)
while wire_id not in self._wire_id_to_network_id:
wire_id = random.randint(1, MAX_NETWORK_IDS)
self._wire_id_to_network_id[wire_id] = network_id
LOGGER.debug(
f"start_websocket generate wire_id: {wire_id} network_id: {network_id}"
)
session_id = self._session_ids[network_id]
dbg_msg = f"start_websocket: api_key: {self.api_key},"
dbg_msg += f" network_id: {network_id},"
dbg_msg += f" user_session_id: {session_id},"
dbg_msg += f" wire_id: {wire_id}"
LOGGER.debug(dbg_msg)
self.websocket[network_id] = WSClient(
session=self.session,
ssl_context=self.sslcontext,
api_key=self.api_key,
network_id=network_id,
session_id=session_id,
wire_id=wire_id,
controller=self,
callback=self.session_handler,
)
self.websocket[network_id].start()
# We don't want to ping right after we setup a websocket
self._last_websocket_ping = time.time()
# Set wire_id
self.set_wire_id(wire_id=wire_id, network_id=network_id)
async def ws_ping(self) -> None:
"""Function for setting a ping over websocket"""
current_time = time.time()
if current_time < (self._last_websocket_ping + 60 * 3 + 30):
# Ping should be sent every 5 min
msg = "Not sending websocket ping, "
msg += f"current_time: {current_time}, "
msg += f"last websocket ping: {self._last_websocket_ping}"
LOGGER.debug(msg)
return
for wire_id, network_id in self._wire_id_to_network_id.items():
message = {
"method": "ping",
"wire": wire_id,
}
LOGGER.debug(f"Sending websocket ping: {message}")
succcess = await self.websocket[network_id].send_message(message)
if not succcess:
# Try to reconnect
await self.reconnect()
self._last_websocket_ping = current_time
    async def ws_send_message(self, msg: dict, network_id: str) -> None:
        """Send websocket message to casambi api"""
        # Piggy-back a keep-alive ping check before sending.
        await self.ws_ping()
        LOGGER.debug(f"Sending websocket message: msg {msg}")
        succcess = await self.websocket[network_id].send_message(msg)
        if not succcess:
            # Try to reconnect
            await self.reconnect()
def get_websocket_states(self) -> str:
"""Getter for websocket state"""
result = []
for network_id, _ in self.websocket.items():
result.append(self.websocket[network_id].state)
return result
    async def stop_websockets(self) -> None:
        """Close websession and websocket to Casambi."""
        LOGGER.info("Shutting down connections to Casambi.")
        # Stop each network's websocket individually.
        for network_id, _ in self.websocket.items():
            await self.stop_websocket(network_id=network_id)
    async def stop_websocket(self, *, network_id: str) -> None:
        """Close websession and websocket to Casambi."""
        LOGGER.info("Shutting down connections to Casambi.")
        if network_id in self.websocket:
            self.websocket[network_id].stop()
    def session_handler(self, signal: str, wire_id: str) -> None:
        """Signalling from websocket.
        data - new data available for processing.
        state - network state has changed.
        """
        # No websockets yet -> nothing to dispatch.
        if len(self.websocket) == 0:
            return
        LOGGER.debug(f"session_handler: websockets {self.websocket}")
        if signal == SIGNAL_DATA:
            LOGGER.debug(f"session_handler is handling SIGNAL_DATA: {signal}")
            network_id = self._wire_id_to_network_id[wire_id]
            # Translate the raw websocket payload into unit changes and
            # forward them to the registered callback.
            new_items = self.message_handler(self.websocket[network_id].data, wire_id)
            if new_items and self.callback:
                self.callback(SIGNAL_DATA, new_items)
        elif signal == SIGNAL_CONNECTION_STATE and self.callback:
            dbg_msg = "session_handler is handling"
            dbg_msg += f"SIGNAL_CONNECTION_STATE: {signal}"
            LOGGER.debug(dbg_msg)
            network_id = self._wire_id_to_network_id[wire_id]
            self.callback(SIGNAL_CONNECTION_STATE, self.websocket[network_id].state)
        else:
            LOGGER.debug(f"session_handler is NOT handling signal: {signal}")
    def message_handler(self, message: dict, wire_id: str) -> dict:
        """
        Receive event from websocket and identifies where the event belong.
        """
        changes = {}
        LOGGER.debug(f"message_handler recieved websocket message: {message}")
        # Signaling of online gateway
        # {'wire': 9, 'method': 'peerChanged', 'online': True}
        # {'method': 'peerChanged', 'online': False, 'wire': 9}
        #
        # New state
        # {
        # 'condition': 0.0,
        # 'wire': 9,
        # 'activeSceneId': 0,
        # 'controls':
        #   [
        #       {
        #         'type': 'Overheat',
        #         'status': 'ok'
        #       },
        #       {
        #          'type': 'Dimmer',
        #          'value': 0.0
        #        }
        #    ],
        # 'sensors': [],
        #  'method': 'unitChanged',
        # 'online': True,
        # 'details': {
        #     '_name': 'ffff',
        #     'name': 'Name',
        #     'address': 'fffff',
        #     'fixture_model': 'LD220WCM',
        #     'fixture': 859.0,
        #     'OEM': 'Vadsbo'
        # },
        # 'id': 8,
        # 'priority': 3.0,
        # 'on': True,
        # 'status': 'ok'
        # }
        network_id = self._wire_id_to_network_id[wire_id]
        try:
            # Dispatch by the message's "method" field to the right Units
            # handler; anything else yields an empty change set.
            if "method" in message and message["method"] == "unitChanged":
                changes = self.units[network_id].process_unit_event(message)
            elif "method" in message and message["method"] == "peerChanged":
                changes = self.units[network_id].handle_peer_changed(message)
        except TypeError as err:
            dbg_msg = "message_handler in controller caught TypeError"
            dbg_msg += f" for message: {message} error: {err}"
            LOGGER.debug(dbg_msg)
            raise err
        return changes
async def check_connection(self) -> None:
"""async function for checking connection"""
all_running = True
states = self.get_websocket_states()
for state in states:
if state != STATE_RUNNING:
all_running = False
if all_running:
return
# Try to reconnect
await self.reconnect()
    async def reconnect(self) -> None:
        """async function for reconnecting."""
        LOGGER.debug("Controller is reconnecting")
        # Guard against concurrent reconnection attempts.
        if self._reconnecting:
            return
        self._reconnecting = True
        # Trying to reconnect
        reconnect_counter = 0
        # Retry forever: rate limits, connection errors and timeouts all
        # back off by network_timeout seconds and try again.
        while True:
            try:
                reconnect_counter += 1
                dbg_msg = "Controller is trying to reconnect, "
                dbg_msg += f"try: {reconnect_counter}"
                LOGGER.debug(dbg_msg)
                await self.create_session()
            except RateLimit as err:
                LOGGER.debug(f"caught RateLimit exception: {err}, trying again")
                await sleep(self.network_timeout)
                continue
            except client_exceptions.ClientConnectorError:
                dbg_msg = "caught "
                dbg_msg += "aiohttp.client_exceptions.ClientConnectorError, "
                dbg_msg += "trying again"
                LOGGER.debug(dbg_msg)
                await sleep(self.network_timeout)
                continue
            except TimeoutError:
                LOGGER.debug(
                    "caught asyncio.TimeoutError during reconnection, trying again"
                )
                await sleep(self.network_timeout)
                continue
            # Reconnected
            self._reconnecting = False
            break
        # Set new session ids for websocket
        for network_id in self.websocket.keys():
            self.websocket[network_id].session_id = self._session_ids[network_id]
        LOGGER.debug("Controller is reconnected")
async def turn_unit_on(self, *, unit_id: int, network_id: str) -> None:
    """Switch the given unit on through its network's unit handler."""
    handler = self.units[network_id]
    await handler.turn_unit_on(unit_id=unit_id)
async def turn_unit_off(self, *, unit_id: int, network_id: str) -> None:
    """Switch the given unit off through its network's unit handler."""
    handler = self.units[network_id]
    await handler.turn_unit_off(unit_id=unit_id)
def unit_supports_rgb(self, *, unit_id: int, network_id: str) -> bool:
    """Return True if the unit can render RGB color."""
    return self.units[network_id].supports_rgb(unit_id=unit_id)
def unit_supports_rgbw(self, *, unit_id: int, network_id: str) -> bool:
    """Return True if the unit can render RGBW color."""
    return self.units[network_id].supports_rgbw(unit_id=unit_id)
def unit_supports_color_temperature(self, *, unit_id: int, network_id: str) -> bool:
    """Return True if the unit supports tunable color temperature."""
    return self.units[network_id].supports_color_temperature(unit_id=unit_id)
def get_supported_color_temperature(
    self, *, unit_id: int, network_id: str
) -> Tuple[int, int, int]:
    """Return (cct_min, cct_max, current) color temperature for the unit."""
    handler = self.units[network_id]
    return handler.get_supported_color_temperature(unit_id=unit_id)
def unit_supports_brightness(self, *, unit_id: int, network_id: str) -> bool:
    """Return True if the unit supports brightness (dimming) control."""
    return self.units[network_id].supports_brightness(unit_id=unit_id)
def unit_supports_distribution(self, *, unit_id: int, network_id: str) -> bool:
    """Return True if the unit supports light distribution control."""
    return self.units[network_id].supports_distribution(unit_id=unit_id)
def set_wire_id(self, *, wire_id: int, network_id: str) -> None:
    """Propagate a wire id to both the unit and scene handlers of a network."""
    self.units[network_id].set_wire_id(wire_id=wire_id)
    self.scenes[network_id].set_wire_id(wire_id=wire_id)
async def set_unit_rgbw(
    self,
    *,
    unit_id: int,
    network_id: str,
    color_value: Tuple[int, int, int, int],
    send_rgb_format=False,
) -> None:
    """
    Set unit RGBW color.

    NOTE(review): send_rgb_format is accepted but never forwarded to the
    unit handler (unlike set_unit_rgb below) -- confirm whether that is
    intentional.
    """
    await self.units[network_id].set_unit_rgbw(
        unit_id=unit_id,
        color_value=color_value,
    )
async def set_unit_rgb(
    self,
    *,
    unit_id: int,
    network_id: str,
    color_value: Tuple[int, int, int],
    send_rgb_format=False,
) -> None:
    """
    Set unit RGB color.

    Args:
        unit_id: target unit.
        network_id: network the unit belongs to.
        color_value: (r, g, b) tuple.
        send_rgb_format: forwarded to the unit handler.
    """
    await self.units[network_id].set_unit_rgb(
        unit_id=unit_id, color_value=color_value, send_rgb_format=send_rgb_format
    )
async def set_unit_color_temperature(
    self, *, unit_id: int, network_id: str, value: int, source: str = "TW"
) -> None:
    """
    Set unit color temperature.

    Args:
        unit_id: target unit.
        network_id: network the unit belongs to.
        value: color temperature value to apply.
        source: origin tag passed through to the unit handler.
    """
    await self.units[network_id].set_unit_color_temperature(
        unit_id=unit_id, value=value, source=source
    )
async def __remove_network_id(self, *, network_id: str) -> None:
    """
    Private helper that forgets everything associated with a network:
    its websocket, session id, wire-id mappings, units and scenes.

    Args:
        network_id: key of the network to remove.
    """
    wire_ids_to_remove = []
    if network_id in self.websocket:
        # Stopping websocket.
        # Bug fix: self.websocket maps network_id -> websocket instance
        # (see reconnect()), so the stop call must go through the mapped
        # instance -- the old code called stop_websocket() on the dict.
        await self.websocket[network_id].stop_websocket()
        self.websocket.pop(network_id)
    if network_id in self._network_ids:
        self._network_ids.pop(network_id)
    if network_id in self._session_ids:
        # Bug fix: pop from _session_ids (the dict just checked); the old
        # code popped from an unrelated attribute (self.set_session_id).
        self._session_ids.pop(network_id)
    # Collect first, then remove, so the dict is not mutated while iterating.
    for wire_id, wire_network_id in self._wire_id_to_network_id.items():
        if wire_network_id == network_id:
            wire_ids_to_remove.append(wire_id)
    for wire_id in wire_ids_to_remove:
        self._wire_id_to_network_id.pop(wire_id)
    if network_id in self.units:
        self.units.pop(network_id)
    if network_id in self.scenes:
        self.scenes.pop(network_id)
async def request(
    self, method, json=None, url=None, headers=None, **kwargs
) -> dict:
    """Make a request to the API.

    Pings the websocket first, then issues the HTTP request. Maps error
    status codes to library exceptions; returns the decoded JSON body
    for JSON responses, otherwise the raw aiohttp response object.
    """
    await self.ws_ping()
    LOGGER.debug(f"request url: {url}")
    try:
        async with self.session.request(
            method,
            url,
            json=json,
            ssl=self.sslcontext,
            headers=headers,
            **kwargs,
        ) as res:
            LOGGER.debug(f"request: {res.status} {res.content_type} {res}")
            if res.status == 401:
                raise LoginRequired(f"Call {url} received 401 Unauthorized")
            if res.status == 404:
                raise ResponseError(f"Call {url} received 404 Not Found")
            if res.status == 410:
                raise ResponseError(f"Call {url} received 410 Gone")
            if res.status == 429:
                raise RateLimit(
                    f"Call {url} received 429 Server rate limit exceeded!"
                )
            if res.status == 500:
                log_msg = f"Server Error: url: {url} "
                log_msg += f"headers: {headers} "
                log_msg += f"status: {res.status} "
                log_msg += f"response: {res}"
                raise CasambiAPIServerError(log_msg)
            if res.content_type == "application/json":
                response = await res.json()
                return response
            # Non-JSON payload: hand back the raw response object.
            # NOTE(review): this contradicts the declared -> dict return
            # type -- confirm callers handle both shapes.
            return res
    except client_exceptions.ClientError as err:
        raise err
| StarcoderdataPython |
3234682 | '''
To render 3d scenes (or even high dimensional), the first thing we need is
the ability to rotate the objects we render and view them from different angles.
This can be done with rotation matrices or quaternions.
We favor the rotation matrix since they are simpler and can be used in spaces of arbitrary dimensionality.
Rotation matrices are simply collections of orthonormal vectors that form a basis of the space we are in.
This module provides methods to generate various kinds of rotation matrices.
'''
import numpy as np
from PIL import Image, ImageDraw, ImageFont, ImageMath
def planar_rotation(theta = np.pi*3/20.0):
    """
    Returns the 2x2 matrix that rotates 2-d vectors counter-clockwise
    about the origin.

    args:
        theta: The angle (radians) by which we will perform the rotation.
    """
    c = np.cos(theta)
    s = np.sin(theta)
    # Standard counter-clockwise convention R = [[c, -s], [s, c]],
    # consistent with yzrotation() and rotation() in this module.
    # Consistency fix: the previous version had the off-diagonal signs
    # flipped, i.e. it rotated clockwise, disagreeing with every other
    # rotation helper here.
    r = np.eye(2)
    r[0, 0] = c
    r[0, 1] = -s
    r[1, 0] = s
    r[1, 1] = c
    return r
def generalized_planar_rotation(pt, center, r):
    """
    Rotates a 2-d point about an arbitrary central point.

    args:
        pt: The point to rotate.
        center: The point about which to rotate.
        r: A 2x2 rotation matrix (see planar_rotation).
    """
    # Shift into the center's coordinate frame, rotate there, shift back.
    offset = pt - center
    rotated = np.dot(r, offset)
    return rotated + center
def yzrotation(theta = np.pi*3/20.0):
    """
    Rotation about the x-axis: mixes only the y and z coordinates.

    args:
        theta: The angle by which we will perform the rotation.
    """
    c = np.cos(theta)
    s = np.sin(theta)
    return np.array([
        [1.0, 0.0, 0.0],
        [0.0, c, -s],
        [0.0, s, c],
    ])
def general_rotation(a, theta):
    """
    3-d rotation about an arbitrary axis through the origin
    (Rodrigues' rotation formula in matrix form).

    args:
        a: The axis to rotate about (normalized internally; only the
           first three components are used).
        theta: The angle to rotate by.
    """
    axis = np.asarray(a, dtype=float)
    axis = axis / np.sqrt(np.sum(axis ** 2))
    ax, ay, az = axis[:3]
    c = np.cos(theta)
    s = np.sin(theta)
    # R = c*I + s*[a]_x + (1-c) * a a^T, where [a]_x is the cross-product
    # matrix of the (unit) axis.
    cross = np.array([
        [0.0, -az, ay],
        [az, 0.0, -ax],
        [-ay, ax, 0.0],
    ])
    outer = np.outer(axis[:3], axis[:3])
    return c * np.eye(3) + s * cross + (1.0 - c) * outer
def axis_rotation(pt1, pt2, theta):
    """
    Applies to 3-d space.
    Performs a rotation about an axis given by two points
    not necessarily centered at the origin. Unfortunately,
    we need to return a 4x4 matrix here since translations
    can't be expressed as a 3x3 matrix. So, it is the users
    responsibility to add a 1 to any vector they are applying
    this matrix to so that is is 4 dimensional. Also, unlike
    general_rotation, this will only work for vectors post-multiplied
    to the matrix.
    Refer to:
    http://paulbourke.net/geometry/rotate/
    https://en.wikipedia.org/wiki/Translation_(geometry).
    args:
        pt1: The first point of the axis of rotation.
        pt2: The second point of the axis of rotation.
        theta: The angle by which the rotation will occur.
    """
    # Homogeneous translation taking pt1 to the origin, and its inverse.
    tr = np.eye(4)
    tr_inv = np.eye(4)
    tr[0,3] = -pt1[0]
    tr[1,3] = -pt1[1]
    tr[2,3] = -pt1[2]
    tr_inv[0,3] = pt1[0]
    tr_inv[1,3] = pt1[1]
    tr_inv[2,3] = pt1[2]
    # Embed the 3x3 rotation about the (pt2 - pt1) axis in a 4x4 matrix.
    rot = np.eye(4)
    rot[:3,:3] = general_rotation(pt2-pt1,theta)
    # Compose: translate to origin, rotate, translate back.
    return np.dot(np.dot(tr_inv,rot),tr)
def axisangle(a, theta):
    """
    A wrapper to general_rotation named more appropriately.
    Returns a rotation matrix that rotates about an axis by an angle.
    Kept as an alias for readability at call sites.
    args:
        a: The axis to rotate about
        theta: The angle to rotate by.
    """
    return general_rotation(a,theta)
def matrix_to_axisangle(m):
    """
    Inverse of axisangle(): recover the (angle, unit axis) pair that
    generates a given 3-d rotation matrix.

    args:
        m: The rotation matrix (collection of orthonormal vectors in a matrix).
    """
    angle = np.arccos((np.trace(m) - 1.0) / 2.0)
    # The axis direction lives in the antisymmetric part of m.
    diffs = np.array([
        m[2, 1] - m[1, 2],
        m[0, 2] - m[2, 0],
        m[1, 0] - m[0, 1],
    ])
    axis = diffs / np.sqrt(np.sum(diffs ** 2))
    return (angle, axis)
def rotation(n, theta = np.pi/3):
    """
    General rotation matrix of any dimensionality, built as the product
    of successive planar (2-d) rotations in every coordinate plane.
    http://www.continuummechanics.org/rotationmatrix.html

    args:
        n : The dimensionality of the space in which we are going to rotate things.
        theta: The angle of rotation for each of the planar 2-d rotation matrices.
    """
    c = np.cos(theta)
    s = np.sin(theta)
    result = np.eye(n)
    # One planar rotation per (i, j) coordinate pair with i < j.
    for i in range(n - 1):
        for j in range(i + 1, n):
            plane = np.eye(n)
            plane[i, i] = c
            plane[i, j] = -s
            plane[j, i] = s
            plane[j, j] = c
            result = result @ plane
    return result
def rotation_transition(i = 0, oldr = general_rotation(np.array([1,0,0]),np.pi/2), newr = rotation(3,2*np.pi*4/30.0)):
    """
    A sequence of intermediate rotations that take the system from an initial rotated state (oldr) to a final one (newr).
    args:
        i: 1 means complete rotation to new coordinates, 0 means old rotation.
        oldr: Old rotation matrix.
        newr: New rotation matrix.
    NOTE: the default matrices are computed once at import time; safe here
    because they are only read, never mutated in place.
    """
    # Relative rotation mapping oldr onto newr (transpose == inverse for
    # rotation matrices).
    transn = np.dot(newr,np.transpose(oldr))
    (theta, vec) = matrix_to_axisangle(transn)
    # Scale the angle by i to interpolate along the geodesic between them.
    r = general_rotation(vec, i*theta)
    return np.dot(r, oldr)
def rotate_vec2vec(oldvec, newvec):
    """
    Returns the rotation matrix needed to rotate an old vector to a new vector.
    args:
        oldvec: The vector we are starting with.
        newvec: The vector to which we are rotating.
    """
    axis = np.cross(oldvec, newvec)
    oldvec1 = oldvec / np.sqrt(sum(oldvec**2))
    newvec1 = newvec / np.sqrt(sum(newvec**2))
    # Robustness fix: clamp to [-1, 1] so floating-point error on nearly
    # (anti-)parallel vectors cannot push the dot product outside arccos's
    # domain and yield NaN.
    cos_theta = np.clip(sum(oldvec1 * newvec1), -1.0, 1.0)
    theta = np.arccos(cos_theta)
    # NOTE(review): exactly parallel vectors give a zero cross product and
    # axisangle divides by zero -- unchanged from the original behavior;
    # callers should avoid that degenerate case.
    return axisangle(axis, theta)
| StarcoderdataPython |
1670125 | import os
import simpy
import numpy as np
from pathlib import Path
from RideSimulator.Grid import Grid
from RideSimulator.Trip import Trip
lat_points, lon_points, trip_distances, trips_per_min = None, None, None, None
def read_data(directory="data", lon_file="lon_points", lat_file="lat_points", distance_file="trip_distances",
              min_file="trips_per_min"):
    """Load the empirical trip sample arrays into the module-level globals.

    Resolves the data directory relative to the RideSimulator package so
    the loader works whether or not the process is started from inside
    that directory.
    """
    global lat_points, lon_points, trip_distances, trips_per_min
    data_path = directory
    if Path(os.getcwd()).parts[-1] != "RideSimulator":
        # Running from outside the package: point at RideSimulator/<directory>.
        data_path = os.path.join(Path(os.getcwd()).parent, os.path.join('RideSimulator', directory))
    print("Loading trip data...")
    lat_points = np.loadtxt(os.path.join(data_path, lat_file))
    lon_points = np.loadtxt(os.path.join(data_path, lon_file))
    trip_distances = np.loadtxt(os.path.join(data_path, distance_file))
    trips_per_min = np.loadtxt(os.path.join(data_path, min_file))
    print("Data loading complete")
class TripGenerator(object):
    """Generates randomized trips on a Grid, driven by the empirical
    lat/lon/distance/frequency samples loaded by read_data()."""

    def __init__(self, grid: Grid, time_unit, trips_per_week=20000, seed: int = None):
        if seed is not None:
            np.random.seed(seed)
        self.id = 0
        self.grid = grid
        self.width = grid.width
        self.height = grid.height
        # Resolution used when sampling random pickup displacements.
        self.granularity = 1000
        self.min_trip_distance = self.width / 100
        self.max_displacement = self.width * 0.05 * self.granularity
        self.time_unit = time_unit
        self.units_per_day = time_unit * 60 * 24
        time_slice = self.units_per_day // 24
        self.peak_times = [time_slice * 4, time_slice * 5, time_slice * 14, time_slice * 15]
        self.lat_points, self.lon_points, self.trip_distances = self.import_data()
        self.trips_per_min = self.scale_trip_count(trips_per_week)
        self.updated_hex_ids = set()

    def import_data(self):
        """Shuffle and rescale the raw samples so they span this grid."""
        lat_points_copy = lat_points.copy()
        lon_points_copy = lon_points.copy()
        trip_distances_copy = trip_distances.copy()
        np.random.shuffle(lat_points_copy)
        np.random.shuffle(lon_points_copy)
        np.random.shuffle(trip_distances_copy)
        lat_points_scaled = lat_points_copy - lat_points_copy.min()
        # Bug fix: operate on the shuffled copy; the original subtracted
        # from the unshuffled global lon_points, discarding the shuffle.
        lon_points_scaled = lon_points_copy - lon_points_copy.min()
        lat_scale = self.height / lat_points_scaled.max()
        lon_scale = self.width / lon_points_scaled.max()
        distance_scale = self.width / trip_distances_copy.max()
        lat_points_scaled = lat_points_scaled * lat_scale
        lon_points_scaled = lon_points_scaled * lon_scale
        # Bug fix: scale the shuffled copy, not the global trip_distances.
        distances_scaled = trip_distances_copy * distance_scale
        return lat_points_scaled, lon_points_scaled, distances_scaled

    @staticmethod
    def scale_trip_count(trips_per_week):
        """Downscale the empirical per-minute trip counts to the requested
        weekly volume, sampling the fractional parts probabilistically."""
        scale = trips_per_min.sum() * 7 // trips_per_week
        scaled_hourly = (trips_per_min / scale).astype(int)
        dif = 0
        sampled_trips = []
        for i in range(24):
            h = i * 60
            full_trips = (trips_per_min[h:h + 60] / scale) // 1
            prob_trips = (trips_per_min[h:h + 60] / scale) % 1
            total_trips = []
            # Bernoulli-sample the fractional remainders so the expected
            # count matches the scaled rate.
            sample_trips = (np.random.rand(60) < prob_trips).astype(int)
            total_trips += (full_trips + sample_trips).tolist()
            sampled_trips += (full_trips + sample_trips).tolist()
            # NOTE(review): dif is accumulated but never used and does not
            # affect the return value -- looks like leftover debug code.
            trip_count = int(sum(total_trips) / 100)
            dif += trip_count - scaled_hourly[i]
        return np.array(sampled_trips).astype(int)

    def get_trip_locations(self, x, y, distance):
        """Jitter the sampled pickup point, pick a random heading, and
        return [[pickup], [drop], [hex_id, additional_reward]]; with 20%
        probability pickup and drop are swapped."""
        x = min(self.width, x + np.random.randint(0, self.max_displacement) / self.granularity)
        y = min(self.height, y + np.random.randint(0, self.max_displacement) / self.granularity)
        start_hex = self.grid.hex_overlay.get_closest_hex([x, y])
        start_hex.trip_count += 1
        # NOTE(review): np.cos/np.sin below expect radians, but theta is
        # converted to degrees here -- the heading is still pseudo-random
        # (wraps mod 2*pi) but probably not what was intended; confirm.
        theta = np.rad2deg(np.random.random() * 2 * np.pi)
        while distance < self.min_trip_distance:
            distance = distance * 1.5
        while True:
            d_x = x + distance * np.cos(theta)
            d_y = y + distance * np.sin(theta)
            x_count = 0
            y_count = 0
            # If the drop point leaves the grid, first try flipping the
            # direction on that axis; after two failures, shrink the trip.
            while (d_x > self.width or d_x < 0) and x_count < 2:
                d_x = x - distance * np.cos(theta)
                x_count += 1
            while (d_y > self.height or d_y < 0) and y_count < 2:
                d_y = y - distance * np.sin(theta)
                y_count += 1
            if x_count == 2 or y_count == 2:
                distance = distance / 2
            else:
                prob = np.random.random()
                if prob < 0.2:
                    return np.array([[d_x, d_y], [x, y], [start_hex.id, start_hex.additional_reward]])
                else:
                    return np.array([[x, y], [d_x, d_y], [start_hex.id, start_hex.additional_reward]])

    def create_trip(self, env: simpy.Environment, trip_id: int) -> Trip:
        """
        Creates a trip in the env with the given trip_id.
        The trip will have randomly generated pickup and drop locations, and the id of the nearest driver pool is
        assigned to the trip.
        Pickup and drop locations will not be the same.
        :param env: simpy environment
        :param trip_id: trip id
        :return: trip object
        """
        distance = self.trip_distances[trip_id]
        pick_up_loc, drop_loc, hex_data = self.get_trip_locations(self.lon_points[trip_id], self.lat_points[trip_id],
                                                                  distance)
        self.updated_hex_ids.add(hex_data[0])
        nearest_spot = self.grid.get_nearest_spot(pick_up_loc)[0]
        trip_i = Trip(env, trip_id, pick_up_loc, drop_loc, [nearest_spot], hex_data[0], hex_data[1])
        return trip_i

    def generate_trips(self, env: simpy.Environment):
        """Create the batch of trips scheduled for the current sim minute."""
        peak_time = False
        day_time = int((env.now % self.units_per_day) / self.time_unit)
        num_trips = self.trips_per_min[day_time]
        trips = []
        for _ in range(num_trips):
            trips.append(self.create_trip(env, self.id))
            self.id += 1
        return trips, peak_time
| StarcoderdataPython |
3344371 | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Struct Class
# this is a auto generated file generated by Cheetah
# Namespace: com.sun.star.beans
# Libre Office Version: 7.3
from ooo.oenv.env_const import UNO_NONE
from ..lang.event_object import EventObject as EventObject_a3d70b03
from ..uno.x_interface import XInterface as XInterface_8f010a43
import typing
class PropertyChangeEvent(EventObject_a3d70b03):
    """
    Struct Class

    gets delivered whenever a \"bound\" or \"constrained\" property is changed.

    A PropertyChangeEvent object is sent as an argument to the methods of XPropertyChangeListener and XVetoableChangeListener.

    Normally such events contain the name and the old and new value of the changed property.

    Void values may be provided for the old and new values if their true values are not known.

    See Also:
        `API PropertyChangeEvent <https://api.libreoffice.org/docs/idl/ref/structcom_1_1sun_1_1star_1_1beans_1_1PropertyChangeEvent.html>`_
    """
    __ooo_ns__: str = 'com.sun.star.beans'
    __ooo_full_ns__: str = 'com.sun.star.beans.PropertyChangeEvent'
    __ooo_type_name__: str = 'struct'
    typeName: str = 'com.sun.star.beans.PropertyChangeEvent'
    """Literal Constant ``com.sun.star.beans.PropertyChangeEvent``"""

    def __init__(self, Source: typing.Optional[XInterface_8f010a43] = None, PropertyName: typing.Optional[str] = '', Further: typing.Optional[bool] = False, PropertyHandle: typing.Optional[int] = 0, OldValue: typing.Optional[object] = None, NewValue: typing.Optional[object] = None) -> None:
        """
        Constructor

        Arguments:
            Source (XInterface, optional): Source value.
            PropertyName (str, optional): PropertyName value.
            Further (bool, optional): Further value.
            PropertyHandle (int, optional): PropertyHandle value.
            OldValue (object, optional): OldValue value.
            NewValue (object, optional): NewValue value.
        """
        # Copy-constructor: another PropertyChangeEvent passed as Source
        # clones every field and skips normal initialization.
        if isinstance(Source, PropertyChangeEvent):
            oth: PropertyChangeEvent = Source
            self.Source = oth.Source
            self.PropertyName = oth.PropertyName
            self.Further = oth.Further
            self.PropertyHandle = oth.PropertyHandle
            self.OldValue = oth.OldValue
            self.NewValue = oth.NewValue
            return

        kargs = {
            "Source": Source,
            "PropertyName": PropertyName,
            "Further": Further,
            "PropertyHandle": PropertyHandle,
            "OldValue": OldValue,
            "NewValue": NewValue,
        }
        self._init(**kargs)

    def _init(self, **kwargs) -> None:
        self._property_name = kwargs["PropertyName"]
        self._further = kwargs["Further"]
        self._property_handle = kwargs["PropertyHandle"]
        self._old_value = kwargs["OldValue"]
        self._new_value = kwargs["NewValue"]
        # Consume this struct's own fields, then delegate the remainder
        # (Source) to the EventObject base initializer.
        inst_keys = ('PropertyName', 'Further', 'PropertyHandle', 'OldValue', 'NewValue')
        kargs = kwargs.copy()
        for key in inst_keys:
            del kargs[key]
        super()._init(**kargs)

    @property
    def PropertyName(self) -> str:
        """
        contains the unique name of the property which changes its value.
        """
        return self._property_name

    @PropertyName.setter
    def PropertyName(self, value: str) -> None:
        self._property_name = value

    @property
    def Further(self) -> bool:
        """
        contains TRUE if further events in the same transaction occur.
        """
        return self._further

    @Further.setter
    def Further(self, value: bool) -> None:
        self._further = value

    @property
    def PropertyHandle(self) -> int:
        """
        contains the implementation handle for the property.

        May be -1 if the implementation has no handle. You can use this handle to get values from the XFastPropertySet.
        """
        return self._property_handle

    @PropertyHandle.setter
    def PropertyHandle(self, value: int) -> None:
        self._property_handle = value

    @property
    def OldValue(self) -> object:
        """
        contains the old value of the property.
        """
        return self._old_value

    @OldValue.setter
    def OldValue(self, value: object) -> None:
        self._old_value = value

    @property
    def NewValue(self) -> object:
        """
        contains the new value of the property.
        """
        return self._new_value

    @NewValue.setter
    def NewValue(self, value: object) -> None:
        self._new_value = value
| StarcoderdataPython |
3339431 | from PyQt4 import Qt, QtCore
class OkCover(Qt.QWidget):
    """Semi-transparent, frameless overlay pinned to the left edge of its
    parent widget (200px wide, full parent height)."""

    def __init__(self, parent=None):
        Qt.QWidget.__init__(self, parent)
        self.setWindowFlags(Qt.Qt.FramelessWindowHint)
        # NOTE(review): assumes a parent widget is always supplied --
        # with parent=None, self.parent() is None and this call fails.
        self.setGeometry(QtCore.QRect(0, 0, 200, self.parent().height()))
        # Translucent white background (alpha 128).
        palette = Qt.QPalette()
        palette.setColor(Qt.QPalette.Background, Qt.QColor(255,255,255,128))
        self.setPalette(palette)
        self.setAutoFillBackground(True)

    def paintEvent(self, event):
        # Re-pin the overlay to the parent's current height on every repaint.
        self.setGeometry(QtCore.QRect(0, 0, 200 , self.parent().height()))
| StarcoderdataPython |
196211 | """
Module description:
"""
__version__ = '0.3.0'
__author__ = '<NAME>, <NAME>, <NAME>, <NAME>'
__email__ = '<EMAIL>, <EMAIL>, <EMAIL>'
from .AMR import AMR | StarcoderdataPython |
196785 | <filename>hifive/api/rest/HFBaseWeatherRequest.py
'''
Created by yong.huang on 2016.11.04
'''
from hifive.api.base import RestApi
class HFBaseWeatherRequest(RestApi):
    """Request object for the HiFive BaseWeather gateway API."""

    def __init__(self, domain=None, port=80):
        """Create the request, defaulting to the test gateway host."""
        RestApi.__init__(self, domain or 'hifive-gateway-test.hifiveai.com', port)
        self.clientId = None
        self.location = None

    def getapiname(self):
        """Name of the gateway API this request targets."""
        return 'BaseWeather'
10649 | <reponame>oyasr/mudawen<gh_stars>0
import os
from dotenv import load_dotenv
load_dotenv()
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
    """Base configuration shared by all environments.

    Settings are read from the environment (populated by python-dotenv at
    import time) with development-friendly defaults.
    """
    SECRET_KEY = os.getenv('SECRET_KEY') or os.urandom(32)
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SQLALCHEMY_RECORD_QUERIES = True
    MAIL_SERVER = os.getenv('MAIL_SERVER') or 'smtp.googlemail.com'
    MAIL_PORT = int(os.environ.get('MAIL_PORT', '587'))
    MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS', 'true').lower() in \
        ['true', 'on', '1']
    MAIL_USERNAME = os.getenv('MAIL_USERNAME')
    # Bug fix: this line was corrupted placeholder text ("<PASSWORD>") and
    # did not parse; restored the os.getenv pattern used by the sibling
    # settings above.
    MAIL_PASSWORD = os.getenv('MAIL_PASSWORD')
    MUDAWEN_MAIL_SUBJECT_PREFIX = '[Mudawen]'
    # NOTE(review): sender name/address look anonymized -- confirm the
    # real value before deploying.
    MUDAWEN_MAIL_SENDER = '<NAME> <<EMAIL>>'
    MUDAWEN_ADMIN = os.getenv('MUDAWEN_ADMIN')
    MUDAWEN_POSTS_PER_PAGE = 20
    MUDAWEN_FOLLOWERS_PER_PAGE = 50
    MUDAWEN_COMMENTS_PER_PAGE = 30
    MUDAWEN_QUERY_TIME_LIMIT = 0.5

    @staticmethod
    def init_app(app):
        """Hook for environment-specific app initialization (no-op here)."""
        pass
class DevConfig(Config):
    """Development configuration: file-based SQLite DB next to the app,
    overridable via DEV_DATABASE_URL."""
    ENV = 'development'
    SQLALCHEMY_DATABASE_URI = os.getenv('DEV_DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestConfig(Config):
    """Testing configuration: in-memory SQLite, CSRF protection disabled."""
    TESTING = True
    WTF_CSRF_ENABLED = False
    SQLALCHEMY_DATABASE_URI = os.getenv('TEST_DATABASE_URL') or \
        'sqlite://'
class ProductionConfig(Config):
    """Production configuration: database taken from DATABASE_URL, with a
    local SQLite fallback."""
    SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data.sqlite')
# Registry used by the app factory to select a configuration by name.
config = {
    'development': DevConfig,
    'testing': TestConfig,
    'production': ProductionConfig,
    'default': DevConfig
}
| StarcoderdataPython |
1792033 | #
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
import boto3
import ban_handler
import cgf_lambda_settings
import cgf_service_client
import errors
import identity_validator
import service
# Shown when the user has no Cognito identity mapping (see identity_validator).
UNKNOWN_PLAYER_ERROR_MESSAGE = "User '{}' is not registered with the PlayerAccount Gem or has not sent data to the Leaderboards Gem"
@service.api
def post(request, user=None):
    """
    Call PlayerAccount to ban the player.

    The player must be a registered user in the PlayerAccount Gem, and
    Leaderboards must have seen the player via a data request, so that a
    mapping exists between the user name and the Cognito identity
    (needed by identity_validator.get_id_from_user).
    """
    print("Handling player ban for {}".format(user))
    interface_url = cgf_lambda_settings.get_service_url("CloudGemPlayerAccount_banplayer_1_0_0")
    if not interface_url:
        # PlayerAccount Gem not deployed: fall back to the local ban table.
        return {
            "status": ban_handler.ban(user)
        }
    service_client = cgf_service_client.for_url(interface_url, verbose=True, session=boto3._get_default_session())
    navigation = service_client.navigate('playerban')
    cog_id = identity_validator.get_id_from_user(user)
    if cog_id is None:
        raise errors.ClientError(UNKNOWN_PLAYER_ERROR_MESSAGE.format(user))
    result = navigation.POST(
        {"id": cog_id}
    )
    return result.DATA
@service.api
def delete(request, user=None):
    """
    Call PlayerAccount to unban the player.

    The player must be a registered user in the PlayerAccount Gem, and
    Leaderboards must have seen the player via a data request, so that a
    mapping exists between the user name and the Cognito identity
    (needed by identity_validator.get_id_from_user).
    """
    print("Handling player unban for {}".format(user))
    interface_url = cgf_lambda_settings.get_service_url("CloudGemPlayerAccount_banplayer_1_0_0")
    if not interface_url:
        # PlayerAccount Gem not deployed: fall back to the local ban table.
        return {
            "status": ban_handler.lift_ban(user)
        }
    service_client = cgf_service_client.for_url(interface_url, verbose=True, session=boto3._get_default_session())
    navigation = service_client.navigate('playerban')
    cog_id = identity_validator.get_id_from_user(user)
    if cog_id is None:
        raise errors.ClientError(UNKNOWN_PLAYER_ERROR_MESSAGE.format(user))
    result = navigation.DELETE(
        {"id": cog_id}
    )
    return result.DATA
| StarcoderdataPython |
77238 | <filename>generator.py<gh_stars>0
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.layers import batch_norm, fully_connected, flatten
from tensorflow.contrib.layers import xavier_initializer
from ops import *
import numpy as np
class Generator(object):
    """SEGAN generator built from dilated residual blocks with summed
    skip connections (noisy waveform in, enhanced waveform out)."""

    def __init__(self, segan):
        # segan carries the hyperparameters (batch_size, g_enc_depths,
        # g_dilated_blocks, keep_prob, ...) and collects summaries.
        self.segan = segan

    def __call__(self, noisy_w, is_ref, spk=None):
        """ Build the graph propagating (noisy_w) --> x
        On first pass will make variables.
        """
        segan = self.segan

        def make_z(shape, mean=0., std=1., name='z'):
            # Reference pass: z is a fixed, non-trainable variable so it
            # can be reused; otherwise sample fresh noise per call.
            if is_ref:
                with tf.variable_scope(name) as scope:
                    z_init = tf.random_normal_initializer(mean=mean, stddev=std)
                    z = tf.get_variable("z", shape,
                                        initializer=z_init,
                                        trainable=False
                                        )
                    if z.device != "/device:GPU:0":
                        # this has to be created into gpu0
                        print('z.device is {}'.format(z.device))
                        assert False
            else:
                z = tf.random_normal(shape, mean=mean, stddev=std,
                                     name=name, dtype=tf.float32)
            return z

        # Reuse variables on every pass after the first.
        if hasattr(segan, 'generator_built'):
            tf.get_variable_scope().reuse_variables()
            make_vars = False
        else:
            make_vars = True
        print('*** Building Generator ***')
        in_dims = noisy_w.get_shape().as_list()
        h_i = noisy_w
        if len(in_dims) == 2:
            h_i = tf.expand_dims(noisy_w, -1)
        elif len(in_dims) < 2 or len(in_dims) > 3:
            raise ValueError('Generator input must be 2-D or 3-D')
        kwidth = 3
        # Concatenate the noise code z onto the input features.
        z = make_z([segan.batch_size, h_i.get_shape().as_list()[1],
                    segan.g_enc_depths[-1]])
        h_i = tf.concat([h_i, z], 2)
        skip_out = True
        skips = []
        for block_idx, dilation in enumerate(segan.g_dilated_blocks):
            name = 'g_residual_block_{}'.format(block_idx)
            # The last block emits only its residual output (no skip path).
            if block_idx >= len(segan.g_dilated_blocks) - 1:
                skip_out = False
            if skip_out:
                res_i, skip_i = residual_block(h_i,
                                               dilation, kwidth, num_kernels=32,
                                               bias_init=None, stddev=0.02,
                                               do_skip = True,
                                               name=name)
            else:
                res_i = residual_block(h_i,
                                       dilation, kwidth, num_kernels=32,
                                       bias_init=None, stddev=0.02,
                                       do_skip = False,
                                       name=name)
            # feed the residual output to the next block
            h_i = res_i
            if segan.keep_prob < 1:
                print('Adding dropout w/ keep prob {} '
                      'to G'.format(segan.keep_prob))
                h_i = tf.nn.dropout(h_i, segan.keep_prob_var)
            if skip_out:
                # accumulate the skip connections
                skips.append(skip_i)
            else:
                # for last block, the residual output is appended
                skips.append(res_i)
        print('Amount of skip connections: ', len(skips))
        # TODO: last pooling for actual wave
        with tf.variable_scope('g_wave_pooling'):
            # Sum all skip branches, then squash to a single-channel wave.
            skip_T = tf.stack(skips, axis=0)
            skips_sum = tf.reduce_sum(skip_T, axis=0)
            skips_sum = leakyrelu(skips_sum)
            wave_a = conv1d(skips_sum, kwidth=1, num_kernels=1,
                            init=tf.truncated_normal_initializer(stddev=0.02))
            wave = tf.tanh(wave_a)
        segan.gen_wave_summ = histogram_summary('gen_wave', wave)
        print('Last residual wave shape: ', res_i.get_shape())
        print('*************************')
        segan.generator_built = True
        return wave, z
class AEGenerator(object):
    """SEGAN auto-encoder generator: a strided-conv encoder, an optional
    latent noise code z, and a mirrored deconv decoder with U-Net style
    skip connections between matching encoder/decoder layers."""

    def __init__(self, segan):
        # segan carries hyperparameters (batch_size, g_enc_depths, ...)
        # and collects summaries.
        self.segan = segan

    def __call__(self, noisy_w, is_ref, spk=None, z_on=True, do_prelu=False):
        # TODO: remove c_vec
        """ Build the graph propagating (noisy_w) --> x
        On first pass will make variables.
        """
        segan = self.segan

        def make_z(shape, mean=0., std=1., name='z'):
            # Reference pass: z is a fixed, non-trainable variable so it
            # can be reused; otherwise sample fresh noise per call.
            if is_ref:
                with tf.variable_scope(name) as scope:
                    z_init = tf.random_normal_initializer(mean=mean, stddev=std)
                    z = tf.get_variable("z", shape,
                                        initializer=z_init,
                                        trainable=False
                                        )
                    if z.device != "/device:GPU:0":
                        # this has to be created into gpu0
                        print('z.device is {}'.format(z.device))
                        assert False
            else:
                z = tf.random_normal(shape, mean=mean, stddev=std,
                                     name=name, dtype=tf.float32)
            return z

        # Reuse variables on every pass after the first.
        if hasattr(segan, 'generator_built'):
            tf.get_variable_scope().reuse_variables()
            make_vars = False
        else:
            make_vars = True
        if is_ref:
            print('*** Building Generator ***')
        in_dims = noisy_w.get_shape().as_list()
        h_i = noisy_w
        if len(in_dims) == 2:
            h_i = tf.expand_dims(noisy_w, -1)
        elif len(in_dims) < 2 or len(in_dims) > 3:
            raise ValueError('Generator input must be 2-D or 3-D')
        kwidth = 31
        enc_layers = 7
        skips = []
        if is_ref and do_prelu:
            #keep track of prelu activations
            alphas = []
        with tf.variable_scope('g_ae'):
            #AE to be built is shaped:
            # enc ~ [16384x1, 8192x16, 4096x32, 2048x32, 1024x64, 512x64, 256x128, 128x128, 64x256, 32x256, 16x512, 8x1024]
            # dec ~ [8x2048, 16x1024, 32x512, 64x512, 8x256, 256x256, 512x128, 1024x128, 2048x64, 4096x64, 8192x32, 16384x1]
            #FIRST ENCODER
            for layer_idx, layer_depth in enumerate(segan.g_enc_depths):
                h_i_dwn = downconv(h_i, layer_depth, kwidth=kwidth,
                                   init=tf.truncated_normal_initializer(stddev=0.02),
                                   name='enc_{}'.format(layer_idx))
                if is_ref:
                    print('Downconv {} -> {}'.format(h_i.get_shape(),
                                                     h_i_dwn.get_shape()))
                h_i = h_i_dwn
                if layer_idx < len(segan.g_enc_depths) - 1:
                    if is_ref:
                        print('Adding skip connection downconv '
                              '{}'.format(layer_idx))
                    # store skip connection
                    # last one is not stored cause it's the code
                    skips.append(h_i)
                if do_prelu:
                    if is_ref:
                        print('-- Enc: prelu activation --')
                    h_i = prelu(h_i, ref=is_ref, name='enc_prelu_{}'.format(layer_idx))
                    if is_ref:
                        # split h_i into its components
                        alpha_i = h_i[1]
                        h_i = h_i[0]
                        alphas.append(alpha_i)
                else:
                    if is_ref:
                        print('-- Enc: leakyrelu activation --')
                    h_i = leakyrelu(h_i)
            if z_on:
                # random code is fused with intermediate representation
                z = make_z([segan.batch_size, h_i.get_shape().as_list()[1],
                            segan.g_enc_depths[-1]])
                h_i = tf.concat([z, h_i], 2)
            #SECOND DECODER (reverse order)
            g_dec_depths = segan.g_enc_depths[:-1][::-1] + [1]
            if is_ref:
                print('g_dec_depths: ', g_dec_depths)
            for layer_idx, layer_depth in enumerate(g_dec_depths):
                h_i_dim = h_i.get_shape().as_list()
                out_shape = [h_i_dim[0], h_i_dim[1] * 2, layer_depth]
                # deconv
                h_i_dcv = deconv(h_i, out_shape, kwidth=kwidth, dilation=2,
                                 init=tf.truncated_normal_initializer(stddev=0.02),
                                 name='dec_{}'.format(layer_idx))
                if is_ref:
                    print('Deconv {} -> {}'.format(h_i.get_shape(),
                                                   h_i_dcv.get_shape()))
                h_i = h_i_dcv
                if layer_idx < len(g_dec_depths) - 1:
                    if do_prelu:
                        if is_ref:
                            print('-- Dec: prelu activation --')
                        h_i = prelu(h_i, ref=is_ref,
                                    name='dec_prelu_{}'.format(layer_idx))
                        if is_ref:
                            # split h_i into its components
                            alpha_i = h_i[1]
                            h_i = h_i[0]
                            alphas.append(alpha_i)
                    else:
                        if is_ref:
                            print('-- Dec: leakyrelu activation --')
                        h_i = leakyrelu(h_i)
                    # fuse skip connection
                    skip_ = skips[-(layer_idx + 1)]
                    if is_ref:
                        print('Fusing skip connection of '
                              'shape {}'.format(skip_.get_shape()))
                    h_i = tf.concat([h_i, skip_], 2)
                else:
                    # Final layer: tanh keeps the output wave in [-1, 1].
                    if is_ref:
                        print('-- Dec: tanh activation --')
                    h_i = tf.tanh(h_i)
        wave = h_i
        if is_ref and do_prelu:
            print('Amount of alpha vectors: ', len(alphas))
        segan.gen_wave_summ = histogram_summary('gen_wave', wave)
        if is_ref:
            print('Amount of skip connections: ', len(skips))
            print('Last wave shape: ', wave.get_shape())
            print('*************************')
        segan.generator_built = True
        # ret feats contains the features refs to be returned
        ret_feats = [wave]
        if z_on:
            ret_feats.append(z)
        if is_ref and do_prelu:
            ret_feats += alphas
        return ret_feats
| StarcoderdataPython |
3357483 | import pytest
from .node import Node
from .bst import BST
@pytest.fixture
def bst_ten_values_random():
    """ returns a BST for a list of known values """
    # Values 0-9 inserted in a fixed, shuffled order so the tree shape is
    # deterministic across test runs.
    return BST([5,8,3,4,1,2,9,6,7,0])
@pytest.fixture
def bst_empty():
    """ returns empty BST """
    # Fresh empty tree per test to avoid cross-test state.
    return BST()
4836013 | <gh_stars>0
from __future__ import division
from protos import common_pb2
from protos import state_pb2
def do_something():
    """Smoke-test consumer for the generated protobuf modules.

    Builds a Vec3 position and an Agent message, copies the position into
    the agent's pose, prints the intermediate state, and returns True.
    """
    print('do something with the protos...')
    position = common_pb2.Vec3()
    position.x = 12
    print('pos val:', position.x, position.y, position.z)
    demo_agent = state_pb2.Agent()
    print(demo_agent.id)
    demo_agent.pose.pos.CopyFrom(position)
    print('agent pose.pos:', demo_agent.pose.pos)
    print('agent pose', demo_agent.pose)
    demo_agent.id = 123123
    print(demo_agent.id)
    return True
# Allow running this module directly as a quick protobuf smoke test.
if __name__ == "__main__":
    do_something()
| StarcoderdataPython |
3338778 | # -*- coding: utf-8 -*-
# CMCBot Pipelines
from scrapy.exceptions import DropItem
from time import time, gmtime, strftime
from datetime import datetime
from hashlib import md5
from scrapy import log
from scrapy.exceptions import DropItem
from twisted.enterprise import adbapi
import logging
class ConvertLastUpdatedPipeline(object):
    """Rewrites CoinMarketCap's relative "n hours ago" timestamp into an
    absolute MySQL-style UTC datetime string computed at scrape time."""

    def process_item(self, item, spider):
        if not item['last_updated']:
            raise DropItem("Missing last_updated in %s" % item)
        raw = item['last_updated'][0]
        age_seconds = 0
        # 'Recently' means "now"; anything else is "<n> hour(s) ago".
        if raw != 'Recently':
            print("Exchange " + item['exchange'][0] + " for pair " + item['pair'][0] + " was updated " + raw)
            hours = int(raw.replace(' hours ago', '').replace(' hour ago', ''))
            age_seconds = hours * 3600
        scraped_at = time() - age_seconds
        item['last_updated'] = strftime('%Y-%m-%d %H:%M:%S', gmtime(scraped_at))
        return item
class RemovePercentSignPipeline(object):
    """Normalizes the scraped market-share value by stripping the '%' sign.

    Input:  item['market_percent'] is a one-element list such as ['1.23%'].
    Output: item['market_percent'] becomes the bare string '1.23'.
    Items without a market_percent value are dropped.
    """

    def process_item(self, item, spider):
        if not item['market_percent']:
            # Bug fix: the original message said "last_updated" here,
            # a copy/paste slip from ConvertLastUpdatedPipeline.
            raise DropItem("Missing market_percent in %s" % item)
        # Scrapy extracts values as lists; take the first match and strip '%'.
        item['market_percent'] = item['market_percent'][0].replace('%', '')
        return item
class RequiredFieldsPipeline(object):
    """Drops any item that is missing (or has an empty) required field."""

    required_fields = ('ticker', 'pair', 'exchange', 'price_usd', 'price_btc', 'volume_usd', 'volume_btc', 'market_percent', 'last_updated')

    def process_item(self, item, spider):
        # Find the first required field that is absent or falsy, if any.
        missing = next((name for name in self.required_fields if not item.get(name)), None)
        if missing is not None:
            raise DropItem("Field '%s' missing: %r" % (missing, item))
        return item
class MySQLStorePipeline(object):
    """A pipeline to store the item in a MySQL database.

    This implementation uses Twisted's asynchronous database API
    (adbapi): each item upsert runs on a thread-pool connection, and the
    resulting Deferred is handed back to the Scrapy engine.
    """
    def __init__(self, dbpool):
        # adbapi.ConnectionPool shared by all items this pipeline processes.
        self.dbpool = dbpool
    @classmethod
    def from_settings(cls, settings):
        # Alternate constructor used by Scrapy: builds the MySQLdb
        # connection pool from the project's MYSQL_* settings.
        dbargs = dict(
            host=settings['MYSQL_HOST'],
            db=settings['MYSQL_DBNAME'],
            user=settings['MYSQL_USER'],
            passwd=settings['MYSQL_PASSWD'],
            charset='utf8',
            use_unicode=True,
        )
        dbpool = adbapi.ConnectionPool('MySQLdb', **dbargs)
        return cls(dbpool)
    def process_item(self, item, spider):
        """Schedule an async upsert of *item*; returns a Deferred."""
        # run db query in the thread pool
        d = self.dbpool.runInteraction(self._do_upsert, item, spider)
        d.addErrback(self._handle_error, item, spider)
        # at the end return the item in case of success or failure
        d.addBoth(lambda _: item)
        # return the deferred instead the item. This makes the engine to
        # process next item (according to CONCURRENT_ITEMS setting) after this
        # operation (deferred) has finished.
        return d
    def _do_upsert(self, conn, item, spider):
        """Perform an insert or update.

        `conn` is the adbapi transaction/cursor supplied by runInteraction.
        NOTE(review): the SELECT-then-INSERT pair is not atomic, so two
        concurrent upserts of the same guid could still race — confirm
        whether a UNIQUE constraint on `guid` backs this up.
        """
        guid = self._get_guid(item)
        now = datetime.utcnow().replace(microsecond=0).isoformat(' ')
        # Existence probe keyed on the item's content-derived guid.
        conn.execute("""SELECT EXISTS(
            SELECT 1 FROM markets WHERE guid = %s
        )""", (guid, ))
        ret = conn.fetchone()[0]
        if ret:
            conn.execute("""
                UPDATE markets
                SET name=%s, ticker=%s, pair=%s, exchange=%s, price_usd=%s, price_btc=%s,
                    volume_usd=%s, volume_btc=%s, market_percent=%s, last_updated=%s, updated=%s
                WHERE guid=%s
            """, (item['name'], item['ticker'], item['pair'], item['exchange'], item['price_usd'],
                  item['price_btc'], item['volume_usd'], item['volume_btc'],
                  item['market_percent'], item['last_updated'], now, guid))
            logging.log(logging.INFO, "Item updated in db: %s %r", guid, item)
        else:
            conn.execute("""
                INSERT INTO markets (guid, name, ticker, pair, exchange, price_usd, price_btc,
                    volume_usd, volume_btc, market_percent, last_updated, updated)
                VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
            """, (guid, item['name'], item['ticker'], item['pair'], item['exchange'], item['price_usd'],
                  item['price_btc'], item['volume_usd'], item['volume_btc'],
                  item['market_percent'], item['last_updated'], now))
            logging.log(logging.INFO, "Item stored in db: %s %r", guid, item)
    def _handle_error(self, failure, item, spider):
        """Handle an error that occurred during the db interaction."""
        # do nothing, just log
        logging.log(logging.ERROR, failure)
    def _get_guid(self, item):
        """Generates an unique identifier for a given item."""
        # hash based in pair, exchange and ticker fields
        # NOTE(review): indexes [0] into each field, which assumes these
        # are still list-valued here, while _do_upsert passes the raw
        # field values straight to SQL — verify the item shape at this
        # stage of the pipeline chain.
        return md5((item['pair'][0] + item['exchange'][0] + item['ticker'][0]).encode('utf-8')).hexdigest()
| StarcoderdataPython |
3235483 | <gh_stars>0
# IDAPython helper: splice the raw contents of a file into the open IDA
# database starting at a fixed effective address.
import ida_bytes

# Path of the binary blob to apply as a patch.
filename = 'C:\\Users\\User\\Desktop\\result.bin'
# Effective address at which patching begins.
ea_begin = 0x001C0020

print('\n\nBegin')
with open(filename, 'rb') as patch_file:  # was `input`, which shadowed the builtin
    data = patch_file.read()  # was `bytes`, which shadowed the builtin type
    print('Size of file = ', len(data))
    ida_bytes.patch_bytes(ea_begin, data)
print('End')
| StarcoderdataPython |
101426 | <reponame>vinay4711/Hands-On-Natural-Language-Processing-with-Python<gh_stars>100-1000
from sklearn import metrics
from itertools import chain
from six.moves import range, reduce
import numpy as np
import tensorflow as tf
from data_utils import tokenize, parse_dialogs_per_response
from memory_network import MemoryNetwork
def vectorize_candidates(candidates, word_idx, sentence_size):
    """Encode each candidate response as a fixed-width row of word indices.

    Unknown words map to index 0, and short candidates are right-padded
    with 0 up to `sentence_size`. Returns a TensorFlow constant of shape
    (num_candidates, sentence_size).
    """
    def encode(tokens):
        row = [word_idx.get(token, 0) for token in tokens]
        return row + [0] * max(0, sentence_size - len(tokens))

    matrix = [encode(candidate) for candidate in candidates]
    return tf.constant(matrix, shape=(len(candidates), sentence_size))
def vectorize_data(data, word_idx, sentence_size, batch_size, max_memory_size):
    """Vectorize (facts, question, answer) triples into padded index arrays.

    Args:
        data: list of (facts, question, answer) triples; `facts` is a list
            of token lists, `question` a token list, `answer` an integer
            candidate index.
        word_idx: dict mapping word -> positive integer index (0 = unknown/pad).
        sentence_size: width each sentence/question row is padded to.
        batch_size: examples per batch; the memory size is fixed per batch.
        max_memory_size: upper bound on how many fact sentences are kept.

    Returns:
        (facts_vector, questions_vector, answers_vector): parallel lists of
        numpy arrays, one entry per input triple.

    Note: sorts `data` IN PLACE, descending by number of facts, so that
    examples grouped into the same batch need a similar memory size.
    """
    facts_vector = []
    questions_vector = []
    answers_vector = []
    # Sort data in descending order by number of facts
    data.sort(key=lambda x: len(x[0]), reverse=True)
    for i, (fact, question, answer) in enumerate(data):
        # Fix one memory size per batch: large enough for the batch's
        # biggest example, capped by max_memory_size, and at least 1.
        if i % batch_size == 0:
            memory_size = max(1, min(max_memory_size, len(fact)))
        # Build fact vector: one zero-padded row of word indices per sentence.
        # (Fix: the original inner loop was `for i, sentence in
        # enumerate(fact, 1)`, shadowing the outer batch index `i`; the
        # inner index was never used, so it is removed.)
        fact_vector = []
        for sentence in fact:
            fact_padding = max(0, sentence_size - len(sentence))
            fact_vector.append(
                [word_idx[w] if w in word_idx else 0 for w in sentence]
                + [0] * fact_padding)
        # Keep the most recent sentences that fit in memory
        fact_vector = fact_vector[::-1][:memory_size][::-1]
        # Pad the memory with all-zero sentences up to memory_size.
        memory_padding = max(0, memory_size - len(fact_vector))
        for _ in range(memory_padding):
            fact_vector.append([0] * sentence_size)
        # Build the zero-padded question vector.
        question_padding = max(0, sentence_size - len(question))
        question_vector = [word_idx[w] if w in word_idx else 0
                           for w in question] \
                          + [0] * question_padding
        # Append to final vectors
        facts_vector.append(np.array(fact_vector))
        questions_vector.append(np.array(question_vector))
        # Answer is already an integer corresponding to a candidate
        answers_vector.append(np.array(answer))
    return facts_vector, questions_vector, answers_vector
class ChatBotWrapper(object):
    """End-to-end wrapper around the MemoryNetwork dialog model.

    Owns the vocabulary, the vectorized candidate set, the TensorFlow
    session, and the train / test / interactive-chat loops for the
    bAbI-dialog retrieval task (the model picks one of a fixed list of
    candidate responses).
    """
    def __init__(self, train_data, test_data, val_data,
                 candidates, candidates_to_idx,
                 memory_size, batch_size, learning_rate,
                 evaluation_interval, hops,
                 epochs, embedding_size):
        """Build vocabulary from all splits, vectorize the candidates, and
        construct the MemoryNetwork model with an Adam optimizer."""
        self.memory_size = memory_size
        self.batch_size = batch_size
        self.evaluation_interval = evaluation_interval
        self.epochs = epochs
        self.candidates = candidates
        self.candidates_to_idx = candidates_to_idx
        self.candidates_size = len(candidates)
        # Inverse mapping used to turn a predicted index back into text.
        self.idx_to_candidates = dict((self.candidates_to_idx[key], key)
                                      for key in self.candidates_to_idx)
        # Initialize data and build vocabulary
        self.train_data = train_data
        self.test_data = test_data
        self.val_data = val_data
        self.build_vocab(train_data + test_data + val_data, candidates)
        # Vectorize candidates
        self.candidates_vec = vectorize_candidates(
            candidates, self.word_idx, self.candidate_sentence_size)
        # Initialize optimizer
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        # Initialize TensorFlow session and Memory Network model
        self.sess = tf.Session()
        self.model = MemoryNetwork(
            self.sentence_size, self.vocab_size,
            self.candidates_size, self.candidates_vec,
            embedding_size, hops,
            optimizer=optimizer, session=self.sess)
    def build_vocab(self, data, candidates):
        """Derive word->index mapping and size constants from the corpus.

        Sets word_idx, sentence_size, candidate_sentence_size,
        memory_size (capped by the largest fact list) and vocab_size.
        """
        # Build word vocabulary set from all data and candidate words
        vocab = reduce(lambda x1, x2: x1 | x2,
                       (set(list(chain.from_iterable(facts)) + questions)
                        for facts, questions, answers in data))
        vocab |= reduce(lambda x1, x2: x1 | x2,
                        (set(candidate) for candidate in candidates))
        vocab = sorted(vocab)
        # Assign integer indices to each word
        self.word_idx = dict((word, idx + 1) for idx, word in enumerate(vocab))
        # Compute various data size numbers
        max_facts_size = max(map(len, (facts for facts, _, _ in data)))
        self.sentence_size = max(
            map(len, chain.from_iterable(facts for facts, _, _ in data)))
        self.candidate_sentence_size = max(map(len, candidates))
        question_size = max(map(len, (questions for _, questions, _ in data)))
        self.memory_size = min(self.memory_size, max_facts_size)
        self.vocab_size = len(self.word_idx) + 1 # +1 for null word
        self.sentence_size = max(question_size, self.sentence_size)
    def predict_for_batch(self, facts, questions):
        """Run model.predict over mini-batches; returns one prediction
        (candidate index) per input example."""
        preds = []
        # Iterate over mini-batches
        for start in range(0, len(facts), self.batch_size):
            end = start + self.batch_size
            facts_batch = facts[start:end]
            questions_batch = questions[start:end]
            # Predict per batch
            pred = self.model.predict(facts_batch, questions_batch)
            preds += list(pred)
        return preds
    def train(self):
        """Mini-batch training loop with periodic train/validation accuracy
        reporting every `evaluation_interval` epochs."""
        # Vectorize training and validation data
        train_facts, train_questions, train_answers = vectorize_data(
            self.train_data, self.word_idx, self.sentence_size,
            self.batch_size, self.memory_size)
        val_facts, val_questions, val_answers = vectorize_data(
            self.val_data, self.word_idx, self.sentence_size,
            self.batch_size, self.memory_size)
        # Chunk training data into batches (note: a trailing partial batch
        # is dropped by this pairing of start/end ranges).
        batches = zip(range(0, len(train_facts) - self.batch_size,
                            self.batch_size),
                      range(self.batch_size, len(train_facts),
                            self.batch_size))
        batches = [(start, end) for start, end in batches]
        # Start training loop
        for epoch in range(1, self.epochs + 1):
            np.random.shuffle(batches)
            total_cost = 0.0
            for start, end in batches:
                facts = train_facts[start:end]
                questions = train_questions[start:end]
                answers = train_answers[start:end]
                # Train on batch
                batch_cost = self.model.fit(facts, questions, answers)
                total_cost += batch_cost
            if epoch % self.evaluation_interval == 0:
                # Compute accuracy over training and validation set
                train_preds = self.predict_for_batch(
                    train_facts, train_questions)
                val_preds = self.predict_for_batch(
                    val_facts, val_questions)
                train_acc = metrics.accuracy_score(
                    train_preds, train_answers)
                val_acc = metrics.accuracy_score(
                    val_preds, val_answers)
                print("Epoch: ", epoch)
                print("Total Cost: ", total_cost)
                print("Training Accuracy: ", train_acc)
                print("Validation Accuracy: ", val_acc)
                print("---")
    def test(self):
        """Report accuracy on the held-out test split."""
        # Compute accuracy over test set
        test_facts, test_questions, test_answers = vectorize_data(
            self.test_data, self.word_idx, self.sentence_size,
            self.batch_size, self.memory_size)
        test_preds = self.predict_for_batch(test_facts, test_questions)
        test_acc = metrics.accuracy_score(test_preds, test_answers)
        print("Testing Accuracy: ", test_acc)
    def interactive_mode(self):
        """Console chat loop: type utterances, the model replies.

        Special inputs: "exit" quits; "restart" clears the dialog memory.
        Each turn appends the $u/$r speaker tags and a #<turn> temporal
        marker before storing utterance and response in the fact memory.
        """
        facts = []
        utterance = None
        response = None
        turn_count = 1
        while True:
            line = input("==> ").strip().lower()
            if line == "exit":
                break
            if line == "restart":
                facts = []
                turn_count = 1
                print("Restarting dialog...\n")
                continue
            utterance = tokenize(line)
            # Dummy answer (-1): only the prediction is used here.
            data = [(facts, utterance, -1)]
            # Vectorize data and make prediction
            f, q, a = vectorize_data(data, self.word_idx,
                self.sentence_size, self.batch_size, self.memory_size)
            preds = self.model.predict(f, q)
            response = self.idx_to_candidates[preds[0]]
            # Print predicted response
            print(response)
            response = tokenize(response)
            # Add turn count temporal encoding
            utterance.append("$u")
            response.append("$r")
            # Add utterance/response encoding
            utterance.append("#" + str(turn_count))
            response.append("#" + str(turn_count))
            # Update facts memory
            facts.append(utterance)
            facts.append(response)
            turn_count += 1
# Script entry point: load the bAbI dialog task-5 corpus, train and
# evaluate the memory-network chatbot, then drop into interactive chat.
if __name__ == "__main__":
    candidates = []
    candidates_to_idx = {}
    # Candidate responses, one per line: "<id> <response text>".
    with open('dialog-babi/dialog-babi-candidates.txt') as f:
        for i, line in enumerate(f):
            candidates_to_idx[line.strip().split(' ', 1)[1]] = i
            line = tokenize(line.strip())[1:]
            candidates.append(line)
    train_data = []
    with open('dialog-babi/dialog-babi-task5-full-dialogs-trn.txt') as f:
        train_data = parse_dialogs_per_response(f.readlines(), candidates_to_idx)
    test_data = []
    with open('dialog-babi/dialog-babi-task5-full-dialogs-tst.txt') as f:
        test_data = parse_dialogs_per_response(f.readlines(), candidates_to_idx)
    val_data = []
    with open('dialog-babi/dialog-babi-task5-full-dialogs-dev.txt') as f:
        val_data = parse_dialogs_per_response(f.readlines(), candidates_to_idx)
    # Hyperparameters follow the original End-to-End Memory Network setup.
    chatbot = ChatBotWrapper(train_data, test_data, val_data,
                             candidates, candidates_to_idx,
                             memory_size=50,
                             batch_size=32,
                             learning_rate=0.001,
                             evaluation_interval=10,
                             hops=3,
                             epochs=200,
                             embedding_size=50)
    chatbot.train()
    chatbot.test()
    chatbot.interactive_mode()
| StarcoderdataPython |
114421 | <reponame>Joacchim/BookMyComics<gh_stars>0
import sys
def read_file(path):
    """Return the full text content of *path*, or None if it cannot be read."""
    try:
        with open(path, 'r') as handle:
            return handle.read()
    except Exception as err:
        print('Failed to read "{}": {}'.format(path, err))
        return None
def write_file(content, path):
    """Write *content* to *path*; return 0 on success, 4 on failure."""
    try:
        with open(path, 'w') as handle:
            handle.write(content)
    except Exception:
        print('Failed to write into "{}"'.format(path))
        return 4
    return 0
def switch_manifest(browser):
    """Copy the browser-specific manifest into web-extension/manifest.json.

    Returns 3 if the source manifest cannot be read, otherwise the status
    code from write_file (0 on success, 4 on write failure).
    """
    manifest = read_file('browsers/{}.json'.format(browser))
    if manifest is None:
        print('aborting...')
        return 3
    return write_file(manifest, 'web-extension/manifest.json')
def show_options():
    """Print command-line usage for this script."""
    print("Browser must be passed (either 'firefox' or 'chrome'). Example:")
    print("> python setup.py firefox")
def main():
    """Entry point: validate the single browser argument and switch manifests.

    Exit codes: 0 success/help, 1 wrong argument count, 2 unknown browser,
    3/4 propagated from switch_manifest.
    """
    args = sys.argv[1:]
    if len(args) != 1:
        show_options()
        return 1
    browser = args[0]
    if browser in ('-h', '--help'):
        show_options()
        return 0
    if browser not in ["firefox", "chrome"]:
        print("Invalid browser passed")
        show_options()
        return 2
    return switch_manifest(browser)
# Run the manifest switcher and propagate its status code to the shell.
if __name__ == "__main__":
    sys.exit(main())
| StarcoderdataPython |
1782287 | <gh_stars>1-10
""" python port of zombie/scripting/objects/Recipe.class
Original code copyright TheIndieStone.
python port by Fenris_Wolf
"""
import re
from zomboid.java import ArrayList, HashMap
from .base import BaseScriptObject
class Result:
    """The output of a recipe: what item is produced, and how many."""

    # Defaults mirror the script format: one item, no drainable charge.
    type : str = None
    count : int = 1
    drainableCount : int = 0
    module : str = None

    def getType(self) -> str:
        """Bare item type name, without the module prefix."""
        return self.type

    def getModule(self) -> str:
        """Module (namespace) the result item belongs to."""
        return self.module

    def getFullType(self) -> str:
        """Fully qualified item name, e.g. 'Base.Axe'."""
        return f"{self.module}.{self.type}"

    def getCount(self) -> int:
        """How many result items the recipe produces."""
        return self.count

    def getDrainableCount(self) -> int:
        """Charge/uses for drainable results (0 for normal items)."""
        return self.drainableCount
class Source:
keep : bool = False
items : ArrayList = None
destroy : bool = False
count : float = 1
use : float = 0
def __init__(self):
self.items = ArrayList()
def isDestroy(self) -> bool:
return self.destroy
def isKeep(self) -> bool:
return self.keep
def getCount(self) -> int:
return self.count
def getUse(self) -> int:
return self.use
def getItems(self) -> ArrayList:
return self.items
class Recipe(BaseScriptObject):
    """Python port of zombie.scripting.objects.Recipe.

    Parses one recipe definition from the game's script format (key:value
    lines plus bare ingredient lines) and exposes Java-style accessors.
    """
    # Recipe option fields, with the script-format defaults.
    canBeDoneFromFloor : bool = False
    AnimNode : str = None
    Prop1 : str = None
    Prop2 : str = None
    TimeToMake : float = 0.0
    Sound : str = None
    # Names of Lua callbacks attached to this recipe.
    LuaTest : str = None
    LuaCreate : str = None
    LuaGrab : str = None
    NeedToBeLearn : bool = False # this field is actually needToBeLearn, but theres also a method by this exact name....
    removeResultItem : bool = False
    skillRequired : HashMap = None
    heat : float = 0.0
    NoBrokenItems : bool = False # this field is actually NoBrokenItems, but theres also a method by this exact name....
    name : str = 'recipe'
    originalname : str = 'recipe'
    # Output item and list of ingredient slots.
    Result : Result = None
    Source : ArrayList = None
    nearItem : str = None
    def __init__(self):
        super().__init__()
        # Per-instance ingredient list (the class attribute is only a default).
        self.Source = ArrayList()
    def setCanBeDoneFromFloor(self, value : bool) -> None:
        self.canBeDoneFromFloor = value
    def isCanBeDoneFromFloor(self) -> bool:
        return self.canBeDoneFromFloor
    def FindIndexOf(self, item : str) -> int:
        # Stub: always -1 (matches an unimplemented lookup in the port).
        return -1
    def getSource(self) -> Source:
        # NOTE(review): returns the ArrayList of Source slots, despite the
        # annotation saying a single Source.
        return self.Source
    def getNumberOfNeededItems(self) -> int:
        """Total item count across all non-empty ingredient slots."""
        count = 0
        for source in self.Source:
            if source.items:
                count += source.count
        return count
    def getTimeToMake(self) -> float:
        return self.TimeToMake
    def getName(self) -> str:
        return self.name
    def getFullType(self) -> str:
        """Fully qualified recipe name, e.g. 'Base.Make Axe'."""
        return f"{self.module}.{self.originalname}"
    def Load(self, name : str, data : list) -> None:
        """Parse the recipe body.

        *data* is the list of raw lines between the recipe's braces:
        'Key: Value' lines set options; any other non-empty line is an
        ingredient and is handed to DoSource. 'Obsolete: true' (and a
        matching 'Override') remove this recipe from the module's maps.
        """
        self.name = name # TODO: Translator.getRecipeName
        self.originalname = name
        override = False
        for line in data:
            line = line.strip()
            if not line:
                continue
            match = re.match(r"([^:]+)\s*:\s*(.+)\s*", line, flags=re.DOTALL | re.M)
            if not match:
                # No 'key: value' structure -> ingredient line.
                self.DoSource(line)
            else:
                key, value = match.groups()
                #key = key.lower() # NOTE ALL THESE KEYS ARE ACTUALLY CASE_SENSITIVE!!!!
                value = value.strip()
                if key == "Override":
                    override = value.lower() == 'true'
                elif key == "AnimNode":
                    self.AnimNode = value
                elif key == "Prop1":
                    self.Prop1 = value
                elif key == "Prop2":
                    self.Prop2 = value
                elif key == "Time":
                    self.TimeToMake = float(value)
                elif key == "Sound":
                    self.Sound = value
                elif key == "Result":
                    self.DoResult(value)
                elif key == "OnTest":
                    self.LuaTest = value
                elif key == "OnCreate":
                    self.LuaCreate = value
                elif key == "OnGrab":
                    self.LuaGrab = value
                elif key.lower() == "needtobelearn": # case insensitive
                    self.setNeedToBeLearn(value.lower() == 'true')
                elif key.lower() == "category": # case insensitive
                    self.setCategory(value)
                elif key == "RemoveResultItem":
                    self.removeResultItem = value.lower() == 'true'
                elif key == "CanBeDoneFromFloor":
                    self.setCanBeDoneFromFloor(value.lower() == 'true')
                elif key == "NearItem":
                    self.setNearItem(value)
                elif key == "SkillRequired":
                    # Format: "Skill1=level;Skill2=level;..."
                    self.skillRequired = HashMap()
                    skills = value.split(";")
                    for sk in skills:
                        if not sk:
                            continue
                        sk, val = sk.split("=",1)
                        self.skillRequired[sk] = int(val)
                elif key == "OnGiveXP":
                    self.OnGiveXP = value
                elif key == "Obsolete":
                    # Obsolete recipes remove themselves from the module
                    # registries and stop parsing immediately.
                    if value.lower() == "true":
                        self.module.RecipeMap.remove(self)
                        self.module.RecipesWithDotInName.remove(self)
                        return
                elif key == "Heat":
                    self.heat = float(value)
                elif key == "NoBrokenItems":
                    self.NoBrokenItems = value.lower() == 'true'
        if override:
            # Overriding an existing recipe: drop this duplicate instance.
            recipe = self.module.getRecipe(name)
            if recipe and recipe != self:
                self.module.RecipeMap.remove(self)
                self.module.RecipesWithDotInName.remove(self)
                return
    def DoSource(self, data : str) -> None:
        """Parse one ingredient line into a Source slot.

        Grammar (in this order): 'Item=count', optional 'keep ' prefix,
        'Item;use' for drainables, optional 'destroy ' prefix,
        'null' for an empty slot, and 'A/B/C' for alternatives.
        """
        source = Source()
        if '=' in data:
            data, count = data.split('=')
            data, count = data.strip(), float(count.strip())
            source.count = count
        if data.startswith("keep"):
            # Skips "keep " (keyword plus the following space).
            data = data[5:]
            source.keep = True
        if ";" in data:
            data, count = data.split(';')
            source.use = float(count)
        if data.startswith("destroy"):
            # Skips "destroy " (keyword plus the following space).
            data = data[8:]
            source.destroy = True
        if data == 'null':
            source.items.clear()
        elif '/' in data:
            # Alternatives: any one of the listed items satisfies the slot.
            for i in data.split('/'):
                source.items.add(i)
        else:
            source.items.add(data)
        if data:
            self.Source.add(source)
    def DoResult(self, data : str) -> None:
        """Parse the 'Result:' value into a Result object.

        Grammar: 'Module.Item', optional '=count', optional
        ';drainableCount'.
        """
        result = Result()
        if '=' in data:
            data, count = data.split('=')
            data, count = data.strip(), int(count.strip())
            result.count = count
        if ";" in data:
            data, count = data.split(';')
            data, count = data.strip(), int(count.strip())
            result.drainableCount = count
        if '.' in data:
            result.module, result.type = data.split('.')
        else:
            result.type = data
        self.Result = result
    def getResult(self) -> Result:
        return self.Result
    def getSound(self) -> str:
        return self.Sound
    def getOriginalName(self) -> str:
        return self.originalname
    def setOriginalName(self, value : str) -> None:
        self.originalname = value
    def needToBeLearn(self) -> bool:
        return self.NeedToBeLearn
    def setNeedToBeLearn(self, value : bool) -> None:
        self.NeedToBeLearn = value
    def getCategory(self) -> str:
        return self.category
    def setCategory(self, value : str) -> None:
        self.category = value
    def getRequiredSkills(self) -> None:
        """Return 'SkillName level' strings for each required skill.

        NOTE(review): the Java original resolves the perk display name via
        PerkFactory (see the commented-out line); this port uses the raw
        skill key instead.
        """
        skills = []
        if self.skillRequired:
            for sk in self.skillRequired:
                # TODO: we need to get the perk from string from PerkFactory
                #str2 = (PerkFactory.getPerk(PerkFactory.Perks.FromString(str1))).name + " " + + this.skillRequired.get(str1);
                skills.append("%s %s" % (sk, self.skillRequired[sk]))
        return ArrayList(*skills)
    def findSource(self, item : str) -> Source:
        """Return the first Source slot accepting *item*, or None."""
        for source in self.Source:
            if item in source.items:
                return source
        return None
    def isDestroy(self, item : str) -> Source:
        # Propagates AttributeError when *item* matches no slot
        # (findSource returns None) — see the TODO below.
        try:
            source = self.findSource(item)
            return source.destroy
        except AttributeError: # Todo: should raise a specific exception
            raise
    def isKeep(self, item : str) -> Source:
        # Same unknown-item behavior as isDestroy above.
        try:
            source = self.findSource(item)
            return source.keep
        except AttributeError: # Todo: should raise a specific exception
            raise
    def getHeat(self) -> float:
        return self.heat
    def noBrokenItems(self) -> bool:
        return self.NoBrokenItems
    def getWaterAmountNeeded(self) -> int:
        """Units of water required, or 0 if no slot accepts 'Water'."""
        source = self.findSource("Water")
        if not source:
            return 0
        return source.count
    def getNearItem(self) -> str:
        return self.nearItem
    def setNearItem(self, value : str) -> None:
        self.nearItem = value
    def isRemoveResultItem(self) -> bool:
        return self.removeResultItem
    def setRemoveResultItem(self, value : bool) -> None:
        self.removeResultItem = value
    def getAnimNode(self) -> str:
        return self.AnimNode
    def setAnimNode(self, value : str) -> None:
        self.AnimNode = value
    def getProp1(self) -> str:
        return self.Prop1
    def setProp1(self, value : str) -> None:
        self.Prop1 = value
    def getProp2(self) -> str:
        return self.Prop2
    def setProp2(self, value : str) -> None:
        self.Prop2 = value
| StarcoderdataPython |
3384413 | # coding: utf-8
# (C) Copyright IBM Corp. 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-af92e433-20201110-100619
"""
API specification for the Configuration Governance service.
"""
from datetime import datetime
from enum import Enum
from typing import Dict, List
import json
from ibm_cloud_sdk_core import BaseService, DetailedResponse
from ibm_cloud_sdk_core.authenticators.authenticator import Authenticator
from ibm_cloud_sdk_core.get_authenticator import get_authenticator_from_environment
from ibm_cloud_sdk_core.utils import convert_model, datetime_to_string, string_to_datetime
from .common import get_sdk_headers
##############################################################################
# Service
##############################################################################
class ConfigurationGovernanceV1(BaseService):
"""The Configuration Governance V1 service."""
DEFAULT_SERVICE_URL = 'https://compliance.cloud.ibm.com'
DEFAULT_SERVICE_NAME = 'configuration_governance'
@classmethod
def new_instance(cls,
service_name: str = DEFAULT_SERVICE_NAME,
) -> 'ConfigurationGovernanceV1':
"""
Return a new client for the Configuration Governance service using the
specified parameters and external configuration.
"""
authenticator = get_authenticator_from_environment(service_name)
service = cls(
authenticator
)
service.configure_service(service_name)
return service
def __init__(self,
authenticator: Authenticator = None,
) -> None:
"""
Construct a new client for the Configuration Governance service.
:param Authenticator authenticator: The authenticator specifies the authentication mechanism.
Get up to date information from https://github.com/IBM/python-sdk-core/blob/master/README.md
about initializing the authenticator of your choice.
"""
BaseService.__init__(self,
service_url=self.DEFAULT_SERVICE_URL,
authenticator=authenticator)
#########################
# Rules
#########################
def create_rules(self,
transaction_id: str,
rules: List['CreateRuleRequest'],
**kwargs
) -> DetailedResponse:
"""
Create rules.
Creates one or more rules that you can use to govern the way that IBM Cloud
resources can be provisioned and configured.
A successful `POST /config/rules` request defines a rule based on the target,
conditions, and enforcement actions that you specify. The response returns the ID
value for your rule, along with other metadata.
:param str transaction_id: The unique identifier that is used to trace an
entire request. If you omit this field, the service generates and sends a
transaction ID in the
`trace` field of the response body.
**Note:** To help with debugging logs, it is strongly recommended that you
generate and supply a `Transaction-Id` with each request.
:param List[CreateRuleRequest] rules: A list of rules to be created.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `CreateRulesResponse` object
"""
if transaction_id is None:
raise ValueError('transaction_id must be provided')
if rules is None:
raise ValueError('rules must be provided')
rules = [convert_model(x) for x in rules]
headers = {
'Transaction-Id': transaction_id
}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='create_rules')
headers.update(sdk_headers)
data = {
'rules': rules
}
data = {k: v for (k, v) in data.items() if v is not None}
data = json.dumps(data)
headers['content-type'] = 'application/json'
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
url = '/config/v1/rules'
request = self.prepare_request(method='POST',
url=url,
headers=headers,
data=data)
response = self.send(request)
return response
def list_rules(self,
transaction_id: str,
account_id: str,
*,
attached: bool = None,
labels: str = None,
scopes: str = None,
limit: int = None,
offset: int = None,
**kwargs
) -> DetailedResponse:
"""
List rules.
Retrieves a list of the rules that are available in your account.
:param str transaction_id: The unique identifier that is used to trace an
entire request. If you omit this field, the service generates and sends a
transaction ID in the
`trace` field of the response body.
**Note:** To help with debugging logs, it is strongly recommended that you
generate and supply a `Transaction-Id` with each request.
:param str account_id: Your IBM Cloud account ID.
:param bool attached: (optional) Retrieves a list of rules that have scope
attachments.
:param str labels: (optional) Retrieves a list of rules that match the
labels that you specify.
:param str scopes: (optional) Retrieves a list of rules that match the
scope ID that you specify.
:param int limit: (optional) The number of resources to retrieve. By
default, list operations return the first 100 items. To retrieve a
different set of items, use `limit` with `offset` to page through your
available resources.
**Usage:** If you have 20 rules, and you want to retrieve only the first 5
rules, use `../rules?account_id={account_id}&limit=5`.
:param int offset: (optional) The number of resources to skip. By
specifying `offset`, you retrieve a subset of resources that starts with
the `offset` value. Use `offset` with `limit` to page through your
available resources.
**Usage:** If you have 100 rules, and you want to retrieve rules 26 through
50, use `../rules?account_id={account_id}&offset=25&limit=5`.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `RuleList` object
"""
if transaction_id is None:
raise ValueError('transaction_id must be provided')
if account_id is None:
raise ValueError('account_id must be provided')
headers = {
'Transaction-Id': transaction_id
}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='list_rules')
headers.update(sdk_headers)
params = {
'account_id': account_id,
'attached': attached,
'labels': labels,
'scopes': scopes,
'limit': limit,
'offset': offset
}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
url = '/config/v1/rules'
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
def get_rule(self,
rule_id: str,
transaction_id: str,
**kwargs
) -> DetailedResponse:
"""
Get a rule.
Retrieves an existing rule and its details.
:param str rule_id: The UUID that uniquely identifies the rule.
:param str transaction_id: The unique identifier that is used to trace an
entire request. If you omit this field, the service generates and sends a
transaction ID in the
`trace` field of the response body.
**Note:** To help with debugging logs, it is strongly recommended that you
generate and supply a `Transaction-Id` with each request.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `Rule` object
"""
if rule_id is None:
raise ValueError('rule_id must be provided')
if transaction_id is None:
raise ValueError('transaction_id must be provided')
headers = {
'Transaction-Id': transaction_id
}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='get_rule')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
path_param_keys = ['rule_id']
path_param_values = self.encode_path_vars(rule_id)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/config/v1/rules/{rule_id}'.format(**path_param_dict)
request = self.prepare_request(method='GET',
url=url,
headers=headers)
response = self.send(request)
return response
def update_rule(self,
    rule_id: str,
    transaction_id: str,
    if_match: str,
    name: str,
    description: str,
    target: 'TargetResource',
    required_config: 'RuleRequiredConfig',
    enforcement_actions: List['EnforcementAction'],
    *,
    account_id: str = None,
    rule_type: str = None,
    labels: List[str] = None,
    **kwargs
) -> DetailedResponse:
    """Update a rule.

    Updates an existing rule based on the properties that you specify.

    :param str rule_id: The UUID that uniquely identifies the rule.
    :param str transaction_id: The unique identifier that is used to trace an
           entire request; sent as the `Transaction-Id` header.
    :param str if_match: An `Etag` value for optimistic locking; the server
           only continues when it matches the stored version. Run a GET on
           the resource and read the response headers to obtain it.
    :param str name: A human-readable alias to assign to your rule.
    :param str description: An extended description of your rule.
    :param TargetResource target: The properties that describe the resource
           that you want to target with the rule.
    :param RuleRequiredConfig required_config:
    :param List[EnforcementAction] enforcement_actions: The actions that the
           service must run on your behalf when a request to create or modify
           the target resource does not comply with your conditions.
    :param str account_id: (optional) Your IBM Cloud account ID.
    :param str rule_type: (optional) The type of rule. Rules that you create
           are `user_defined`.
    :param List[str] labels: (optional) Labels that you can use to group and
           search for similar rules.
    :param dict headers: A `dict` containing the request headers
    :return: A `DetailedResponse` containing the result, headers and HTTP status code.
    :rtype: DetailedResponse with `dict` result representing a `Rule` object
    """
    # Validate every required argument, in declaration order.
    for arg_name, arg_value in (('rule_id', rule_id),
                                ('transaction_id', transaction_id),
                                ('if_match', if_match),
                                ('name', name),
                                ('description', description),
                                ('target', target),
                                ('required_config', required_config),
                                ('enforcement_actions', enforcement_actions)):
        if arg_value is None:
            raise ValueError('{0} must be provided'.format(arg_name))

    # Convert model objects into plain dicts for JSON serialization.
    target = convert_model(target)
    required_config = convert_model(required_config)
    enforcement_actions = [convert_model(action) for action in enforcement_actions]

    headers = {
        'Transaction-Id': transaction_id,
        'If-Match': if_match
    }
    headers.update(get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
                                   service_version='V1',
                                   operation_id='update_rule'))

    # Drop optional fields that were not supplied before serializing.
    payload = {
        'name': name,
        'description': description,
        'target': target,
        'required_config': required_config,
        'enforcement_actions': enforcement_actions,
        'account_id': account_id,
        'rule_type': rule_type,
        'labels': labels
    }
    data = json.dumps({k: v for (k, v) in payload.items() if v is not None})

    # `content-type` is set before merging caller headers (so it can be
    # overridden); `Accept` is forced afterwards.
    headers['content-type'] = 'application/json'
    if 'headers' in kwargs:
        headers.update(kwargs['headers'])
    headers['Accept'] = 'application/json'

    rule_id_enc, = self.encode_path_vars(rule_id)
    url = '/config/v1/rules/{0}'.format(rule_id_enc)

    req = self.prepare_request(method='PUT',
                               url=url,
                               headers=headers,
                               data=data)
    return self.send(req)
def delete_rule(self,
    rule_id: str,
    transaction_id: str,
    **kwargs
) -> DetailedResponse:
    """Delete a rule.

    Deletes an existing rule.

    :param str rule_id: The UUID that uniquely identifies the rule.
    :param str transaction_id: The unique identifier that is used to trace an
           entire request; sent as the `Transaction-Id` header.
    :param dict headers: A `dict` containing the request headers
    :return: A `DetailedResponse` containing the result, headers and HTTP status code.
    :rtype: DetailedResponse
    """
    # Both arguments are mandatory.
    for arg_name, arg_value in (('rule_id', rule_id),
                                ('transaction_id', transaction_id)):
        if arg_value is None:
            raise ValueError('{0} must be provided'.format(arg_name))

    # Trace id, SDK analytics headers, then caller extras. No `Accept`
    # header is set for this operation: the response carries no body.
    headers = {'Transaction-Id': transaction_id}
    headers.update(get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
                                   service_version='V1',
                                   operation_id='delete_rule'))
    if 'headers' in kwargs:
        headers.update(kwargs['headers'])

    rule_id_enc, = self.encode_path_vars(rule_id)
    url = '/config/v1/rules/{0}'.format(rule_id_enc)

    req = self.prepare_request(method='DELETE',
                               url=url,
                               headers=headers)
    return self.send(req)
def create_attachments(self,
    rule_id: str,
    transaction_id: str,
    attachments: List['AttachmentRequest'],
    **kwargs
) -> DetailedResponse:
    """Create attachments.

    Creates one or more scope attachments for an existing rule.
    You can attach an existing rule to a scope, such as a specific IBM Cloud
    account, to start evaluating the rule for compliance. A successful
    `POST /config/v1/rules/{rule_id}/attachments` returns the ID value for
    the attachment, along with other metadata.

    :param str rule_id: The UUID that uniquely identifies the rule.
    :param str transaction_id: The unique identifier that is used to trace an
           entire request; sent as the `Transaction-Id` header.
    :param List[AttachmentRequest] attachments:
    :param dict headers: A `dict` containing the request headers
    :return: A `DetailedResponse` containing the result, headers and HTTP status code.
    :rtype: DetailedResponse with `dict` result representing a `CreateAttachmentsResponse` object
    """
    # All three arguments are mandatory.
    for arg_name, arg_value in (('rule_id', rule_id),
                                ('transaction_id', transaction_id),
                                ('attachments', attachments)):
        if arg_value is None:
            raise ValueError('{0} must be provided'.format(arg_name))

    # Convert each attachment model into a plain dict for serialization.
    attachments = [convert_model(attachment) for attachment in attachments]

    headers = {'Transaction-Id': transaction_id}
    headers.update(get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
                                   service_version='V1',
                                   operation_id='create_attachments'))

    payload = {'attachments': attachments}
    data = json.dumps({k: v for (k, v) in payload.items() if v is not None})

    # `content-type` before the caller-header merge, `Accept` after it.
    headers['content-type'] = 'application/json'
    if 'headers' in kwargs:
        headers.update(kwargs['headers'])
    headers['Accept'] = 'application/json'

    rule_id_enc, = self.encode_path_vars(rule_id)
    url = '/config/v1/rules/{0}/attachments'.format(rule_id_enc)

    req = self.prepare_request(method='POST',
                               url=url,
                               headers=headers,
                               data=data)
    return self.send(req)
def list_attachments(self,
    rule_id: str,
    transaction_id: str,
    *,
    limit: int = None,
    offset: int = None,
    **kwargs
) -> DetailedResponse:
    """List attachments.

    Retrieves a list of scope attachments that are associated with the
    specified rule.

    :param str rule_id: The UUID that uniquely identifies the rule.
    :param str transaction_id: The unique identifier that is used to trace an
           entire request; sent as the `Transaction-Id` header.
    :param int limit: (optional) The number of resources to retrieve. By
           default, list operations return the first 100 items. Combine with
           `offset` to page through the available resources.
    :param int offset: (optional) The number of resources to skip. Combine
           with `limit` to page through the available resources.
    :param dict headers: A `dict` containing the request headers
    :return: A `DetailedResponse` containing the result, headers and HTTP status code.
    :rtype: DetailedResponse with `dict` result representing a `AttachmentList` object
    """
    # The positional arguments are mandatory; paging knobs are optional.
    for arg_name, arg_value in (('rule_id', rule_id),
                                ('transaction_id', transaction_id)):
        if arg_value is None:
            raise ValueError('{0} must be provided'.format(arg_name))

    headers = {'Transaction-Id': transaction_id}
    headers.update(get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
                                   service_version='V1',
                                   operation_id='list_attachments'))

    # Query parameters; omitted (None) values are handled downstream.
    params = {
        'limit': limit,
        'offset': offset
    }

    if 'headers' in kwargs:
        headers.update(kwargs['headers'])
    headers['Accept'] = 'application/json'

    rule_id_enc, = self.encode_path_vars(rule_id)
    url = '/config/v1/rules/{0}/attachments'.format(rule_id_enc)

    req = self.prepare_request(method='GET',
                               url=url,
                               headers=headers,
                               params=params)
    return self.send(req)
def get_attachment(self,
    rule_id: str,
    attachment_id: str,
    transaction_id: str,
    **kwargs
) -> DetailedResponse:
    """Get an attachment.

    Retrieves an existing scope attachment for a rule.

    :param str rule_id: The UUID that uniquely identifies the rule.
    :param str attachment_id: The UUID that uniquely identifies the attachment.
    :param str transaction_id: The unique identifier that is used to trace an
           entire request; sent as the `Transaction-Id` header.
    :param dict headers: A `dict` containing the request headers
    :return: A `DetailedResponse` containing the result, headers and HTTP status code.
    :rtype: DetailedResponse with `dict` result representing a `Attachment` object
    """
    # All three identifiers are mandatory.
    for arg_name, arg_value in (('rule_id', rule_id),
                                ('attachment_id', attachment_id),
                                ('transaction_id', transaction_id)):
        if arg_value is None:
            raise ValueError('{0} must be provided'.format(arg_name))

    headers = {'Transaction-Id': transaction_id}
    headers.update(get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
                                   service_version='V1',
                                   operation_id='get_attachment'))
    if 'headers' in kwargs:
        headers.update(kwargs['headers'])
    headers['Accept'] = 'application/json'

    # Encode both path variables and interpolate them into the route.
    rule_id_enc, attachment_id_enc = self.encode_path_vars(rule_id, attachment_id)
    url = '/config/v1/rules/{0}/attachments/{1}'.format(rule_id_enc, attachment_id_enc)

    req = self.prepare_request(method='GET',
                               url=url,
                               headers=headers)
    return self.send(req)
def update_attachment(self,
    rule_id: str,
    attachment_id: str,
    transaction_id: str,
    if_match: str,
    account_id: str,
    included_scope: 'RuleScope',
    *,
    excluded_scopes: List['RuleScope'] = None,
    **kwargs
) -> DetailedResponse:
    """Update an attachment.

    Updates an existing scope attachment based on the properties that you
    specify.

    :param str rule_id: The UUID that uniquely identifies the rule.
    :param str attachment_id: The UUID that uniquely identifies the attachment.
    :param str transaction_id: The unique identifier that is used to trace an
           entire request; sent as the `Transaction-Id` header.
    :param str if_match: An `Etag` value for optimistic locking; the server
           only continues when it matches the stored version. Run a GET on
           the resource and read the response headers to obtain it.
    :param str account_id: Your IBM Cloud account ID.
    :param RuleScope included_scope: The extent at which the rule can be
           attached across your accounts.
    :param List[RuleScope] excluded_scopes: (optional) The extent at which
           the rule can be excluded from the included scope.
    :param dict headers: A `dict` containing the request headers
    :return: A `DetailedResponse` containing the result, headers and HTTP status code.
    :rtype: DetailedResponse with `dict` result representing a `Attachment` object
    """
    # Validate every required argument, in declaration order.
    for arg_name, arg_value in (('rule_id', rule_id),
                                ('attachment_id', attachment_id),
                                ('transaction_id', transaction_id),
                                ('if_match', if_match),
                                ('account_id', account_id),
                                ('included_scope', included_scope)):
        if arg_value is None:
            raise ValueError('{0} must be provided'.format(arg_name))

    # Convert model objects to plain dicts; excluded_scopes is optional.
    included_scope = convert_model(included_scope)
    if excluded_scopes is not None:
        excluded_scopes = [convert_model(scope) for scope in excluded_scopes]

    headers = {
        'Transaction-Id': transaction_id,
        'If-Match': if_match
    }
    headers.update(get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
                                   service_version='V1',
                                   operation_id='update_attachment'))

    payload = {
        'account_id': account_id,
        'included_scope': included_scope,
        'excluded_scopes': excluded_scopes
    }
    data = json.dumps({k: v for (k, v) in payload.items() if v is not None})

    # `content-type` before the caller-header merge, `Accept` after it.
    headers['content-type'] = 'application/json'
    if 'headers' in kwargs:
        headers.update(kwargs['headers'])
    headers['Accept'] = 'application/json'

    rule_id_enc, attachment_id_enc = self.encode_path_vars(rule_id, attachment_id)
    url = '/config/v1/rules/{0}/attachments/{1}'.format(rule_id_enc, attachment_id_enc)

    req = self.prepare_request(method='PUT',
                               url=url,
                               headers=headers,
                               data=data)
    return self.send(req)
def delete_attachment(self,
    rule_id: str,
    attachment_id: str,
    transaction_id: str,
    **kwargs
) -> DetailedResponse:
    """Delete an attachment.

    Deletes an existing scope attachment.

    :param str rule_id: The UUID that uniquely identifies the rule.
    :param str attachment_id: The UUID that uniquely identifies the attachment.
    :param str transaction_id: The unique identifier that is used to trace an
           entire request; sent as the `Transaction-Id` header.
    :param dict headers: A `dict` containing the request headers
    :return: A `DetailedResponse` containing the result, headers and HTTP status code.
    :rtype: DetailedResponse
    """
    # All three identifiers are mandatory.
    for arg_name, arg_value in (('rule_id', rule_id),
                                ('attachment_id', attachment_id),
                                ('transaction_id', transaction_id)):
        if arg_value is None:
            raise ValueError('{0} must be provided'.format(arg_name))

    # No `Accept` header is set: the response carries no body.
    headers = {'Transaction-Id': transaction_id}
    headers.update(get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
                                   service_version='V1',
                                   operation_id='delete_attachment'))
    if 'headers' in kwargs:
        headers.update(kwargs['headers'])

    rule_id_enc, attachment_id_enc = self.encode_path_vars(rule_id, attachment_id)
    url = '/config/v1/rules/{0}/attachments/{1}'.format(rule_id_enc, attachment_id_enc)

    req = self.prepare_request(method='DELETE',
                               url=url,
                               headers=headers)
    return self.send(req)
##############################################################################
# Models
##############################################################################
class Attachment():
    """
    The scopes to attach to the rule.

    :attr str attachment_id: The UUID that uniquely identifies the attachment.
    :attr str rule_id: The UUID that uniquely identifies the rule.
    :attr str account_id: Your IBM Cloud account ID.
    :attr RuleScope included_scope: The extent at which the rule can be attached
          across your accounts.
    :attr List[RuleScope] excluded_scopes: (optional) The extent at which the rule
          can be excluded from the included scope.
    """

    def __init__(self,
                 attachment_id: str,
                 rule_id: str,
                 account_id: str,
                 included_scope: 'RuleScope',
                 *,
                 excluded_scopes: List['RuleScope'] = None) -> None:
        """
        Initialize a Attachment object.

        :param str attachment_id: The UUID that uniquely identifies the attachment.
        :param str rule_id: The UUID that uniquely identifies the rule.
        :param str account_id: Your IBM Cloud account ID.
        :param RuleScope included_scope: The extent at which the rule can be
               attached across your accounts.
        :param List[RuleScope] excluded_scopes: (optional) The extent at which the
               rule can be excluded from the included scope.
        """
        self.attachment_id = attachment_id
        self.rule_id = rule_id
        self.account_id = account_id
        self.included_scope = included_scope
        self.excluded_scopes = excluded_scopes

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'Attachment':
        """Initialize a Attachment object from a json dictionary."""
        args = {}
        # Required scalar properties, checked in declaration order.
        for key in ('attachment_id', 'rule_id', 'account_id'):
            if key not in _dict:
                raise ValueError('Required property \'' + key + '\' not present in Attachment JSON')
            args[key] = _dict.get(key)
        # Required nested scope object.
        if 'included_scope' not in _dict:
            raise ValueError('Required property \'included_scope\' not present in Attachment JSON')
        args['included_scope'] = RuleScope.from_dict(_dict.get('included_scope'))
        # Optional list of excluded scopes.
        if 'excluded_scopes' in _dict:
            args['excluded_scopes'] = [RuleScope.from_dict(scope) for scope in _dict.get('excluded_scopes')]
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a Attachment object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        _dict = {}
        # Scalars are emitted as-is; unset (None) attributes are omitted.
        for attr in ('attachment_id', 'rule_id', 'account_id'):
            value = getattr(self, attr, None)
            if value is not None:
                _dict[attr] = value
        included = getattr(self, 'included_scope', None)
        if included is not None:
            _dict['included_scope'] = included.to_dict()
        excluded = getattr(self, 'excluded_scopes', None)
        if excluded is not None:
            _dict['excluded_scopes'] = [scope.to_dict() for scope in excluded]
        return _dict

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this Attachment object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'Attachment') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: 'Attachment') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class AttachmentList():
    """
    A list of attachments.

    :attr int offset: The requested offset for the returned items.
    :attr int limit: The requested limit for the returned items.
    :attr int total_count: The total number of available items.
    :attr Link first: The first page of available items.
    :attr Link last: The last page of available items.
    :attr List[Attachment] attachments:
    """

    def __init__(self,
                 offset: int,
                 limit: int,
                 total_count: int,
                 first: 'Link',
                 last: 'Link',
                 attachments: List['Attachment']) -> None:
        """
        Initialize a AttachmentList object.

        :param int offset: The requested offset for the returned items.
        :param int limit: The requested limit for the returned items.
        :param int total_count: The total number of available items.
        :param Link first: The first page of available items.
        :param Link last: The last page of available items.
        :param List[Attachment] attachments:
        """
        self.offset = offset
        self.limit = limit
        self.total_count = total_count
        self.first = first
        self.last = last
        self.attachments = attachments

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'AttachmentList':
        """Initialize a AttachmentList object from a json dictionary."""
        args = {}
        # Required paging counters, checked in declaration order.
        for key in ('offset', 'limit', 'total_count'):
            if key not in _dict:
                raise ValueError('Required property \'' + key + '\' not present in AttachmentList JSON')
            args[key] = _dict.get(key)
        # Required page links.
        if 'first' not in _dict:
            raise ValueError('Required property \'first\' not present in AttachmentList JSON')
        args['first'] = Link.from_dict(_dict.get('first'))
        if 'last' not in _dict:
            raise ValueError('Required property \'last\' not present in AttachmentList JSON')
        args['last'] = Link.from_dict(_dict.get('last'))
        # Required list of attachment models.
        if 'attachments' not in _dict:
            raise ValueError('Required property \'attachments\' not present in AttachmentList JSON')
        args['attachments'] = [Attachment.from_dict(item) for item in _dict.get('attachments')]
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a AttachmentList object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        _dict = {}
        # Unset (None) attributes are omitted from the output.
        for attr in ('offset', 'limit', 'total_count'):
            value = getattr(self, attr, None)
            if value is not None:
                _dict[attr] = value
        first = getattr(self, 'first', None)
        if first is not None:
            _dict['first'] = first.to_dict()
        last = getattr(self, 'last', None)
        if last is not None:
            _dict['last'] = last.to_dict()
        attachments = getattr(self, 'attachments', None)
        if attachments is not None:
            _dict['attachments'] = [item.to_dict() for item in attachments]
        return _dict

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this AttachmentList object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'AttachmentList') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: 'AttachmentList') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class AttachmentRequest():
    """
    The scopes to attach to the rule.

    :attr str account_id: Your IBM Cloud account ID.
    :attr RuleScope included_scope: The extent at which the rule can be attached
          across your accounts.
    :attr List[RuleScope] excluded_scopes: (optional) The extent at which the rule
          can be excluded from the included scope.
    """

    def __init__(self,
                 account_id: str,
                 included_scope: 'RuleScope',
                 *,
                 excluded_scopes: List['RuleScope'] = None) -> None:
        """
        Initialize a AttachmentRequest object.

        :param str account_id: Your IBM Cloud account ID.
        :param RuleScope included_scope: The extent at which the rule can be
               attached across your accounts.
        :param List[RuleScope] excluded_scopes: (optional) The extent at which the
               rule can be excluded from the included scope.
        """
        self.account_id = account_id
        self.included_scope = included_scope
        self.excluded_scopes = excluded_scopes

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'AttachmentRequest':
        """Initialize a AttachmentRequest object from a json dictionary."""
        args = {}
        # Both account_id and included_scope are required.
        if 'account_id' not in _dict:
            raise ValueError('Required property \'account_id\' not present in AttachmentRequest JSON')
        args['account_id'] = _dict.get('account_id')
        if 'included_scope' not in _dict:
            raise ValueError('Required property \'included_scope\' not present in AttachmentRequest JSON')
        args['included_scope'] = RuleScope.from_dict(_dict.get('included_scope'))
        # Optional list of excluded scopes.
        if 'excluded_scopes' in _dict:
            args['excluded_scopes'] = [RuleScope.from_dict(scope) for scope in _dict.get('excluded_scopes')]
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a AttachmentRequest object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        _dict = {}
        # Unset (None) attributes are omitted from the output.
        account = getattr(self, 'account_id', None)
        if account is not None:
            _dict['account_id'] = account
        included = getattr(self, 'included_scope', None)
        if included is not None:
            _dict['included_scope'] = included.to_dict()
        excluded = getattr(self, 'excluded_scopes', None)
        if excluded is not None:
            _dict['excluded_scopes'] = [scope.to_dict() for scope in excluded]
        return _dict

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this AttachmentRequest object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'AttachmentRequest') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: 'AttachmentRequest') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class CreateAttachmentsResponse():
    """
    CreateAttachmentsResponse.

    :attr List[Attachment] attachments:
    """

    def __init__(self,
                 attachments: List['Attachment']) -> None:
        """
        Initialize a CreateAttachmentsResponse object.

        :param List[Attachment] attachments:
        """
        self.attachments = attachments

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'CreateAttachmentsResponse':
        """Initialize a CreateAttachmentsResponse object from a json dictionary."""
        # The attachments list is the only, and required, property.
        if 'attachments' not in _dict:
            raise ValueError('Required property \'attachments\' not present in CreateAttachmentsResponse JSON')
        attachments = [Attachment.from_dict(item) for item in _dict.get('attachments')]
        return cls(attachments=attachments)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a CreateAttachmentsResponse object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        _dict = {}
        attachments = getattr(self, 'attachments', None)
        if attachments is not None:
            _dict['attachments'] = [item.to_dict() for item in attachments]
        return _dict

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this CreateAttachmentsResponse object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'CreateAttachmentsResponse') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: 'CreateAttachmentsResponse') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class CreateRuleRequest():
    """
    A rule to be created.

    :attr str request_id: (optional) A field that you can use in bulk operations to
          store a custom identifier for an individual request. If you omit this
          field, the service generates and sends a `request_id` string for each new
          rule. The generated string corresponds with the numerical order of the
          rules request array. For example, `"request_id": "1"`, `"request_id": "2"`.
          **Note:** To help with debugging logs, it is strongly recommended that you
          generate and supply a `request_id` with each request.
    :attr RuleRequest rule: User-settable properties associated with a rule to be
          created or updated.
    """

    def __init__(self,
                 rule: 'RuleRequest',
                 *,
                 request_id: str = None) -> None:
        """
        Initialize a CreateRuleRequest object.

        :param RuleRequest rule: User-settable properties associated with a rule to
               be created or updated.
        :param str request_id: (optional) A custom identifier for this individual
               request in a bulk operation; the service generates one when omitted.
        """
        self.request_id = request_id
        self.rule = rule

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'CreateRuleRequest':
        """Initialize a CreateRuleRequest object from a json dictionary."""
        args = {}
        # request_id is optional; rule is required.
        if 'request_id' in _dict:
            args['request_id'] = _dict.get('request_id')
        if 'rule' not in _dict:
            raise ValueError('Required property \'rule\' not present in CreateRuleRequest JSON')
        args['rule'] = RuleRequest.from_dict(_dict.get('rule'))
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a CreateRuleRequest object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        _dict = {}
        request_id = getattr(self, 'request_id', None)
        if request_id is not None:
            _dict['request_id'] = request_id
        rule = getattr(self, 'rule', None)
        if rule is not None:
            _dict['rule'] = rule.to_dict()
        return _dict

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this CreateRuleRequest object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'CreateRuleRequest') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: 'CreateRuleRequest') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class CreateRuleResponse():
    """
    Response information for a single rule request.

    When 'status_code' indicates success, 'request_id' and 'rule' are populated.
    When it indicates an error, 'request_id', 'errors', and 'trace' are populated.

    :attr str request_id: (optional) Correlation ID used to identify one request
          within a bulk operation (useful for debugging).
    :attr int status_code: (optional) The HTTP response status code.
    :attr Rule rule: (optional) The newly-created rule (successful requests only).
    :attr List[RuleResponseError] errors: (optional) Error contents of the
          multi-status response (failed requests only).
    :attr str trace: (optional) UUID that uniquely identifies a failed request.
    """

    def __init__(self,
                 *,
                 request_id: str = None,
                 status_code: int = None,
                 rule: 'Rule' = None,
                 errors: List['RuleResponseError'] = None,
                 trace: str = None) -> None:
        """
        Initialize a CreateRuleResponse object.

        :param str request_id: (optional) Correlation ID for this request.
        :param int status_code: (optional) The HTTP response status code.
        :param Rule rule: (optional) The newly-created rule (success only).
        :param List[RuleResponseError] errors: (optional) Errors (failure only).
        :param str trace: (optional) UUID identifying a failed request.
        """
        self.request_id = request_id
        self.status_code = status_code
        self.rule = rule
        self.errors = errors
        self.trace = trace

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'CreateRuleResponse':
        """Build a CreateRuleResponse from a json dictionary; all keys are optional."""
        kwargs = {}
        if 'request_id' in _dict:
            kwargs['request_id'] = _dict.get('request_id')
        if 'status_code' in _dict:
            kwargs['status_code'] = _dict.get('status_code')
        if 'rule' in _dict:
            kwargs['rule'] = Rule.from_dict(_dict.get('rule'))
        if 'errors' in _dict:
            kwargs['errors'] = [RuleResponseError.from_dict(item) for item in _dict.get('errors')]
        if 'trace' in _dict:
            kwargs['trace'] = _dict.get('trace')
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Deprecated alias for from_dict()."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Serialize this model as a json-compatible dictionary, omitting unset attributes."""
        out = {}
        for key in ('request_id', 'status_code'):
            value = getattr(self, key, None)
            if value is not None:
                out[key] = value
        rule = getattr(self, 'rule', None)
        if rule is not None:
            out['rule'] = rule.to_dict()
        errors = getattr(self, 'errors', None)
        if errors is not None:
            out['errors'] = [err.to_dict() for err in errors]
        trace = getattr(self, 'trace', None)
        if trace is not None:
            out['trace'] = trace
        return out

    def _to_dict(self):
        """Deprecated alias for to_dict()."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a pretty-printed JSON representation of this CreateRuleResponse."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'CreateRuleResponse') -> bool:
        """Equal when classes and attribute dictionaries match."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'CreateRuleResponse') -> bool:
        """Logical inverse of __eq__."""
        return not self == other
class CreateRulesResponse():
    """
    The response associated with a request to create one or more rules.

    :attr List[CreateRuleResponse] rules: An array of per-rule responses.
    """

    def __init__(self,
                 rules: List['CreateRuleResponse']) -> None:
        """
        Initialize a CreateRulesResponse object.

        :param List[CreateRuleResponse] rules: An array of rule responses.
        """
        self.rules = rules

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'CreateRulesResponse':
        """Build a CreateRulesResponse from a json dictionary.

        :raises ValueError: when the required 'rules' key is missing.
        """
        if 'rules' not in _dict:
            raise ValueError('Required property \'rules\' not present in CreateRulesResponse JSON')
        return cls(rules=[CreateRuleResponse.from_dict(item) for item in _dict.get('rules')])

    @classmethod
    def _from_dict(cls, _dict):
        """Deprecated alias for from_dict()."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Serialize this model as a json-compatible dictionary."""
        out = {}
        rules = getattr(self, 'rules', None)
        if rules is not None:
            out['rules'] = [item.to_dict() for item in rules]
        return out

    def _to_dict(self):
        """Deprecated alias for to_dict()."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a pretty-printed JSON representation of this CreateRulesResponse."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'CreateRulesResponse') -> bool:
        """Equal when classes and attribute dictionaries match."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'CreateRulesResponse') -> bool:
        """Logical inverse of __eq__."""
        return not self == other
class EnforcementAction():
    """
    EnforcementAction.

    :attr str action: To block a request from completing, use `disallow`. To log the
          request to Activity Tracker with LogDNA, use `audit_log`.
    """

    def __init__(self,
                 action: str) -> None:
        """
        Initialize a EnforcementAction object.

        :param str action: To block a request from completing, use `disallow`. To
               log the request to Activity Tracker with LogDNA, use `audit_log`.
        """
        self.action = action

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'EnforcementAction':
        """Build an EnforcementAction from a json dictionary.

        :raises ValueError: when the required 'action' key is missing.
        """
        if 'action' not in _dict:
            raise ValueError('Required property \'action\' not present in EnforcementAction JSON')
        return cls(action=_dict.get('action'))

    @classmethod
    def _from_dict(cls, _dict):
        """Deprecated alias for from_dict()."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Serialize this model as a json-compatible dictionary."""
        out = {}
        action = getattr(self, 'action', None)
        if action is not None:
            out['action'] = action
        return out

    def _to_dict(self):
        """Deprecated alias for to_dict()."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a pretty-printed JSON representation of this EnforcementAction."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'EnforcementAction') -> bool:
        """Equal when classes and attribute dictionaries match."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'EnforcementAction') -> bool:
        """Logical inverse of __eq__."""
        return not self == other

    class ActionEnum(str, Enum):
        """
        To block a request from completing, use `disallow`. To log the request to Activity
        Tracker with LogDNA, use `audit_log`.
        """
        AUDIT_LOG = 'audit_log'
        DISALLOW = 'disallow'
class Link():
    """
    A link that is used to paginate through available resources.

    :attr str href: The URL for the first, previous, next, or last page of resources.
    """

    def __init__(self,
                 href: str) -> None:
        """
        Initialize a Link object.

        :param str href: The URL for the first, previous, next, or last page of
               resources.
        """
        self.href = href

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'Link':
        """Build a Link from a json dictionary.

        :raises ValueError: when the required 'href' key is missing.
        """
        if 'href' not in _dict:
            raise ValueError('Required property \'href\' not present in Link JSON')
        return cls(href=_dict.get('href'))

    @classmethod
    def _from_dict(cls, _dict):
        """Deprecated alias for from_dict()."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Serialize this model as a json-compatible dictionary."""
        out = {}
        href = getattr(self, 'href', None)
        if href is not None:
            out['href'] = href
        return out

    def _to_dict(self):
        """Deprecated alias for to_dict()."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a pretty-printed JSON representation of this Link."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'Link') -> bool:
        """Equal when classes and attribute dictionaries match."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'Link') -> bool:
        """Logical inverse of __eq__."""
        return not self == other
class Rule():
    """
    Properties associated with a rule, including both user-settable and
    server-populated properties.

    :attr str account_id: (optional) Your IBM Cloud account ID.
    :attr str name: A human-readable alias to assign to your rule.
    :attr str description: An extended description of your rule.
    :attr str rule_type: (optional) The type of rule. Rules that you create are
          `user_defined`.
    :attr TargetResource target: The properties that describe the resource that
          you want to target with the rule.
    :attr RuleRequiredConfig required_config:
    :attr List[EnforcementAction] enforcement_actions: The actions that the
          service must run on your behalf when a request to create or modify the
          target resource does not comply with your conditions.
    :attr List[str] labels: (optional) Labels that you can use to group and
          search for similar rules.
    :attr str rule_id: (optional) The UUID that uniquely identifies the rule.
    :attr datetime creation_date: (optional) The date the resource was created.
    :attr str created_by: (optional) Identifier of the user or application that
          created the resource.
    :attr datetime modification_date: (optional) The date the resource was last
          modified.
    :attr str modified_by: (optional) Identifier of the user or application that
          last modified the resource.
    :attr int number_of_attachments: (optional) The number of scope attachments
          that are associated with the rule.
    """

    def __init__(self,
                 name: str,
                 description: str,
                 target: 'TargetResource',
                 required_config: 'RuleRequiredConfig',
                 enforcement_actions: List['EnforcementAction'],
                 *,
                 account_id: str = None,
                 rule_type: str = None,
                 labels: List[str] = None,
                 rule_id: str = None,
                 creation_date: datetime = None,
                 created_by: str = None,
                 modification_date: datetime = None,
                 modified_by: str = None,
                 number_of_attachments: int = None) -> None:
        """
        Initialize a Rule object.

        :param str name: A human-readable alias to assign to your rule.
        :param str description: An extended description of your rule.
        :param TargetResource target: The properties that describe the resource
               that you want to target with the rule.
        :param RuleRequiredConfig required_config:
        :param List[EnforcementAction] enforcement_actions: Actions the service
               runs when a request does not comply with your conditions.
        :param str account_id: (optional) Your IBM Cloud account ID.
        :param str rule_type: (optional) The type of rule; user-created rules
               are `user_defined`.
        :param List[str] labels: (optional) Labels for grouping and searching.
        """
        self.account_id = account_id
        self.name = name
        self.description = description
        self.rule_type = rule_type
        self.target = target
        self.required_config = required_config
        self.enforcement_actions = enforcement_actions
        self.labels = labels
        # Server-populated fields below; not documented as __init__ params upstream.
        self.rule_id = rule_id
        self.creation_date = creation_date
        self.created_by = created_by
        self.modification_date = modification_date
        self.modified_by = modified_by
        self.number_of_attachments = number_of_attachments

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'Rule':
        """Build a Rule from a json dictionary.

        :raises ValueError: when any required key (name, description, target,
                required_config, enforcement_actions) is missing.
        """
        kwargs = {}
        if 'account_id' in _dict:
            kwargs['account_id'] = _dict.get('account_id')
        if 'name' not in _dict:
            raise ValueError('Required property \'name\' not present in Rule JSON')
        kwargs['name'] = _dict.get('name')
        if 'description' not in _dict:
            raise ValueError('Required property \'description\' not present in Rule JSON')
        kwargs['description'] = _dict.get('description')
        if 'rule_type' in _dict:
            kwargs['rule_type'] = _dict.get('rule_type')
        if 'target' not in _dict:
            raise ValueError('Required property \'target\' not present in Rule JSON')
        kwargs['target'] = TargetResource.from_dict(_dict.get('target'))
        if 'required_config' not in _dict:
            raise ValueError('Required property \'required_config\' not present in Rule JSON')
        # Kept as the raw dictionary; subclass-specific parsing is not done here.
        kwargs['required_config'] = _dict.get('required_config')
        if 'enforcement_actions' not in _dict:
            raise ValueError('Required property \'enforcement_actions\' not present in Rule JSON')
        kwargs['enforcement_actions'] = [EnforcementAction.from_dict(item) for item in _dict.get('enforcement_actions')]
        for key in ('labels', 'rule_id'):
            if key in _dict:
                kwargs[key] = _dict.get(key)
        if 'creation_date' in _dict:
            kwargs['creation_date'] = string_to_datetime(_dict.get('creation_date'))
        if 'created_by' in _dict:
            kwargs['created_by'] = _dict.get('created_by')
        if 'modification_date' in _dict:
            kwargs['modification_date'] = string_to_datetime(_dict.get('modification_date'))
        for key in ('modified_by', 'number_of_attachments'):
            if key in _dict:
                kwargs[key] = _dict.get(key)
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Deprecated alias for from_dict()."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Serialize this model as a json-compatible dictionary, omitting unset attributes."""
        out = {}
        for key in ('account_id', 'name', 'description', 'rule_type'):
            value = getattr(self, key, None)
            if value is not None:
                out[key] = value
        target = getattr(self, 'target', None)
        if target is not None:
            out['target'] = target.to_dict()
        required_config = getattr(self, 'required_config', None)
        if required_config is not None:
            # May be a raw dict (straight from from_dict) or a model instance.
            out['required_config'] = required_config if isinstance(required_config, dict) else required_config.to_dict()
        enforcement_actions = getattr(self, 'enforcement_actions', None)
        if enforcement_actions is not None:
            out['enforcement_actions'] = [item.to_dict() for item in enforcement_actions]
        for key in ('labels', 'rule_id'):
            value = getattr(self, key, None)
            if value is not None:
                out[key] = value
        creation_date = getattr(self, 'creation_date', None)
        if creation_date is not None:
            out['creation_date'] = datetime_to_string(creation_date)
        created_by = getattr(self, 'created_by', None)
        if created_by is not None:
            out['created_by'] = created_by
        modification_date = getattr(self, 'modification_date', None)
        if modification_date is not None:
            out['modification_date'] = datetime_to_string(modification_date)
        for key in ('modified_by', 'number_of_attachments'):
            value = getattr(self, key, None)
            if value is not None:
                out[key] = value
        return out

    def _to_dict(self):
        """Deprecated alias for to_dict()."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a pretty-printed JSON representation of this Rule."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'Rule') -> bool:
        """Equal when classes and attribute dictionaries match."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'Rule') -> bool:
        """Logical inverse of __eq__."""
        return not self == other

    class RuleTypeEnum(str, Enum):
        """
        The type of rule. Rules that you create are `user_defined`.
        """
        USER_DEFINED = 'user_defined'
class RuleCondition():
    """
    RuleCondition.

    Abstract base marker: instantiate one of the concrete subclasses instead.
    """

    def __init__(self) -> None:
        """
        Initialize a RuleCondition object.

        :raises Exception: always — this base class cannot be instantiated directly.
        """
        subclasses = ", ".join(['RuleConditionSingleProperty', 'RuleConditionOrLvl2', 'RuleConditionAndLvl2'])
        raise Exception(
            "Cannot instantiate base class. Instead, instantiate one of the defined subclasses: {0}".format(subclasses))
class RuleList():
    """
    A list of rules.

    :attr int offset: The requested offset for the returned items.
    :attr int limit: The requested limit for the returned items.
    :attr int total_count: The total number of available items.
    :attr Link first: The first page of available items.
    :attr Link last: The last page of available items.
    :attr List[Rule] rules: An array of rules.
    """

    def __init__(self,
                 offset: int,
                 limit: int,
                 total_count: int,
                 first: 'Link',
                 last: 'Link',
                 rules: List['Rule']) -> None:
        """
        Initialize a RuleList object.

        :param int offset: The requested offset for the returned items.
        :param int limit: The requested limit for the returned items.
        :param int total_count: The total number of available items.
        :param Link first: The first page of available items.
        :param Link last: The last page of available items.
        :param List[Rule] rules: An array of rules.
        """
        self.offset = offset
        self.limit = limit
        self.total_count = total_count
        self.first = first
        self.last = last
        self.rules = rules

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'RuleList':
        """Build a RuleList from a json dictionary.

        :raises ValueError: when any required key is missing (checked in
                document order: offset, limit, total_count, first, last, rules).
        """
        kwargs = {}
        for key in ('offset', 'limit', 'total_count'):
            if key not in _dict:
                raise ValueError('Required property \'{0}\' not present in RuleList JSON'.format(key))
            kwargs[key] = _dict.get(key)
        if 'first' not in _dict:
            raise ValueError('Required property \'first\' not present in RuleList JSON')
        kwargs['first'] = Link.from_dict(_dict.get('first'))
        if 'last' not in _dict:
            raise ValueError('Required property \'last\' not present in RuleList JSON')
        kwargs['last'] = Link.from_dict(_dict.get('last'))
        if 'rules' not in _dict:
            raise ValueError('Required property \'rules\' not present in RuleList JSON')
        kwargs['rules'] = [Rule.from_dict(item) for item in _dict.get('rules')]
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Deprecated alias for from_dict()."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Serialize this model as a json-compatible dictionary, omitting unset attributes."""
        out = {}
        for key in ('offset', 'limit', 'total_count'):
            value = getattr(self, key, None)
            if value is not None:
                out[key] = value
        first = getattr(self, 'first', None)
        if first is not None:
            out['first'] = first.to_dict()
        last = getattr(self, 'last', None)
        if last is not None:
            out['last'] = last.to_dict()
        rules = getattr(self, 'rules', None)
        if rules is not None:
            out['rules'] = [item.to_dict() for item in rules]
        return out

    def _to_dict(self):
        """Deprecated alias for to_dict()."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a pretty-printed JSON representation of this RuleList."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'RuleList') -> bool:
        """Equal when classes and attribute dictionaries match."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'RuleList') -> bool:
        """Logical inverse of __eq__."""
        return not self == other
class RuleRequest():
    """
    User-settable properties associated with a rule to be created or updated.

    :attr str account_id: (optional) Your IBM Cloud account ID.
    :attr str name: A human-readable alias to assign to your rule.
    :attr str description: An extended description of your rule.
    :attr str rule_type: (optional) The type of rule. Rules that you create are
          `user_defined`.
    :attr TargetResource target: The properties that describe the resource that
          you want to target with the rule.
    :attr RuleRequiredConfig required_config:
    :attr List[EnforcementAction] enforcement_actions: The actions that the
          service must run on your behalf when a request to create or modify the
          target resource does not comply with your conditions.
    :attr List[str] labels: (optional) Labels that you can use to group and
          search for similar rules.
    """

    def __init__(self,
                 name: str,
                 description: str,
                 target: 'TargetResource',
                 required_config: 'RuleRequiredConfig',
                 enforcement_actions: List['EnforcementAction'],
                 *,
                 account_id: str = None,
                 rule_type: str = None,
                 labels: List[str] = None) -> None:
        """
        Initialize a RuleRequest object.

        :param str name: A human-readable alias to assign to your rule.
        :param str description: An extended description of your rule.
        :param TargetResource target: The properties that describe the resource
               that you want to target with the rule.
        :param RuleRequiredConfig required_config:
        :param List[EnforcementAction] enforcement_actions: Actions the service
               runs when a request does not comply with your conditions.
        :param str account_id: (optional) Your IBM Cloud account ID.
        :param str rule_type: (optional) The type of rule; user-created rules
               are `user_defined`.
        :param List[str] labels: (optional) Labels for grouping and searching.
        """
        self.account_id = account_id
        self.name = name
        self.description = description
        self.rule_type = rule_type
        self.target = target
        self.required_config = required_config
        self.enforcement_actions = enforcement_actions
        self.labels = labels

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'RuleRequest':
        """Build a RuleRequest from a json dictionary.

        :raises ValueError: when any required key (name, description, target,
                required_config, enforcement_actions) is missing.
        """
        kwargs = {}
        if 'account_id' in _dict:
            kwargs['account_id'] = _dict.get('account_id')
        if 'name' not in _dict:
            raise ValueError('Required property \'name\' not present in RuleRequest JSON')
        kwargs['name'] = _dict.get('name')
        if 'description' not in _dict:
            raise ValueError('Required property \'description\' not present in RuleRequest JSON')
        kwargs['description'] = _dict.get('description')
        if 'rule_type' in _dict:
            kwargs['rule_type'] = _dict.get('rule_type')
        if 'target' not in _dict:
            raise ValueError('Required property \'target\' not present in RuleRequest JSON')
        kwargs['target'] = TargetResource.from_dict(_dict.get('target'))
        if 'required_config' not in _dict:
            raise ValueError('Required property \'required_config\' not present in RuleRequest JSON')
        # Kept as the raw dictionary; subclass-specific parsing is not done here.
        kwargs['required_config'] = _dict.get('required_config')
        if 'enforcement_actions' not in _dict:
            raise ValueError('Required property \'enforcement_actions\' not present in RuleRequest JSON')
        kwargs['enforcement_actions'] = [EnforcementAction.from_dict(item) for item in _dict.get('enforcement_actions')]
        if 'labels' in _dict:
            kwargs['labels'] = _dict.get('labels')
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Deprecated alias for from_dict()."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Serialize this model as a json-compatible dictionary, omitting unset attributes."""
        out = {}
        for key in ('account_id', 'name', 'description', 'rule_type'):
            value = getattr(self, key, None)
            if value is not None:
                out[key] = value
        target = getattr(self, 'target', None)
        if target is not None:
            out['target'] = target.to_dict()
        required_config = getattr(self, 'required_config', None)
        if required_config is not None:
            # May be a raw dict (straight from from_dict) or a model instance.
            out['required_config'] = required_config if isinstance(required_config, dict) else required_config.to_dict()
        enforcement_actions = getattr(self, 'enforcement_actions', None)
        if enforcement_actions is not None:
            out['enforcement_actions'] = [item.to_dict() for item in enforcement_actions]
        labels = getattr(self, 'labels', None)
        if labels is not None:
            out['labels'] = labels
        return out

    def _to_dict(self):
        """Deprecated alias for to_dict()."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a pretty-printed JSON representation of this RuleRequest."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'RuleRequest') -> bool:
        """Equal when classes and attribute dictionaries match."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'RuleRequest') -> bool:
        """Logical inverse of __eq__."""
        return not self == other

    class RuleTypeEnum(str, Enum):
        """
        The type of rule. Rules that you create are `user_defined`.
        """
        USER_DEFINED = 'user_defined'
class RuleRequiredConfig():
    """
    RuleRequiredConfig.

    Abstract base marker: instantiate one of the concrete subclasses instead.
    """

    def __init__(self) -> None:
        """
        Initialize a RuleRequiredConfig object.

        :raises Exception: always — this base class cannot be instantiated directly.
        """
        subclasses = ", ".join(['RuleRequiredConfigSingleProperty', 'RuleRequiredConfigMultipleProperties'])
        raise Exception(
            "Cannot instantiate base class. Instead, instantiate one of the defined subclasses: {0}".format(subclasses))
class RuleResponseError():
    """
    RuleResponseError.

    :attr str code: Specifies the problem that caused the error.
    :attr str message: Describes the problem.
    """

    def __init__(self,
                 code: str,
                 message: str) -> None:
        """
        Initialize a RuleResponseError object.

        :param str code: Specifies the problem that caused the error.
        :param str message: Describes the problem.
        """
        self.code = code
        self.message = message

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'RuleResponseError':
        """Build a RuleResponseError from a json dictionary.

        :raises ValueError: when 'code' or 'message' is missing ('code' checked first).
        """
        if 'code' not in _dict:
            raise ValueError('Required property \'code\' not present in RuleResponseError JSON')
        if 'message' not in _dict:
            raise ValueError('Required property \'message\' not present in RuleResponseError JSON')
        return cls(code=_dict.get('code'), message=_dict.get('message'))

    @classmethod
    def _from_dict(cls, _dict):
        """Deprecated alias for from_dict()."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Serialize this model as a json-compatible dictionary."""
        out = {}
        for key in ('code', 'message'):
            value = getattr(self, key, None)
            if value is not None:
                out[key] = value
        return out

    def _to_dict(self):
        """Deprecated alias for to_dict()."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a pretty-printed JSON representation of this RuleResponseError."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'RuleResponseError') -> bool:
        """Equal when classes and attribute dictionaries match."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'RuleResponseError') -> bool:
        """Logical inverse of __eq__."""
        return not self == other
class RuleScope():
    """
    The extent at which the rule can be attached across your accounts.

    :attr str note: (optional) A short description or alias to assign to the scope.
    :attr str scope_id: The ID of the scope, such as an enterprise, account, or
          account group, that you want to evaluate.
    :attr str scope_type: The type of scope that you want to evaluate.
    """

    def __init__(self,
                 scope_id: str,
                 scope_type: str,
                 *,
                 note: str = None) -> None:
        """
        Initialize a RuleScope object.

        :param str scope_id: The ID of the scope, such as an enterprise, account,
               or account group, that you want to evaluate.
        :param str scope_type: The type of scope that you want to evaluate.
        :param str note: (optional) A short description or alias to assign to the
               scope.
        """
        self.note = note
        self.scope_id = scope_id
        self.scope_type = scope_type

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'RuleScope':
        """Build a RuleScope from a json dictionary.

        :raises ValueError: when 'scope_id' or 'scope_type' is missing.
        """
        kwargs = {}
        if 'note' in _dict:
            kwargs['note'] = _dict.get('note')
        if 'scope_id' not in _dict:
            raise ValueError('Required property \'scope_id\' not present in RuleScope JSON')
        kwargs['scope_id'] = _dict.get('scope_id')
        if 'scope_type' not in _dict:
            raise ValueError('Required property \'scope_type\' not present in RuleScope JSON')
        kwargs['scope_type'] = _dict.get('scope_type')
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Deprecated alias for from_dict()."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Serialize this model as a json-compatible dictionary, omitting unset attributes."""
        out = {}
        for key in ('note', 'scope_id', 'scope_type'):
            value = getattr(self, key, None)
            if value is not None:
                out[key] = value
        return out

    def _to_dict(self):
        """Deprecated alias for to_dict()."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a pretty-printed JSON representation of this RuleScope."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'RuleScope') -> bool:
        """Equal when classes and attribute dictionaries match."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'RuleScope') -> bool:
        """Logical inverse of __eq__."""
        return not self == other

    class ScopeTypeEnum(str, Enum):
        """
        The type of scope that you want to evaluate.
        """
        ENTERPRISE = 'enterprise'
        ENTERPRISE_ACCOUNT_GROUP = 'enterprise.account_group'
        ENTERPRISE_ACCOUNT = 'enterprise.account'
        ACCOUNT = 'account'
        ACCOUNT_RESOURCE_GROUP = 'account.resource_group'
class RuleSingleProperty():
"""
The requirement that must be met to determine the resource's level of compliance in
accordance with the rule.
To apply a single property check, define a configuration property and the desired
value that you want to check against.
:attr str description: (optional)
:attr str property: A resource configuration variable that describes the
property that you want to apply to the target resource.
Available options depend on the target service and resource.
:attr str operator: The way in which the `property` field is compared to its
value.
There are three types of operators: string, numeric, and boolean.
:attr str value: (optional) The way in which you want your property to be
applied.
Value options differ depending on the rule that you configure. If you use a
boolean operator, you do not need to input a value.
"""
def __init__(self,
property: str,
operator: str,
*,
description: str = None,
value: str = None) -> None:
"""
Initialize a RuleSingleProperty object.
:param str property: A resource configuration variable that describes the
property that you want to apply to the target resource.
Available options depend on the target service and resource.
:param str operator: The way in which the `property` field is compared to
its value.
There are three types of operators: string, numeric, and boolean.
:param str description: (optional)
:param str value: (optional) The way in which you want your property to be
applied.
Value options differ depending on the rule that you configure. If you use a
boolean operator, you do not need to input a value.
"""
self.description = description
self.property = property
self.operator = operator
self.value = value
@classmethod
def from_dict(cls, _dict: Dict) -> 'RuleSingleProperty':
"""Initialize a RuleSingleProperty object from a json dictionary."""
args = {}
if 'description' in _dict:
args['description'] = _dict.get('description')
if 'property' in _dict:
args['property'] = _dict.get('property')
else:
raise ValueError('Required property \'property\' not present in RuleSingleProperty JSON')
if 'operator' in _dict:
args['operator'] = _dict.get('operator')
else:
raise ValueError('Required property \'operator\' not present in RuleSingleProperty JSON')
if 'value' in _dict:
args['value'] = _dict.get('value')
return cls(**args)
    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a RuleSingleProperty object from a json dictionary."""
        # Legacy-style alias kept for backward compatibility; simply delegates
        # to from_dict().
        return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'description') and self.description is not None:
_dict['description'] = self.description
if hasattr(self, 'property') and self.property is not None:
_dict['property'] = self.property
if hasattr(self, 'operator') and self.operator is not None:
_dict['operator'] = self.operator
if hasattr(self, 'value') and self.value is not None:
_dict['value'] = self.value
return _dict
    def _to_dict(self):
        """Return a json dictionary representing this model."""
        # Legacy-style alias kept for backward compatibility.
        return self.to_dict()
    def __str__(self) -> str:
        """Return a `str` version of this RuleSingleProperty object."""
        # Pretty-printed JSON form, convenient for logging and debugging.
        return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'RuleSingleProperty') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
    def __ne__(self, other: 'RuleSingleProperty') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        # Defined as the negation of __eq__ to keep the two consistent.
        return not self == other
    class OperatorEnum(str, Enum):
        """
        The way in which the `property` field is compared to its value.
        There are three types of operators: string, numeric, and boolean.
        """
        # Boolean / presence operators (no `value` required).
        IS_TRUE = 'is_true'
        IS_FALSE = 'is_false'
        IS_EMPTY = 'is_empty'
        IS_NOT_EMPTY = 'is_not_empty'
        # String operators.
        STRING_EQUALS = 'string_equals'
        STRING_NOT_EQUALS = 'string_not_equals'
        STRING_MATCH = 'string_match'
        STRING_NOT_MATCH = 'string_not_match'
        # Numeric operators.
        NUM_EQUALS = 'num_equals'
        NUM_NOT_EQUALS = 'num_not_equals'
        NUM_LESS_THAN = 'num_less_than'
        NUM_LESS_THAN_EQUALS = 'num_less_than_equals'
        NUM_GREATER_THAN = 'num_greater_than'
        NUM_GREATER_THAN_EQUALS = 'num_greater_than_equals'
        # Collection / network operators.
        IPS_IN_RANGE = 'ips_in_range'
        STRINGS_IN_LIST = 'strings_in_list'
class RuleTargetAttribute():
    """
    The attributes that are associated with a rule target.

    :attr str name: the attribute name.
    :attr str operator: how the `name` field is compared to its value; string,
          numeric, and boolean operators are available.
    :attr str value: (optional) the value compared against; boolean operators
          need no value.
    """

    def __init__(self,
                 name: str,
                 operator: str,
                 *,
                 value: str = None) -> None:
        """
        Initialize a RuleTargetAttribute object.

        :param str name: the attribute name.
        :param str operator: how `name` is compared to its value.
        :param str value: (optional) the comparison value; not needed for
               boolean operators.
        """
        self.name = name
        self.operator = operator
        self.value = value

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'RuleTargetAttribute':
        """Initialize a RuleTargetAttribute object from a json dictionary."""
        args = {}
        for required in ('name', 'operator'):
            if required not in _dict:
                raise ValueError('Required property \'%s\' not present in RuleTargetAttribute JSON' % required)
            args[required] = _dict.get(required)
        if 'value' in _dict:
            args['value'] = _dict.get('value')
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a RuleTargetAttribute object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model.

        Only attributes that are present and non-None are serialized.
        """
        result = {}
        for attr in ('name', 'operator', 'value'):
            val = getattr(self, attr, None)
            if val is not None:
                result[attr] = val
        return result

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this RuleTargetAttribute object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'RuleTargetAttribute') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'RuleTargetAttribute') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other

    class OperatorEnum(str, Enum):
        """
        The way in which the `name` field is compared to its value.
        There are three types of operators: string, numeric, and boolean.
        """
        STRING_EQUALS = 'string_equals'
        STRING_NOT_EQUALS = 'string_not_equals'
        STRING_MATCH = 'string_match'
        STRING_NOT_MATCH = 'string_not_match'
        NUM_EQUALS = 'num_equals'
        NUM_NOT_EQUALS = 'num_not_equals'
        NUM_LESS_THAN = 'num_less_than'
        NUM_LESS_THAN_EQUALS = 'num_less_than_equals'
        NUM_GREATER_THAN = 'num_greater_than'
        NUM_GREATER_THAN_EQUALS = 'num_greater_than_equals'
        IS_EMPTY = 'is_empty'
        IS_NOT_EMPTY = 'is_not_empty'
        IS_TRUE = 'is_true'
        IS_FALSE = 'is_false'
        IPS_IN_RANGE = 'ips_in_range'
        STRINGS_IN_LIST = 'strings_in_list'
class TargetResource():
    """
    The properties that describe the resource that you want to target with the rule.

    :attr str service_name: programmatic name of the IBM Cloud service to target.
    :attr str resource_kind: the type of resource to target.
    :attr List[RuleTargetAttribute] additional_target_attributes: (optional)
          extra qualifiers for the resource kind; when present, only resources
          matching them are included in the rule.
    """

    def __init__(self,
                 service_name: str,
                 resource_kind: str,
                 *,
                 additional_target_attributes: List['RuleTargetAttribute'] = None) -> None:
        """
        Initialize a TargetResource object.

        :param str service_name: programmatic name of the targeted service.
        :param str resource_kind: the type of resource to target.
        :param List[RuleTargetAttribute] additional_target_attributes:
               (optional) extra qualifiers for the resource kind.
        """
        self.service_name = service_name
        self.resource_kind = resource_kind
        self.additional_target_attributes = additional_target_attributes

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'TargetResource':
        """Initialize a TargetResource object from a json dictionary."""
        args = {}
        for required in ('service_name', 'resource_kind'):
            if required not in _dict:
                raise ValueError('Required property \'%s\' not present in TargetResource JSON' % required)
            args[required] = _dict.get(required)
        if 'additional_target_attributes' in _dict:
            args['additional_target_attributes'] = [
                RuleTargetAttribute.from_dict(x)
                for x in _dict.get('additional_target_attributes')]
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a TargetResource object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        result = {}
        if getattr(self, 'service_name', None) is not None:
            result['service_name'] = self.service_name
        if getattr(self, 'resource_kind', None) is not None:
            result['resource_kind'] = self.resource_kind
        extra = getattr(self, 'additional_target_attributes', None)
        if extra is not None:
            result['additional_target_attributes'] = [x.to_dict() for x in extra]
        return result

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this TargetResource object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'TargetResource') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'TargetResource') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class RuleConditionAndLvl2(RuleCondition):
    """
    A condition with the `and` logical operator.

    :attr str description: (optional)
    :attr List[RuleSingleProperty] and_: property checks that must all hold.
    """

    def __init__(self,
                 and_: List['RuleSingleProperty'],
                 *,
                 description: str = None) -> None:
        """
        Initialize a RuleConditionAndLvl2 object.

        :param List[RuleSingleProperty] and_: property checks joined by `and`.
        :param str description: (optional)
        """
        # pylint: disable=super-init-not-called
        self.and_ = and_
        self.description = description

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'RuleConditionAndLvl2':
        """Initialize a RuleConditionAndLvl2 object from a json dictionary."""
        args = {}
        if 'description' in _dict:
            args['description'] = _dict.get('description')
        if 'and' not in _dict:
            raise ValueError('Required property \'and\' not present in RuleConditionAndLvl2 JSON')
        args['and_'] = [RuleSingleProperty.from_dict(x) for x in _dict.get('and')]
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a RuleConditionAndLvl2 object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        result = {}
        if getattr(self, 'description', None) is not None:
            result['description'] = self.description
        if getattr(self, 'and_', None) is not None:
            # JSON uses the reserved word 'and'; the Python attribute carries a
            # trailing underscore to stay a valid identifier.
            result['and'] = [x.to_dict() for x in self.and_]
        return result

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this RuleConditionAndLvl2 object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'RuleConditionAndLvl2') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'RuleConditionAndLvl2') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class RuleConditionOrLvl2(RuleCondition):
    """
    A condition with the `or` logical operator.

    :attr str description: (optional)
    :attr List[RuleSingleProperty] or_: property checks of which at least one
          must hold.
    """

    def __init__(self,
                 or_: List['RuleSingleProperty'],
                 *,
                 description: str = None) -> None:
        """
        Initialize a RuleConditionOrLvl2 object.

        :param List[RuleSingleProperty] or_: property checks joined by `or`.
        :param str description: (optional)
        """
        # pylint: disable=super-init-not-called
        self.or_ = or_
        self.description = description

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'RuleConditionOrLvl2':
        """Initialize a RuleConditionOrLvl2 object from a json dictionary."""
        args = {}
        if 'description' in _dict:
            args['description'] = _dict.get('description')
        if 'or' not in _dict:
            raise ValueError('Required property \'or\' not present in RuleConditionOrLvl2 JSON')
        args['or_'] = [RuleSingleProperty.from_dict(x) for x in _dict.get('or')]
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a RuleConditionOrLvl2 object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        result = {}
        if getattr(self, 'description', None) is not None:
            result['description'] = self.description
        if getattr(self, 'or_', None) is not None:
            # JSON uses the reserved word 'or'; the Python attribute carries a
            # trailing underscore to stay a valid identifier.
            result['or'] = [x.to_dict() for x in self.or_]
        return result

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this RuleConditionOrLvl2 object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'RuleConditionOrLvl2') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'RuleConditionOrLvl2') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class RuleConditionSingleProperty(RuleCondition):
    """
    The requirement that must be met to determine the resource's level of compliance
    in accordance with the rule.

    A single property check names a configuration property and the desired value
    to check it against.

    :attr str description: (optional)
    :attr str property: the resource configuration variable to check; available
          options depend on the target service and resource.
    :attr str operator: how `property` is compared to `value`; string, numeric,
          and boolean operators are available.
    :attr str value: (optional) the comparison value; boolean operators need no
          value.
    """

    def __init__(self,
                 property: str,
                 operator: str,
                 *,
                 description: str = None,
                 value: str = None) -> None:
        """
        Initialize a RuleConditionSingleProperty object.

        :param str property: the resource configuration variable to check.
        :param str operator: how `property` is compared to `value`.
        :param str description: (optional)
        :param str value: (optional) the comparison value; not needed for
               boolean operators.
        """
        # pylint: disable=super-init-not-called
        self.property = property
        self.operator = operator
        self.value = value
        self.description = description

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'RuleConditionSingleProperty':
        """Initialize a RuleConditionSingleProperty object from a json dictionary."""
        args = {}
        if 'description' in _dict:
            args['description'] = _dict.get('description')
        for required in ('property', 'operator'):
            if required not in _dict:
                raise ValueError('Required property \'%s\' not present in RuleConditionSingleProperty JSON' % required)
            args[required] = _dict.get(required)
        if 'value' in _dict:
            args['value'] = _dict.get('value')
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a RuleConditionSingleProperty object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        result = {}
        for attr in ('description', 'property', 'operator', 'value'):
            val = getattr(self, attr, None)
            if val is not None:
                result[attr] = val
        return result

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this RuleConditionSingleProperty object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'RuleConditionSingleProperty') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'RuleConditionSingleProperty') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other

    class OperatorEnum(str, Enum):
        """
        The way in which the `property` field is compared to its value.
        There are three types of operators: string, numeric, and boolean.
        """
        IS_TRUE = 'is_true'
        IS_FALSE = 'is_false'
        IS_EMPTY = 'is_empty'
        IS_NOT_EMPTY = 'is_not_empty'
        STRING_EQUALS = 'string_equals'
        STRING_NOT_EQUALS = 'string_not_equals'
        STRING_MATCH = 'string_match'
        STRING_NOT_MATCH = 'string_not_match'
        NUM_EQUALS = 'num_equals'
        NUM_NOT_EQUALS = 'num_not_equals'
        NUM_LESS_THAN = 'num_less_than'
        NUM_LESS_THAN_EQUALS = 'num_less_than_equals'
        NUM_GREATER_THAN = 'num_greater_than'
        NUM_GREATER_THAN_EQUALS = 'num_greater_than_equals'
        IPS_IN_RANGE = 'ips_in_range'
        STRINGS_IN_LIST = 'strings_in_list'
class RuleRequiredConfigMultipleProperties(RuleRequiredConfig):
    """
    The requirements that must be met to determine the resource's level of
    compliance in accordance with the rule.

    Logical operators (`and`/`or`) combine multiple property checks: list the
    checks in the `and` array and use `or` for alternative conditions.
    """

    def __init__(self) -> None:
        """
        Initialize a RuleRequiredConfigMultipleProperties object.
        """
        # pylint: disable=super-init-not-called
        # Abstract discriminator class: only the And/Or subclasses may be
        # instantiated.
        subclasses = ['RuleRequiredConfigMultiplePropertiesConditionOr',
                      'RuleRequiredConfigMultiplePropertiesConditionAnd']
        raise Exception(
            "Cannot instantiate base class. Instead, instantiate one of the defined subclasses: {0}".format(
                ", ".join(subclasses)))
class RuleRequiredConfigSingleProperty(RuleRequiredConfig):
    """
    The requirement that must be met to determine the resource's level of
    compliance in accordance with the rule.

    A single property check names a configuration property and the desired value
    to check it against.

    :attr str description: (optional)
    :attr str property: the resource configuration variable to check; available
          options depend on the target service and resource.
    :attr str operator: how `property` is compared to `value`; string, numeric,
          and boolean operators are available.
    :attr str value: (optional) the comparison value; boolean operators need no
          value.
    """

    def __init__(self,
                 property: str,
                 operator: str,
                 *,
                 description: str = None,
                 value: str = None) -> None:
        """
        Initialize a RuleRequiredConfigSingleProperty object.

        :param str property: the resource configuration variable to check.
        :param str operator: how `property` is compared to `value`.
        :param str description: (optional)
        :param str value: (optional) the comparison value; not needed for
               boolean operators.
        """
        # pylint: disable=super-init-not-called
        self.property = property
        self.operator = operator
        self.value = value
        self.description = description

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'RuleRequiredConfigSingleProperty':
        """Initialize a RuleRequiredConfigSingleProperty object from a json dictionary."""
        args = {}
        if 'description' in _dict:
            args['description'] = _dict.get('description')
        for required in ('property', 'operator'):
            if required not in _dict:
                raise ValueError('Required property \'%s\' not present in RuleRequiredConfigSingleProperty JSON' % required)
            args[required] = _dict.get(required)
        if 'value' in _dict:
            args['value'] = _dict.get('value')
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a RuleRequiredConfigSingleProperty object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        result = {}
        for attr in ('description', 'property', 'operator', 'value'):
            val = getattr(self, attr, None)
            if val is not None:
                result[attr] = val
        return result

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this RuleRequiredConfigSingleProperty object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'RuleRequiredConfigSingleProperty') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'RuleRequiredConfigSingleProperty') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other

    class OperatorEnum(str, Enum):
        """
        The way in which the `property` field is compared to its value.
        There are three types of operators: string, numeric, and boolean.
        """
        IS_TRUE = 'is_true'
        IS_FALSE = 'is_false'
        IS_EMPTY = 'is_empty'
        IS_NOT_EMPTY = 'is_not_empty'
        STRING_EQUALS = 'string_equals'
        STRING_NOT_EQUALS = 'string_not_equals'
        STRING_MATCH = 'string_match'
        STRING_NOT_MATCH = 'string_not_match'
        NUM_EQUALS = 'num_equals'
        NUM_NOT_EQUALS = 'num_not_equals'
        NUM_LESS_THAN = 'num_less_than'
        NUM_LESS_THAN_EQUALS = 'num_less_than_equals'
        NUM_GREATER_THAN = 'num_greater_than'
        NUM_GREATER_THAN_EQUALS = 'num_greater_than_equals'
        IPS_IN_RANGE = 'ips_in_range'
        STRINGS_IN_LIST = 'strings_in_list'
class RuleRequiredConfigMultiplePropertiesConditionAnd(RuleRequiredConfigMultipleProperties):
    """
    A condition with the `and` logical operator.

    :attr str description: (optional)
    :attr List[RuleCondition] and_: conditions that must all hold.
    """

    def __init__(self,
                 and_: List['RuleCondition'],
                 *,
                 description: str = None) -> None:
        """
        Initialize a RuleRequiredConfigMultiplePropertiesConditionAnd object.

        :param List[RuleCondition] and_: conditions joined by `and`.
        :param str description: (optional)
        """
        # pylint: disable=super-init-not-called
        self.and_ = and_
        self.description = description

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'RuleRequiredConfigMultiplePropertiesConditionAnd':
        """Initialize a RuleRequiredConfigMultiplePropertiesConditionAnd object from a json dictionary."""
        args = {}
        if 'description' in _dict:
            args['description'] = _dict.get('description')
        if 'and' not in _dict:
            raise ValueError('Required property \'and\' not present in RuleRequiredConfigMultiplePropertiesConditionAnd JSON')
        # The list is kept as raw dicts: RuleCondition is an abstract
        # discriminator, so elements are not eagerly converted here.
        args['and_'] = _dict.get('and')
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a RuleRequiredConfigMultiplePropertiesConditionAnd object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        result = {}
        if getattr(self, 'description', None) is not None:
            result['description'] = self.description
        if getattr(self, 'and_', None) is not None:
            # Elements may be raw dicts (from from_dict) or model objects.
            result['and'] = [x if isinstance(x, dict) else x.to_dict()
                             for x in self.and_]
        return result

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this RuleRequiredConfigMultiplePropertiesConditionAnd object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'RuleRequiredConfigMultiplePropertiesConditionAnd') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'RuleRequiredConfigMultiplePropertiesConditionAnd') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class RuleRequiredConfigMultiplePropertiesConditionOr(RuleRequiredConfigMultipleProperties):
    """
    A condition with the `or` logical operator.

    :attr str description: (optional)
    :attr List[RuleCondition] or_: conditions of which at least one must hold.
    """

    def __init__(self,
                 or_: List['RuleCondition'],
                 *,
                 description: str = None) -> None:
        """
        Initialize a RuleRequiredConfigMultiplePropertiesConditionOr object.

        :param List[RuleCondition] or_: conditions joined by `or`.
        :param str description: (optional)
        """
        # pylint: disable=super-init-not-called
        self.or_ = or_
        self.description = description

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'RuleRequiredConfigMultiplePropertiesConditionOr':
        """Initialize a RuleRequiredConfigMultiplePropertiesConditionOr object from a json dictionary."""
        args = {}
        if 'description' in _dict:
            args['description'] = _dict.get('description')
        if 'or' not in _dict:
            raise ValueError('Required property \'or\' not present in RuleRequiredConfigMultiplePropertiesConditionOr JSON')
        # The list is kept as raw dicts: RuleCondition is an abstract
        # discriminator, so elements are not eagerly converted here.
        args['or_'] = _dict.get('or')
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a RuleRequiredConfigMultiplePropertiesConditionOr object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        result = {}
        if getattr(self, 'description', None) is not None:
            result['description'] = self.description
        if getattr(self, 'or_', None) is not None:
            # Elements may be raw dicts (from from_dict) or model objects.
            result['or'] = [x if isinstance(x, dict) else x.to_dict()
                            for x in self.or_]
        return result

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this RuleRequiredConfigMultiplePropertiesConditionOr object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'RuleRequiredConfigMultiplePropertiesConditionOr') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'RuleRequiredConfigMultiplePropertiesConditionOr') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
| StarcoderdataPython |
def CalculateHowManyRelationsWithShortestPathInDataset (Sentences):
    """Tally candidate pairs that have both a top-K path and path features.

    Fixes: ported Python-2-only syntax (`dict.has_key`, `<>`) to the
    Python-2/3 compatible `in` and `is not None` forms, and replaced the
    manual has_key-branch counter with `dict.get`.

    :param Sentences: iterable of sentence dicts, each holding a "PAIRS" list.
    :returns: tuple (POS_NEG_DICT, CLASS_TP_DICT, Total_Example_CNT) where
        POS_NEG_DICT counts positive/negative examples, CLASS_TP_DICT counts
        examples per relation class ("NEG" for negatives), and
        Total_Example_CNT is the grand total of counted examples.
    """
    POS_NEG_DICT = {"Positives": 0, "Negatives": 0}
    CLASS_TP_DICT = {"NEG": 0}
    for sentence in Sentences:
        for pair in sentence["PAIRS"]:
            # Both keys must exist and be non-None; an example might have
            # pair["TOPKP_Features"] == [] in a very rare condition, which
            # still counts here (only None is excluded).
            if ("TOPKP" in pair) and ("TOPKP_Features" in pair):
                if (pair["TOPKP"] is not None) and (pair["TOPKP_Features"] is not None):
                    # 1- binary tally: is the pair any type of relation?
                    if pair["POSITIVE"]:
                        POS_NEG_DICT["Positives"] += 1
                    else:
                        POS_NEG_DICT["Negatives"] += 1
                    # 2- multiclass tally: None class type means negative.
                    class_tp = pair["CLASS_TP"]
                    if class_tp is None:
                        CLASS_TP_DICT["NEG"] += 1
                    else:
                        CLASS_TP_DICT[class_tp] = CLASS_TP_DICT.get(class_tp, 0) + 1
    Total_Example_CNT = POS_NEG_DICT["Positives"] + POS_NEG_DICT["Negatives"]
    return POS_NEG_DICT, CLASS_TP_DICT, Total_Example_CNT
def CalculateHowManyRelationsWithTOPKPaths (Sentences):
    """Tally candidate pairs that have a top-K dependency path (pair["TOPKP"]).

    Fixes: ported Python-2-only syntax (`dict.has_key`, `<>`) to the
    Python-2/3 compatible `in` and `is not None` forms, and replaced the
    manual has_key-branch counter with `dict.get`.

    :param Sentences: iterable of sentence dicts, each holding a "PAIRS" list.
    :returns: tuple (POS_NEG_DICT, CLASS_TP_DICT, Total_Example_CNT) where
        POS_NEG_DICT counts positive/negative examples, CLASS_TP_DICT counts
        examples per relation class ("NEG" for negatives), and
        Total_Example_CNT is the grand total of counted examples.
    """
    POS_NEG_DICT = {"Positives": 0, "Negatives": 0}
    CLASS_TP_DICT = {"NEG": 0}
    for sentence in Sentences:
        for pair in sentence["PAIRS"]:
            # Only pairs for which a top-K path was extracted are counted.
            if ("TOPKP" in pair) and (pair["TOPKP"] is not None):
                # 1- binary tally: is the pair any type of relation?
                if pair["POSITIVE"]:
                    POS_NEG_DICT["Positives"] += 1
                else:
                    POS_NEG_DICT["Negatives"] += 1
                # 2- multiclass tally: None class type means negative.
                class_tp = pair["CLASS_TP"]
                if class_tp is None:
                    CLASS_TP_DICT["NEG"] += 1
                else:
                    CLASS_TP_DICT[class_tp] = CLASS_TP_DICT.get(class_tp, 0) + 1
    Total_Example_CNT = POS_NEG_DICT["Positives"] + POS_NEG_DICT["Negatives"]
    return POS_NEG_DICT, CLASS_TP_DICT, Total_Example_CNT
def PartitionIntoEXACTLYNPartitions(self, L, N):
    """Split list L into exactly N round-robin partitions.

    Element i goes to partition i % N, so partition j receives L[j::N].
    Halts the program (via self.PROGRAM_Halt) when N exceeds len(L).
    """
    if N > len(L):
        self.PROGRAM_Halt ("Error in LIST_PartitionIntoNPartitions: N should be in [1,len(L)]")
    # L[j::N] is exactly the set of elements whose index is congruent to j
    # modulo N, in their original order.
    return [L[j::N] for j in range(N)]
def NVLR (S, Cnt):
    """NVL + Right-pad: render S in a field of exactly Cnt characters.

    None becomes Cnt spaces; shorter values are right-padded with spaces;
    longer values are truncated to Cnt characters.
    """
    if S is None:
        return " " * Cnt
    if len(S) >= Cnt:
        return S[0:Cnt]
    return str(S) + " " * (Cnt - len(S))
def NVLL (S, Cnt):
    """NVL + Left-pad: render S in a field of exactly Cnt characters.

    None becomes Cnt spaces; shorter values are left-padded with spaces;
    longer values are truncated to Cnt characters.
    """
    if S is None:
        return " " * Cnt
    if len(S) >= Cnt:
        return S[0:Cnt]
    return " " * (Cnt - len(S)) + str(S)
"""
def CalculateNumberOfExamplesPerSentence (self, Sentences):
TOTAL_POSITIVES = 0 ;
TOTAL_NEGATIVES = 0 ;
for S in Sentences:
NumberOfExamples = {"negatives":0} ;
if (len(S["ENTITIES"])==0) or (len(S["PAIRS"])==0): #Second condition for those sentences that have one entity but self interaction which are apparently discarded in ANTTI work ...
S["NumberOfExamples"] = NumberOfExamples;
else:
for pair in S["PAIRS"]:
if pair["TOPKP"] <> None:
if pair["POSITIVE"] == True:
if self.Configs["ClassificationType"]=="binary":
if NumberOfExamples.has_key("positives"):
NumberOfExamples["positives"]+=1 ;
else:
NumberOfExamples["positives"]=1 ;
else:
CLASS_TP = pair["CLASS_TP"];
if NumberOfExamples.has_key(CLASS_TP):
NumberOfExamples[CLASS_TP]+=1 ;
else:
NumberOfExamples[CLASS_TP]=1 ;
else:
NumberOfExamples["negatives"]+=1;
S["NumberOfExamples"] = NumberOfExamples;
TOTAL_POSITIVES += sum (S["NumberOfExamples"][t] for t in S["NumberOfExamples"] if t <> "negatives");
TOTAL_NEGATIVES += S["NumberOfExamples"]["negatives"];
self.lp (["-"*50 , "Calculating Number of Examples per sentence." , "-"*20,
"EXAMPLE STATISTICS:",
"NUMBER OF POSITIVES : " + str(TOTAL_POSITIVES) ,
"NUMBER OF NEGATIVES : " + str(TOTAL_NEGATIVES) ,
"TOTAL : " + str(TOTAL_POSITIVES + TOTAL_NEGATIVES) , "-"*50]) ;
def CalculateDocumentWiseStatistics (self, Sentences , PrintStatistics = False):
docs = {};
for sentence in Sentences:
if docs.has_key(sentence["DOC_ID"]):
docs[sentence["DOC_ID"]].append (sentence);
else:
docs[sentence["DOC_ID"]] = [sentence];
docs_info = {};
for doc_id in docs:
docs_info[doc_id] = {"Interactions":{} , "Sentences":{} , "Sentence_Count":0};
for sentence in docs[doc_id]:
HAS_RELATION = False ;
for pair in sentence["PAIRS"]:
if pair["TOPKP"] <> None:
HAS_RELATION = True ;
if not sentence["ID"] in docs_info[doc_id]["Sentences"]:
docs_info[doc_id]["Sentences"][sentence["ID"]] = {} ;
if self.Configs["ClassificationType"] == "binary":
if pair["POSITIVE"]==True:
if docs_info[doc_id]["Interactions"].has_key("Positives"):
docs_info[doc_id]["Interactions"]["Positives"]+=1;
else:
docs_info[doc_id]["Interactions"]["Positives"]=1;
if docs_info[doc_id]["Sentences"][sentence["ID"]].has_key("Positives"):
docs_info[doc_id]["Sentences"][sentence["ID"]]["Positives"]+=1 ;
else:
docs_info[doc_id]["Sentences"][sentence["ID"]]["Positives"]=1 ;
else:
if docs_info[doc_id]["Interactions"].has_key("Negatives"):
docs_info[doc_id]["Interactions"]["Negatives"]+=1;
else:
docs_info[doc_id]["Interactions"]["Negatives"]=1;
if docs_info[doc_id]["Sentences"][sentence["ID"]].has_key("Negatives"):
docs_info[doc_id]["Sentences"][sentence["ID"]]["Negatives"]+=1 ;
else:
docs_info[doc_id]["Sentences"][sentence["ID"]]["Negatives"]=1 ;
else: #multiclass
if pair["CLASS_TP"]==None:
if docs_info[doc_id]["Interactions"].has_key("Negatives"):
docs_info[doc_id]["Interactions"]["Negatives"]+=1;
else:
docs_info[doc_id]["Interactions"]["Negatives"]=1;
if docs_info[doc_id]["Sentences"][sentence["ID"]].has_key("Negatives"):
docs_info[doc_id]["Sentences"][sentence["ID"]]["Negatives"]+= 1 ;
else:
docs_info[doc_id]["Sentences"][sentence["ID"]]["Negatives"]= 1 ;
else:
if docs_info[doc_id]["Interactions"].has_key(pair["CLASS_TP"]):
docs_info[doc_id]["Interactions"][pair["CLASS_TP"]]+=1;
else:
docs_info[doc_id]["Interactions"][pair["CLASS_TP"]]=1;
if docs_info[doc_id]["Sentences"][sentence["ID"]].has_key(pair["CLASS_TP"]):
docs_info[doc_id]["Sentences"][sentence["ID"]][pair["CLASS_TP"]]+= 1 ;
else:
docs_info[doc_id]["Sentences"][sentence["ID"]][pair["CLASS_TP"]]= 1 ;
if HAS_RELATION:
if docs_info[doc_id].has_key("Sentence_Count"):
docs_info[doc_id]["Sentence_Count"] += 1 ;
else:
docs_info[doc_id]["Sentence_Count"] = 1 ;
if PrintStatistics:
MSG = ["-"*55 , " -------- STATISTICS ABOUT EXAMPLES IN EACH DOCUMENT --------"] ;
for d_id in docs_info:
MSG.append (d_id + ": Sentences:" + str(docs_info[d_id]["Sentence_Count"]) + " Total Examples:" + str(sum([docs_info[d_id]["Interactions"][i] for i in docs_info[d_id]["Interactions"]])));
for s_id in docs_info[d_id]["Sentences"]:
MSG.append ("\t"+s_id+": " + str(docs_info[d_id]["Sentences"][s_id])) ;
MSG.append ("-");
self.lp (MSG);
return docs, docs_info ;
""" | StarcoderdataPython |
4812712 | <reponame>codehacken/Kb4ML<gh_stars>0
__author__ = 'ashwin'
__email__ = '<EMAIL>'
"""
All Test Code.
"""
from lib.models.classify import NaiveBayes
data_sep = ","
elim_var = ['$continuous$']
def test_naive_bayes(train_file_reader, test_file_reader):
    """Train a Naive Bayes classifier on the training reader's data and
    return its accuracy score on the test reader's data."""
    nb = NaiveBayes()
    # Vectorize and fit on the training set.
    features, labels = nb.vectorize_data(train_file_reader.file_feature_data,
                                         train_file_reader.file_class_result)
    model = nb.train_model(features, labels)
    # Vectorize the held-out data (third argument False, as for test data).
    test_features, test_labels = nb.vectorize_data(test_file_reader.file_feature_data,
                                                   test_file_reader.file_class_result,
                                                   False)
    # Predict and score.
    predictions = model.predict(test_features)
    return nb.get_accuracy_score(test_labels, predictions)
def test_nb_cross_product(train_file_reader, test_file_reader, cross_prod_columns):
    """Like test_naive_bayes, but first augments the feature data with the
    cross product of the given columns; returns the accuracy score."""
    nb = NaiveBayes()
    # Augment the training features with the column cross product, then fit.
    crossed_train = train_file_reader.cross_prod_var(train_file_reader.file_feature_data,
                                                     cross_prod_columns)
    features, labels = nb.vectorize_data(crossed_train,
                                         train_file_reader.file_class_result)
    model = nb.train_model(features, labels)
    # Apply the same augmentation to the test features before vectorizing.
    crossed_test = test_file_reader.cross_prod_var(test_file_reader.file_feature_data,
                                                   cross_prod_columns)
    test_features, test_labels = nb.vectorize_data(crossed_test,
                                                   test_file_reader.file_class_result,
                                                   False)
    # Predict and score.
    predictions = model.predict(test_features)
    return nb.get_accuracy_score(test_labels, predictions)
| StarcoderdataPython |
def upper(val):
    """Return *val* converted to upper case via its ``upper`` method."""
    result = val.upper()
    return result
| StarcoderdataPython |
3271445 | import random as rand
from math import exp, sqrt
from knapsack.hyper.single.problem import solve, validate
def temperature_ksp(t, iteration):
    """Cooling schedule for the knapsack annealer: the next temperature is
    the square root of the current one. ``iteration`` is unused but kept
    for the schedule-function interface."""
    cooled = sqrt(t)
    return cooled
def change_state_candidate_ksp(validator, seq, **kwargs):
    """Propose a neighbouring knapsack state by flipping one inclusion bit.

    Repeatedly toggles a random position on a fresh copy of ``seq`` until
    ``validator(copy, **kwargs)`` accepts the candidate, then returns it.
    ``seq`` itself is never mutated.
    """
    last_index = len(seq) - 1
    while True:
        candidate = list(seq)
        flip_at = rand.randint(0, last_index)
        candidate[flip_at] = not candidate[flip_at]
        if validator(candidate, **kwargs):
            return candidate
def simple_probability_change(current_state, candidate_state, delta_energy, temperature):
    """Metropolis acceptance rule for a worse candidate (delta_energy > 0).

    Accepts the candidate with probability exp(-delta_energy / temperature),
    otherwise keeps the current state. The original returned the states the
    other way around, which inverted the rule: as the temperature cooled it
    almost always accepted the *worse* candidate and almost never explored
    at high temperature.
    """
    if rand.random() <= exp(-delta_energy / temperature):
        return candidate_state
    return current_state
def energy_calculator_ksp(seq, **kwargs):
    """Energy of a knapsack state: the reciprocal of its profit, so higher
    profit means lower energy; non-positive profit maps to +inf."""
    profit = solve(seq, **kwargs)
    if profit > 0:
        return 1 / profit
    return float("inf")
def minimize(initial_state, tmin, tmax,
             energy_calculator=energy_calculator_ksp,
             change_state_candidate=change_state_candidate_ksp,
             validator=validate,
             temperature_change=temperature_ksp,
             probability_change=simple_probability_change,
             **kwargs):
    """Simulated-annealing driver.

    Starting from ``initial_state`` at temperature ``tmax``, repeatedly
    proposes a validated neighbour, always accepts improvements and defers
    worse candidates to ``probability_change``, cooling via
    ``temperature_change`` until the temperature drops to ``tmin``.
    Returns the final state.
    """
    state = initial_state
    temperature = tmax
    iteration = 1
    while temperature > tmin:
        candidate = change_state_candidate(validator, state, **kwargs)
        delta = (energy_calculator(candidate, **kwargs)
                 - energy_calculator(state, **kwargs))
        if delta <= 0:
            # Equal or lower energy: always accept.
            state = candidate
        else:
            # Higher energy: let the acceptance rule decide.
            state = probability_change(state, candidate, delta, temperature)
        temperature = temperature_change(temperature, iteration)
        iteration += 1
    return state
| StarcoderdataPython |
1620701 | <filename>Inventationery/apps/DirParty/admin.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Date: 2015-11-16 19:08:22
# @Last Modified by: harmenta
# @Last Modified time: 2015-11-17 13:35:09
from django.contrib import admin
from .models import DirPartyModel
# Register your models here.
# Expose DirPartyModel in the Django admin with the default ModelAdmin options.
admin.site.register(DirPartyModel)
| StarcoderdataPython |
3387998 | <reponame>ScriptErrorVGM/Project2021
def main():
    """Demo of a simple trial-division primality test: prints the result
    for -1, 1 and 11 (False, False, True)."""
    def is_prime(n):
        # Negative numbers and 1 are not prime.
        if n < 0 or n == 1:
            return False
        # Even numbers: only a positive 2 is prime (0 falls through to False).
        if n % 2 == 0:
            return n == 2 and n > 0
        # Odd n: trial division by odd factors up to sqrt(n).
        divisor = 3
        while divisor * divisor <= n and n % divisor != 0:
            divisor += 2
        return divisor * divisor > n
    for value in (-1, 1, 11):
        print(is_prime(value))
if __name__ == "__main__":
main()
| StarcoderdataPython |
1783483 | #%%
import sys
sys.path.append("../..")
import scipy
import numpy as np
from numpy.linalg import matrix_rank, matrix_power, cholesky, inv
import util.geometry_util as geo_util
from solvers.rigidity_solver.gradient import gradient_analysis
from solvers.rigidity_solver.internal_structure import tetrahedronize
from solvers.rigidity_solver.algo_core import solve_rigidity, spring_energy_matrix
from solvers.rigidity_solver.joints import Beam, Model, Hinge
from solvers.rigidity_solver.gradient import gradient_analysis
from visualization.model_visualizer import visualize_3D
import tests.testsamples
#%%
def sample_spherical(npoints, ndim=3):
    """Draw ``npoints`` random directions in ``ndim`` dimensions: sample
    standard-normal vectors and normalize each row to unit length."""
    gaussian = np.random.randn(npoints, ndim)
    return geo_util.rowwise_normalize(gaussian)
# Random candidate hinge-axis sets (kept for the commented-out sweep below).
axes_list = [
    sample_spherical(4) for i in range(10000)
]
objectives = []
from testsamples import tetra
from tqdm import tqdm
# for axes in tqdm(axes_list):
# NOTE(review): the sweep over axes_list was replaced by a single run; `axes`
# is therefore undefined and the objectives.append(...) line below will raise
# a NameError — restore the loop or bind `axes` before running.
if True:
    # Build the test model and pull out its geometry/joint data.
    model = tetra.square_centering_axes()
    points = model.point_matrix()
    edges = model.edge_matrix()
    A = model.constraint_matrix()
    hinges = model.joints
    hinge_axes = np.array([h.axis for h in hinges])
    hinge_pivots = np.array([h.pivot_point for h in hinges])
    hinge_point_indices = model.joint_point_indices()
    # NOTE(review): this pin-first-beam constraint block is dead code — it is
    # overwritten by trivial_motions two statements later.
    extra_constraints = np.zeros((len(model.beams[0].points) * 3, points.shape[0] * 3))
    for r, c in enumerate(range(len(model.beams[0].points) * 3)):
        extra_constraints[r, c] = 1
    # Remove rigid-body (trivial) motions instead.
    trivial_motions = geo_util.trivial_basis(points, dim=3)
    extra_constraints = trivial_motions
    A = np.vstack([A, extra_constraints])
    M = spring_energy_matrix(points, edges, dim=3)
    # mathematical computation
    # Project the energy quadratic form onto the null space of the constraints
    # and whiten it (generalized eigenproblem via Cholesky of B^T B).
    B = scipy.linalg.null_space(A)
    T = np.transpose(B) @ B
    S = B.T @ M @ B
    L = cholesky(T)
    L_inv = inv(L)
    Q = np.linalg.multi_dot([L_inv.T, S, L_inv])
    pairs = geo_util.eigen(Q, symmetric=True)
    eigenvalues = np.array([v for v, e in pairs])
    # Degrees of freedom = number of (near-)zero eigenvalues.
    print("DoF:", np.sum(np.isclose(eigenvalues, 0)))
    obj, eigenvector = pairs[0]
    arrows = B @ eigenvector
    # Cross-check the smallest eigenvalue against the autograd pipeline.
    torch_obj = gradient_analysis(
        points,
        edges,
        hinge_axes,
        hinge_pivots,
        hinge_point_indices,
        extra_constraints=extra_constraints,
        iters=1
    ).detach().numpy()
    print(torch_obj, obj)
    # visualize_3D(points, edges=edges, arrows=arrows.reshape(-1, 3))
    objectives.append((obj, axes))
print(objectives)
#%%
print(max(objectives, key=lambda p: p[0]))
print(min(objectives, key=lambda p: p[0]))
| StarcoderdataPython |
92484 | from .CSVFileUploadService import *
from .TaskExecutionService import * | StarcoderdataPython |
3335376 | <reponame>OctavianLee/Dahlia
# -*- coding: utf-8 -*-
from .templates import SortTemplate
class PigeonholeSort(SortTemplate):
    """Pigeonhole sort for the integer items held in ``self.items``.

    Allocates one hole per distinct value in [min(items), max(items)] and
    writes the items back in ascending order, in place.
    """
    def sort(self):
        """Sort ``self.items`` in place using the pigeonhole sort algorithm."""
        if not self.items:
            return
        # Size the holes by the *value range*, not the item count: indexing
        # holes by item value needs max - min + 1 slots. The original sized
        # them by the list length, which raised IndexError for any item
        # larger than len(items), and could not handle negative values.
        low = min(self.items)
        high = max(self.items)
        holes = [[] for _ in range(high - low + 1)]
        for item in self.items:
            holes[item - low].append(item)
        # Concatenate the holes back into the original list.
        index = 0
        for hole in holes:
            for item in hole:
                self.items[index] = item
                index += 1
1755870 | import requests, re, json, time
class watcher:
    """Scrapes public Twitter/Instagram profile pages and stores the
    extracted posts on the instance (``self.tweet`` / ``self.medias``).

    NOTE(review): both methods depend on 2018-era page markup; they will
    silently produce empty results if the sites' HTML/JSON layout changed.
    """
    def twitterWatcher(self, user):
        """Fetch the user's Twitter page and set ``self.tweet`` to a dict of
        tweets keyed by unix timestamp; ``user`` may be a bare handle or a
        full profile URL. Empty dict on any failure."""
        dicTweet = {}
        if not user.startswith('http'):
            urlAccount = "https://twitter.com/"+user
        else:
            urlAccount = user
        req = requests.get(urlAccount)
        page = req.text
        if req.status_code == 200:
            try:
                # Extract (status id, unix timestamp) pairs from the raw HTML.
                tweets = re.findall(r"href=\"/.*/status/([0-9]+)\" .* data-time=\"([0-9]+)\" ", page)
                # NOTE(review): countX is incremented but never read afterwards.
                countX = len(tweets)
                for t in tweets:
                    tweet = urlAccount+"/status/"+t[0]
                    timestamp = int(t[1])
                    date = time.ctime(int(timestamp))
                    dicTweet[timestamp] = {"domain":"Twitter", "tweet":tweet, "date":date}
                    countX += 1
            except:
                # NOTE(review): bare except hides every parsing error and
                # degrades silently to an empty result.
                dicTweet = {}
        self.tweet = dicTweet
    def instagramWatcher(self, user):
        """Fetch the user's Instagram page and set ``self.medias`` to a dict
        of up to ~12 recent media entries keyed by unix timestamp."""
        if not user.startswith('http'):
            urlAccount = "https://instagram.com/"+user
        else:
            urlAccount = user
        picturesList = []
        req = requests.get(urlAccount)
        if req.status_code == 200:
            page = req.content.decode('utf-8')
            # The profile data is embedded as JSON in a <script> tag
            # (window._sharedData = {...};).
            jsonData = re.findall(r"<script type=\"text/javascript\">(.*);</script>", page)
            jsonDataFound = jsonData[0].replace("window._sharedData = ", "")
            private = re.findall(r"is_private\":(true|false)", page)
            values = json.loads(jsonDataFound)
            nbMedia = values['entry_data']['ProfilePage'][0]['graphql']['user']['edge_owner_to_timeline_media']['count']
            if nbMedia > 11:
                nbMedia = 11
            MediaDic = values['entry_data']['ProfilePage'][0]['graphql']['user']['edge_owner_to_timeline_media']['edges']
            countX = 0
            picturesList = {}
            # NOTE(review): `private` is a list of 'true'/'false' strings, so
            # this branch only runs when the is_private marker is absent from
            # the page, not when the account is actually public — confirm the
            # intended behaviour. Also `while countX <= nbMedia` can index one
            # past the available edges.
            if not private:
                while countX <= nbMedia:
                    displayMedia = MediaDic[countX]['node']['display_url']
                    legende = MediaDic[countX]['node']['edge_media_to_caption']['edges'][0]['node']['text']
                    isVideo = MediaDic[countX]['node']['is_video']
                    location = MediaDic[countX]['node']['location']
                    timestamp = MediaDic[countX]['node']['taken_at_timestamp']
                    date = time.ctime(timestamp)
                    try:
                        infoMedia = MediaDic[countX]['node']['accessibility_caption']
                    except:
                        infoMedia = ""
                    if isVideo:
                        typeMedia = "Video"
                    else:
                        typeMedia = "Photo"
                    picturesList[timestamp] = {"domain":"Instagram", "urlMedia":displayMedia, "type":typeMedia, "legende":legende, "info":infoMedia, "location":location, "date":date, "timestamp":timestamp}
                    countX += 1
            else:
                picturesList = {}
        self.medias = picturesList
12637 | <reponame>threefoldtech/Threefold-Circles
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 <NAME> <<EMAIL>>
# Copyright (C) 2014-2017 <NAME> <<EMAIL>>
# Copyright (C) 2014-2017 <NAME> <<EMAIL>>
# Copyright (C) 2014-2017 <NAME> <<EMAIL>>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from taiga.base.api import serializers
from taiga.base.fields import Field, MethodField
from taiga.base.neighbors import NeighborsSerializerMixin
from taiga.mdrender.service import render as mdrender
from taiga.projects.attachments.serializers import BasicAttachmentsInfoSerializerMixin
from taiga.projects.mixins.serializers import OwnerExtraInfoSerializerMixin
from taiga.projects.mixins.serializers import ProjectExtraInfoSerializerMixin
from taiga.projects.mixins.serializers import AssignedToExtraInfoSerializerMixin
from taiga.projects.mixins.serializers import StatusExtraInfoSerializerMixin
from taiga.projects.notifications.mixins import WatchedResourceSerializer
from taiga.projects.tagging.serializers import TaggedInProjectResourceSerializer
from taiga.projects.votes.mixins.serializers import VoteResourceSerializerMixin
class EpicListSerializer(VoteResourceSerializerMixin, WatchedResourceSerializer,
                         OwnerExtraInfoSerializerMixin, AssignedToExtraInfoSerializerMixin,
                         StatusExtraInfoSerializerMixin, ProjectExtraInfoSerializerMixin,
                         BasicAttachmentsInfoSerializerMixin,
                         TaggedInProjectResourceSerializer, serializers.LightSerializer):
    """Light epic serializer for list views (no description/comment fields)."""
    id = Field()
    ref = Field()
    project = Field(attr="project_id")
    created_date = Field()
    modified_date = Field()
    subject = Field()
    color = Field()
    epics_order = Field()
    client_requirement = Field()
    team_requirement = Field()
    version = Field()
    watchers = Field()
    is_blocked = Field()
    blocked_note = Field()
    is_closed = MethodField()
    user_stories_counts = MethodField()
    def get_is_closed(self, obj):
        """An epic counts as closed when its status exists and is flagged closed."""
        return obj.status is not None and obj.status.is_closed
    def get_user_stories_counts(self, obj):
        """Return the user story counts precomputed and attached by the queryset."""
        assert hasattr(obj, "user_stories_counts"), "instance must have a user_stories_counts attribute"
        return obj.user_stories_counts
class EpicSerializer(EpicListSerializer):
    """Full epic serializer: adds description/blocked-note (raw and
    markdown-rendered) plus a comment field."""
    comment = MethodField()
    blocked_note_html = MethodField()
    description = Field()
    description_html = MethodField()
    def get_comment(self, obj):
        # Always serialized as the empty string.
        return ""
    def get_blocked_note_html(self, obj):
        """Markdown-render the blocked note in the epic's project context."""
        return mdrender(obj.project, obj.blocked_note)
    def get_description_html(self, obj):
        """Markdown-render the description in the epic's project context."""
        return mdrender(obj.project, obj.description)
class EpicNeighborsSerializer(NeighborsSerializerMixin, EpicSerializer):
    """Epic serializer extended with the neighbor information contributed
    by NeighborsSerializerMixin."""
    pass
class EpicRelatedUserStorySerializer(serializers.LightSerializer):
    """Serializes the epic <-> user story relation: the two ids plus ordering."""
    epic = Field(attr="epic_id")
    user_story = Field(attr="user_story_id")
    order = Field()
| StarcoderdataPython |
46630 | <filename>dlex/tf/instance_v1.py
import logging
import os
import random
from collections import OrderedDict, namedtuple
from datetime import datetime
import tensorflow.compat.v1 as tf
from dlex import FrameworkBackend, TrainingProgress
from dlex.configs import Params
from dlex.datasets.tf import Dataset
from dlex.tf.models.base_v1 import BaseModelV1
from dlex.tf.utils.model_utils import get_model
from dlex.utils import get_num_iters_from_interval, get_num_seconds_from_interval, Datasets
from dlex.utils.logging import logger
from dlex.utils.model_utils import get_dataset
from tensorflow.estimator import LoggingTensorHook, CheckpointSaverListener, \
EstimatorSpec, TrainSpec, EvalSpec
from tqdm import tqdm
# This backend is written against the TF1 graph/session API.
tf.disable_v2_behavior()
# Bundle of evaluation outputs: `results` maps metric name -> value,
# `outputs` is a list of per-example dicts (input/reference/hypothesis).
EvaluationResults = namedtuple("EvaluationResults", "results outputs")
class TensorflowV1Backend(FrameworkBackend):
    """dlex framework backend running models on TensorFlow 1.x sessions.

    Offers two training paths: a hand-written session loop
    (``train_with_session``) and an Estimator-based one
    (``train_with_estimator``).
    """
    def __init__(
            self,
            params: Params = None,
            training_idx: int = None,
            report_queue=None):
        """Initialize the backend and raise TensorFlow's log level to INFO."""
        super().__init__(params, training_idx, report_queue)
        logging.getLogger("tensorflow").setLevel(logging.INFO)
        logger.info(f"Training started ({training_idx}).")
        # tf.enable_eager_execution()
        # tf.random.set_random_seed(params.seed)
        # X_train, y_train = dataset_train.load_data()
        # X_test, y_test = dataset_test.load_data()
        # train_generator = ImageDataGenerator(
        #     rescale=1.0/255, horizontal_flip=True,
        #     width_shift_range=4.0/32.0, height_shift_range=4.0/32.0)
        # test_generator = ImageDataGenerator(rescale=1.0/255)
        # y_train = to_categorical(y_train)
        # y_test = to_categorical(y_test)
    def load_model(self, mode) -> (BaseModelV1, Datasets):
        """Build the dataset (preparing/downloading it unless --no_prepare)
        and instantiate the model on the training set.

        :param mode: "train", "test" or "dev" (currently unused in the body)
        :return: (model, Datasets) tuple
        """
        args = self.configs.args
        params = self.params
        # Init dataset
        dataset_builder = get_dataset(params)
        assert dataset_builder, "Dataset not found."
        if not args.no_prepare:
            dataset_builder.prepare(download=args.download, preprocess=args.preprocess)
        datasets = Datasets(
            "tensorflow", dataset_builder,
            train_set=params.train.train_set,
            valid_set=params.train.valid_set,
            test_sets=params.test.test_sets)
        # Init model
        model_cls = get_model(params)
        model = model_cls(params, datasets.train_set)  # type: BaseModelV1
        # model.summary()
        # log model summary
        # parameter_details = [["Name", "Shape", "Trainable"]]
        # num_params = 0
        # num_trainable_params = 0
        # for n in tf.get_default_graph().as_graph_def().node:
        #    parameter_details.append([
        #        n.name,
        #        "test",
        #        "✓" if False else ""])
        #    num_params += np.prod(list(parameter.shape))
        #    if parameter.requires_grad:
        #        num_trainable_params += np.prod(list(parameter.shape))
        # s = table2str(parameter_details)
        # logger.debug(f"Model parameters\n{s}")
        # logger.debug(" - ".join([
        #    f"No. parameters: {num_params:,}",
        #    f"No. trainable parameters: {num_trainable_params:,}"
        # ]))
        # report.param_details = s
        # report.num_params = num_params
        # report.num_trainable_params = num_trainable_params
        # use_cuda = torch.cuda.is_available()
        # if use_cuda and params.gpu:
        #    gpus = [f"cuda:{g}" for g in params.gpu]
        #    model = DataParellelModel(model, gpus)
        #    logger.info("Start training using %d GPU(s): %s", len(params.gpu), str(params.gpu))
        #    torch.cuda.set_device(torch.device(gpus[0]))
        #    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        #    model.to(gpus[0])
        # else:
        #    model = DataParellelModel(model, ['cpu'])
        # logger.debug("Dataset: %s. Model: %s", str(dataset_builder), str(model_cls))
        # if use_cuda:
        #    logger.info("CUDA available: %s", torch.cuda.get_device_name(0))
        return model, datasets
    def run_train(self) -> None:
        """Entry point for training; delegates to the session-based loop."""
        self.train_with_session()
    def run_evaluate(self) -> None:
        """Restore the latest checkpoint and evaluate on every test set."""
        params = self.params
        model, datasets = self.load_model("test")
        model.build_graph()
        saver = tf.train.Saver(max_to_keep=1)
        if params.train.ema_decay_rate:
            ema_saver = tf.train.Saver(model.ema.variables_to_restore(), max_to_keep=5)
        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
            local_init = tf.local_variables_initializer()
            sess.run(local_init)
            model.load_checkpoint(sess, saver, tag="latest")
            # NOTE(review): ema_saver is only bound when ema_decay_rate is set;
            # this line raises NameError otherwise — guard it the same way.
            model.load_checkpoint(sess, ema_saver, tag="latest")
            for name, dataset in datasets.test_sets.items():
                res = self.evaluate_with_session(
                    sess,
                    model,
                    dataset,
                    output_path=os.path.join(params.log_dir, "results"),
                    output_tag=f"evaluate_val")
                logger.info(f"[{name}]: {str(res.results)}")
    def train_with_session(self) -> None:
        """Hand-written TF1 training loop.

        Builds the graph, optionally restores a checkpoint, iterates epochs
        over mini-batches feeding placeholders, periodically saves/logs, and
        finally evaluates on every test set and updates the report.
        """
        params = self.params
        args = self.args
        model, datasets = self.load_model("train")
        model.build_graph()
        logger.info("Successfully built model")
        logger.info("Training metrics: %s", list(model.metric_ops.keys()))
        step = 0
        if self.args.debug:
            # Interactive debugging: dump debug ops every few steps.
            next_debug_step = 0
            debug_ops = model.get_debug_ops()
        saver = tf.train.Saver(
            max_to_keep=1)
        if params.train.ema_decay_rate:
            ema_saver = tf.train.Saver(model.ema.variables_to_restore(), max_to_keep=5)
        self.report.set_model_summary(
            variable_names=[var.name for var in tf.trainable_variables()],
            variable_shapes=[var.shape.as_list() for var in tf.trainable_variables()],
            variable_trainable=[var.trainable for var in tf.trainable_variables()]
        )
        prog = TrainingProgress(params, len(datasets.train_set))
        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
            local_init = tf.local_variables_initializer()
            global_init = tf.global_variables_initializer()
            sess.graph.finalize()
            sess.run(global_init)
            if args.load:
                model.load_checkpoint(args.load, sess, saver)
                logger.info("Loaded checkpoint: %s", args.load)
            for epoch in range(1, params.train.num_epochs + 1):
                # Local variables (e.g. streaming metrics) reset per epoch.
                sess.run(local_init)
                prog.new_epoch(epoch)
                model.set_training(True)
                batch_size = self.params.train.batch_size * len(params.gpu)
                if self.params.dataset.shuffle:
                    datasets.train_set.shuffle()
                data = datasets.train_set.data
                batches = []
                for batch_start in range(0, len(data), batch_size):
                    batches.append(data[batch_start:batch_start + batch_size])
                # if self.params.shuffle:
                #     random.shuffle(batches)
                with tqdm(total=len(data), desc=f"Epoch {epoch}") as t:
                    for batch in batches:
                        feed = {}
                        datasets.train_set.populate_feed_dict(feed, model.placeholders, batch)
                        model.populate_feed_dict(feed, is_training=True)
                        _, global_step, *metrics = sess.run([
                            model.train_op,
                            model.global_step,
                            *list(model.metric_ops.values())], feed_dict=feed)
                        if self.args.debug and step >= next_debug_step:
                            # Print debug tensors and pause for the operator.
                            vals = sess.run(list(debug_ops.values()), feed_dict=feed)
                            for name, val in zip(debug_ops.keys(), vals):
                                logger.debug(f"{name}\n{val}")
                            input()
                            num_steps = input("Number of steps do you want to run (default: 1): ") or 1
                            next_debug_step += int(num_steps)
                        t.update(len(batch))
                        prog.update(len(batch))
                        t.set_postfix(dict(
                            **{key: val[0] for key, val in zip(model.metric_ops.keys(), metrics)}
                        ))
                        step += 1
                        # Save model
                        if prog.should_save():
                            model.save_checkpoint(sess, saver, "latest")
                        # Log
                        if prog.should_log():
                            logger.info(", ".join([
                                f"epoch: {epoch}",
                                f"global step: {global_step}",
                                f"progress: {int(prog.epoch_progress * 100)}%",
                                *[f"{name}: {val[0]:.4f}" for name, val in zip(model.metric_ops.keys(), metrics)]
                            ]))
            model.save_checkpoint(sess, saver, "latest")
            # model.load_checkpoint(sess, saver, "latest")
            for name, dataset in datasets.test_sets.items():
                res = self.evaluate_with_session(
                    sess,
                    model,
                    dataset,
                    output_path=params.checkpoint_dir,
                    output_tag="latest")
                self.report.add_epoch_results(res.results)
                self.update_report()
                logger.info(res.results)
            self.report.results = self.report.current_results
            self.update_report()
            return self.report
    def evaluate_with_session(
            self,
            sess,
            model: BaseModelV1,
            dataset: Dataset,
            output_path: str = None,
            output_tag: str = None) -> EvaluationResults:
        """Run the model over ``dataset`` in the given session.

        Collects predictions/references batch by batch, computes every metric
        in ``params.test.metrics``, optionally writes results to
        ``output_path`` and runs the dataset's evaluation script.
        Returns an EvaluationResults(results, outputs) tuple.
        """
        batch_size = self.params.train.batch_size
        data = dataset.data
        all_preds = []
        all_refs = []
        batches = []
        outputs = []
        for batch_start in range(0, len(data), batch_size):
            batches.append(data[batch_start:batch_start + batch_size])
        with tqdm(total=len(data), desc=f"Eval") as t:
            for batch in batches:
                feed = {}
                dataset.populate_feed_dict(feed, model.placeholders, batch)
                model.populate_feed_dict(feed, is_training=False)
                pred, ref, *metrics = sess.run(
                    [model.predictions, model.references, *list(model.metric_ops.values())],
                    feed_dict=feed)
                pred = pred if type(pred) == list else list(pred)
                ref = ref if type(ref) == list else list(ref)
                assert len(pred) == len(ref) == len(batch)
                all_preds += pred
                all_refs += ref
                t.update(len(batch))
                t.set_postfix(**{key: val[0] for key, val in zip(model.metric_ops.keys(), metrics)})
                # Keep human-readable per-example triples for logging/output.
                for p, b in zip(pred, batch):
                    str_input, str_ground_truth, str_predicted = dataset.format_output(p, b)
                    outputs.append(dict(
                        input=str_input,
                        reference=str_ground_truth,
                        hypothesis=str_predicted))
                # logger.debug(outputs[-1])
        results = {}
        for metric in self.params.test.metrics:
            results[metric] = dataset.evaluate(all_preds, all_refs, metric, output_path)
        if self.params.test.output and output_path:
            path = dataset.write_results_to_file(
                all_preds,
                # sample_ids,
                output_path,
                output_tag,
                self.params.test.output)
            dataset.builder.run_evaluation_script(path)
        # Log a random sample of 20 outputs for inspection.
        for output in random.choices(outputs, k=20):
            logger.debug(output)
        return EvaluationResults(
            results={key: results[key] for key in results},
            outputs=outputs)
    def train_with_estimator(self):
        """Alternative training path using tf.estimator.train_and_evaluate.

        NOTE(review): relies on ``self.model`` and ``self.datasets``
        attributes that this class never assigns — confirm they are set by
        the base class before using this path.
        """
        run_config = tf.estimator.RunConfig(
            model_dir=self.params.checkpoint_dir,
            save_checkpoints_steps=get_num_iters_from_interval(self.params.train.save_every),
            save_checkpoints_secs=get_num_seconds_from_interval(self.params.train.save_every),
            save_summary_steps=100,
            keep_checkpoint_max=1
        )
        def model_fn(features, labels, mode, params):
            # Build loss/train op/metrics from the wrapped model.
            output = self.model.forward(features)
            loss = self.model.get_loss(features, output)
            train_op = self.model.get_train_op(loss)
            metric_ops = self.model.get_metric_ops(features, output)
            return EstimatorSpec(
                mode=mode, loss=loss,
                train_op=train_op,
                eval_metric_ops=metric_ops,
                training_hooks=[
                    TqdmHook(OrderedDict(loss=loss), len(self.datasets.train_set), params['batch_size']),
                    tf.estimator.LoggingTensorHook(dict(loss=loss), every_n_iter=10)
                ],
                evaluation_hooks=[
                    TqdmHook(OrderedDict(metrics=metric_ops['acc']), len(self.datasets.test), params['batch_size'])
                ])
        estimator = tf.estimator.Estimator(
            model_fn=model_fn,
            params=dict(batch_size=self.params.train.batch_size),
            config=run_config)
        self.report.launch_time = datetime.now()
        num_train_steps = int(len(self.datasets.train_set) / self.params.train.batch_size * self.params.train.num_epochs)
        train_spec = TrainSpec(
            input_fn=self.datasets.train_set.input_fn,
            max_steps=num_train_steps)
        eval_spec = EvalSpec(
            input_fn=self.datasets.test.input_fn,
            steps=5,
            start_delay_secs=150,
            throttle_secs=200
        )
        logger.debug(train_spec)
        logger.info("Training started.")
        # estimator.train(
        #    input_fn=datasets.train._input_fn,
        #    max_steps=num_train_steps,
        #    hooks=[
        #        TqdmHook(model.loss, len(datasets.train), params.train.batch_size)
        #    ]
        # )
        tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
        logger.info("Training done.")
    def set_seed(self):
        """Seed the base backend's RNGs plus TensorFlow's graph-level seed."""
        super().set_seed()
        tf.set_random_seed(self.params.random_seed)
class TqdmHook(tf.estimator.SessionRunHook):
    """Estimator session hook that drives a tqdm progress bar.

    Requests the global step plus the given `postfix` tensors on every run
    and shows their latest values as the bar's postfix; the bar wraps around
    `total` so the description can display the current epoch number.
    """
    def __init__(self, postfix: OrderedDict, total, batch_size):
        # postfix: name -> tensor mapping shown next to the bar.
        self.postfix = postfix
        # self._timer = SecondOrStepTimer(every_steps=1)
        self._should_trigger = False
        self._iter_count = 0
        self._pbar = None
        self.total = total
        self.batch_size = batch_size
    def begin(self):
        pass
        # self._timer.reset()
    @property
    def pbar(self):
        # Lazily create the bar so construction happens in the worker process.
        if not self._pbar:
            self._pbar = tqdm(desc="Train", total=self.total)
        return self._pbar
    def before_run(self, run_context):
        # self._should_trigger = self._timer.should_trigger_for_step(self._iter_count)
        # Ask the session to also evaluate the global step and postfix tensors.
        return tf.estimator.SessionRunArgs(dict(
            global_step=tf.train.get_or_create_global_step(), **self.postfix))
    def after_run(self, run_context, run_values):
        # if self._should_trigger:
        res = run_values.results
        self.pbar.update(self.batch_size)
        # Wrap the bar at `total` samples so it restarts every epoch.
        if self.pbar.n > self.total:
            self.pbar.n = self.pbar.n % self.total
        self.pbar.set_description("Epoch %d" % ((res['global_step'] * self.batch_size) // self.total + 1))
        pf = OrderedDict({name: str(res[name]) for name in self.postfix})
        self.pbar.set_postfix(pf)
        self.pbar.refresh()
class EvalLogHook(LoggingTensorHook):
    """Logging hook meant to dump evaluation run values at DEBUG level.

    NOTE(review): LoggingTensorHook.__init__ requires a `tensors` argument
    (and one of every_n_iter/every_n_secs); the bare `super().__init__()`
    call below likely raises — confirm whether this hook is actually used.
    """
    def __init__(self, metric_ops):
        super().__init__()
        self.metric_ops = metric_ops
    def begin(self):
        super().begin()
    def after_run(self, run_context, run_values):
        super().after_run(run_context, run_values)
        logger.debug(run_values)
class CheckpointSaverListenerEx(CheckpointSaverListener):
    """Checkpoint listener that logs a message after each save.

    The commented-out block sketches running evaluation after every save;
    it references attributes (estimator/datasets) this class never sets.
    """
    def __init__(self):
        pass
    def begin(self):
        pass
    def after_save(self, session, global_step_value):
        logger.info("Checkpoint saved.")
        # logger.info("Evaluating model...")
        # results = self.estimator.evaluate(
        #    input_fn=self.datasets.test.input_fn,
        #    steps=None)
        # logger.debug(str(results))
146502 | <gh_stars>0
"""empty message
Revision ID: 1096526e6a14
Revises: c93540c85c6c
Create Date: 2020-10-02 14:43:04.971989
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '1096526e6a14'
down_revision = 'c93540c85c6c'
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision: add a nullable 20-char string column
    `mac_address` to the `camera` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('camera', sa.Column('mac_address', sa.String(length=20), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop the `mac_address` column from `camera`."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('camera', 'mac_address')
    # ### end Alembic commands ###
| StarcoderdataPython |
1776847 | <filename>tests/sentry/api/endpoints/test_assistant.py
from __future__ import absolute_import
from copy import deepcopy
from exam import fixture
from django.core.urlresolvers import reverse
from django.utils import timezone
from sentry.assistant import manager
from sentry.models import AssistantActivity
from sentry.testutils import APITestCase
class AssistantActivityTest(APITestCase):
    """Tests for the v1 assistant endpoint (guide seen/dismissed tracking)."""
    def setUp(self):
        super(AssistantActivityTest, self).setUp()
        self.login_as(user=self.user)
        self.path = reverse("sentry-api-0-assistant")
        self.guides = manager.all()
    def test_invalid_inputs(self):
        """PUTs with a missing status, unknown guide id or bad status are 400s."""
        # Missing status
        resp = self.client.put(self.path, {"guide_id": 1})
        assert resp.status_code == 400
        # Invalid guide id
        resp = self.client.put(self.path, {"guide_id": 1938, "status": "dismissed"})
        assert resp.status_code == 400
        # Invalid status
        resp = self.client.put(self.path, {"guide_id": 1, "status": "whats_my_name_again"})
        assert resp.status_code == 400
    def test_activity(self):
        """GET initially reports every guide unseen; dismissing one flips its flag."""
        guides_with_seen = deepcopy(manager.all())
        for g in guides_with_seen:
            guides_with_seen[g]["seen"] = False
        resp = self.client.get(self.path)
        assert resp.status_code == 200
        assert resp.data == guides_with_seen
        # Dismiss the guide and make sure it is not returned again.
        # NOTE(review): assumes guide_id 3 is "issue_stream" — keep in sync
        # with the assistant guide registry.
        resp = self.client.put(self.path, {"guide_id": 3, "status": "dismissed"})
        assert resp.status_code == 201
        resp = self.client.get(self.path)
        guides_with_seen["issue_stream"]["seen"] = True
        assert resp.status_code == 200
        assert resp.data == guides_with_seen
    def test_validate_guides(self):
        # Steps in different guides should not have the same target.
        # NOTE(review): indexing `self.guides.values()` works only on Python 2;
        # on Python 3 dict_values is not subscriptable — wrap in list() when porting.
        guides = self.guides.values()
        for i in range(len(guides)):
            for j in range(0, i):
                steps_i = set(s["target"] for s in guides[i]["steps"])
                steps_j = set(s["target"] for s in guides[j]["steps"])
                assert not (steps_i & steps_j)
class AssistantActivityV2Test(APITestCase):
    """Read tests for the v2 assistant endpoint (?v2=1 list format)."""
    endpoint = "sentry-api-0-assistant"
    @fixture
    def guides(self):
        # All registered assistant guides, keyed by name.
        return manager.all()
    def setUp(self):
        super(AssistantActivityV2Test, self).setUp()
        self.create_organization(owner=self.user)
        self.login_as(user=self.user)
    def test_simple(self):
        """With no recorded activity every guide is returned as unseen."""
        resp = self.get_response(qs_params={"v2": 1})
        assert resp.status_code == 200
        assert len(resp.data) == len(manager.all())
        for guide in resp.data:
            assert guide["seen"] is False
    def test_dismissed(self):
        """A dismissed guide is reported as seen."""
        guide = "issue_stream"
        AssistantActivity.objects.create(
            user=self.user, guide_id=self.guides[guide]["id"], dismissed_ts=timezone.now()
        )
        resp = self.get_response(qs_params={"v2": 1})
        assert resp.status_code == 200
        assert {"guide": guide, "seen": True} in resp.data
    def test_viewed(self):
        """A viewed guide is reported as seen."""
        guide = "issue_stream"
        AssistantActivity.objects.create(
            user=self.user, guide_id=self.guides[guide]["id"], viewed_ts=timezone.now()
        )
        resp = self.get_response(qs_params={"v2": 1})
        assert resp.status_code == 200
        assert {"guide": guide, "seen": True} in resp.data
class AssistantActivityV2UpdateTest(APITestCase):
    """Write (PUT) tests for the v2 assistant endpoint."""
    endpoint = "sentry-api-0-assistant"
    method = "put"
    @fixture
    def guides(self):
        # All registered assistant guides, keyed by name.
        return manager.all()
    def setUp(self):
        super(AssistantActivityV2UpdateTest, self).setUp()
        self.create_organization(owner=self.user)
        self.login_as(user=self.user)
    def test_invalid_inputs(self):
        """Unknown guides, missing guide, and bad status values are 400s."""
        resp = self.get_response(guide="guide_does_not_exist")
        assert resp.status_code == 400
        resp = self.get_response(guide="guide_does_not_exist", status="dismissed")
        assert resp.status_code == 400
        resp = self.get_response(status="dismissed")
        assert resp.status_code == 400
        resp = self.get_response(guide="issue", status="whats_my_name_again")
        assert resp.status_code == 400
    def test_dismissed(self):
        """Dismissing sets dismissed_ts only."""
        guide = "issue_stream"
        resp = self.get_response(guide=guide, status="dismissed")
        assert resp.status_code == 201
        activity = AssistantActivity.objects.get(user=self.user, guide_id=self.guides[guide]["id"])
        assert activity.dismissed_ts
        assert not activity.viewed_ts
    def test_viewed(self):
        """Viewing sets viewed_ts only."""
        guide = "issue_stream"
        resp = self.get_response(guide=guide, status="viewed")
        assert resp.status_code == 201
        activity = AssistantActivity.objects.get(user=self.user, guide_id=self.guides[guide]["id"])
        assert activity.viewed_ts
        assert not activity.dismissed_ts
| StarcoderdataPython |
3219040 | <gh_stars>1-10
# coding: utf8
from __future__ import absolute_import
from pycropml.transpiler.errors import PseudoCythonTypeCheckError
from pycropml.transpiler.helpers import serialize_type
from Cython.Compiler import ExprNodes
from six.moves import zip
# based on pseudo
V = '_' # we don't really typecheck or care for a lot of the arg types, so just use this
_ = ()
'''Methods used to:
    - check data-type compatibility
    - compare inferred types against declared types
    - check calls to builtin functions for consistency
'''
def builtin_type_check(namespace, function, receiver, args):
    """Type-check a call to builtin ``namespace.function`` against TYPED_API.

    Returns the list of expected argument types (with generic placeholders
    resolved) followed by the call's return type as the last element.
    Raises PseudoCythonTypeCheckError for an unknown function or an
    arity/type mismatch.
    """
    fs = TYPED_API[namespace]
    # 'library' namespaces are indirected through a '_<namespace>' entry.
    if fs == 'library':
        fs = TYPED_API['_%s' % namespace]
    # print(namespace, function, receiver, args, TYPED_API[namespace])
    # input(0)
    if function not in fs:
        raise PseudoCythonTypeCheckError('wrong usage of %s' % str(function))
    x = fs[function]
    # Label used in error messages: '#' for method calls, ':' for functions.
    a = namespace + '#' + function if receiver else namespace + ':' + function
    # Resolve generic placeholders from the receiver's (or first arg's) type.
    if namespace == 'List' or namespace == 'array':
        if not isinstance(receiver['pseudo_type'], list):
            generics = {'@t': args[0]['pseudo_type']}
            # NOTE(review): mutates the receiver node in place, specializing a
            # bare "List" type from the first argument.
            if receiver['pseudo_type']=="List": receiver["pseudo_type"]=["List",args[0]['pseudo_type']]
        else:
            generics = {'@t': receiver['pseudo_type'][1]}
    elif namespace == 'dict':
        generics = {'@k': receiver['pseudo_type'][1], '@v': receiver['pseudo_type'][2]}
    else:
        generics = {}
    s = []
    # A leading '*' marks a variadic signature: every arg shares one type.
    if x[0][0] == '*':
        e = x[0][1:]
        for arg in args:
            s.append(simplify(e, generics))
            arg_check(s[-1], arg, a)
    else:
        # Fixed arity: last entry of x is the return type.
        if len(x) - 1 != len(args):
            raise PseudoCythonTypeCheckError("%s expects %d args not %d" % (a, len(x) - 1, len(args)))
        # NOTE(review): argument types are collected but not enforced here —
        # the arg_check call below is commented out.
        for e, arg in zip(x[:-1], args):
            s.append(simplify(e, generics))
            #arg_check(s[-1], arg, a) to do
        s.append(simplify(x[-1], generics))
    return s
def arg_check(expected_type, args, a):
    """Raise PseudoCythonTypeCheckError unless the argument's pseudo type is
    compatible with ``expected_type`` ('Any' matches everything; 'Number'
    matches int/float/double); ``a`` labels the call in the error message."""
    actual = args['pseudo_type']
    if expected_type == actual or expected_type == 'Any':
        return
    if expected_type == 'Number' and actual in ('int', 'float', 'double'):
        return
    raise PseudoCythonTypeCheckError(
        '%s expected %s not %s' % (a, serialize_type(expected_type), serialize_type(actual)))
def simplify(kind, generics):
    """Resolve generic placeholders ('@t', '@k', '@v') in a type expression.

    *kind* is either a type name (str) or a nested list type such as
    ['List', '@t']; lists are resolved recursively.  With no generics the
    expression is returned untouched.
    """
    if not generics:
        return kind
    if isinstance(kind, str):
        # Only substitute names that are placeholders AND known generics.
        return generics[kind] if kind[0] == '@' and kind in generics else kind
    # Compound type: resolve each component.
    return [simplify(part, generics) for part in kind]
# refactoring here in future
def add(l, r):
    """Type-check '+' for operand types *l* and *r*.

    Returns [left_type, right_type, result_type].  Unknown or mismatched
    combinations yield a result of "unknown" rather than raising, deferring
    the decision to later passes.

    Fix: the original contained the same ``l == "unknown" or r == "unknown"``
    branch twice; both duplicates were redundant with the final fallback and
    have been folded into it (behavior unchanged).
    """
    if l == 'float' and r in ['float', 'int'] or r == 'float' and l in ['float', 'int']:
        return [l, r, 'float']
    elif l == 'double' and r in ['float', 'int', 'double'] or r == 'double' and l in ['float', 'int', 'double']:
        return [l, r, 'double']
    elif l == 'int' and r == 'int':
        return [l, r, 'int']
    elif l == 'str' and r == 'str':
        # String concatenation.
        return [l, r, 'str']
    elif isinstance(l, list) and l[0] == "List" and l == r:
        # List concatenation of identically-typed lists.
        return [l, r, l]
    else:
        # Covers operands typed "unknown" and any unsupported combination.
        return [l, r, "unknown"]
def sub(l, r):
    """Type-check '-' for operand types *l* and *r*.

    Returns [left_type, right_type, result_type]; unsupported combinations
    yield "unknown" instead of raising.

    Fix: removed the duplicated, dead ``l == "unknown" or r == "unknown"``
    branches (the fallback already returned the same value).
    NOTE(review): the 'str' and List branches mirror add() even though '-'
    is not defined for them in Python -- kept for behavior compatibility.
    """
    if l == 'float' and r in ['float', 'int'] or r == 'float' and l in ['float', 'int']:
        return [l, r, 'float']
    elif l == 'double' and r in ['float', 'int', 'double'] or r == 'double' and l in ['float', 'int', 'double']:
        return [l, r, 'double']
    elif l == 'int' and r == 'int':
        return [l, r, 'int']
    elif l == 'str' and r == 'str':
        return [l, r, 'str']
    elif isinstance(l, list) and l[0] == "List" and l == r:
        return [l, r, l]
    else:
        # Covers operands typed "unknown" and any unsupported combination.
        return [l, r, "unknown"]
def mul(l, r):
    """Type-check '*' for operand types *l* and *r*.

    Returns [left_type, right_type, result_type].  int * str and
    int * List mean sequence repetition; unsupported pairs yield "unknown".
    """
    floats = ('float', 'double')
    numerics = ('float', 'int', 'double')
    if (l in floats and r in numerics) or (r in floats and l in numerics):
        return [l, r, 'float']
    if l == 'int' and r == 'int':
        return [l, r, 'int']
    # Sequence repetition: the result keeps the sequence operand's type.
    if l == 'int' and (r == 'str' or (isinstance(r, list) and r[0] == "List")):
        return [l, r, r]
    if r == 'int' and (l == 'str' or (isinstance(l, list) and l[0] == "List")):
        return [l, r, l]
    # Unknown or unsupported combination: defer instead of raising.
    return [l, r, "unknown"]
def div(l, r, lo=None):
    """Type-check '/' for operand types *l* and *r*.

    Returns [left_type, right_type, result_type]; unsupported pairs yield
    "unknown".  *lo* (line/position info for error reporting) is accepted
    for interface compatibility but currently unused.
    """
    floats = ('float', 'double')
    numerics = ('float', 'int', 'double')
    if (l in floats and r in numerics) or (r in floats and l in numerics):
        return [l, r, 'float']
    if l == 'int' and r == 'int':
        # NOTE: int/int is typed int here (C-style integer division).
        return [l, r, 'int']
    # Unknown or unsupported combination: defer instead of raising.
    return [l, r, "unknown"]
def pow_(l, r):
    """Type-check '**' for operand types *l* and *r*.

    Returns [left_type, right_type, result_type].  Unlike add/sub/mul/div,
    an unsupported combination raises PseudoCythonTypeCheckError.
    """
    if (l == 'float' and r in ('float', 'int')) or (r == 'float' and l in ('float', 'int')):
        return [l, r, 'float']
    if l == 'int' and r == 'int':
        return [l, r, 'int']
    if l == "unknown" or r == "unknown":
        return [l, r, "unknown"]
    raise PseudoCythonTypeCheckError("wrong types for **: %s and %s" % (serialize_type(l), serialize_type(r)))
def mod(l, r):
    """Type-check '%' for operand types *l* and *r*.

    Supports integer modulo and C-style string formatting
    (str % str or str % tuple-of-strings); anything else raises.
    """
    if l == 'int' and r == 'int':
        return [l, r, 'int']
    if l == 'str' and r in ('str', ['array', 'str']):
        # String formatting: the right-hand side is normalized to an
        # array of strings in the returned signature.
        return [l, ['array', 'str'], 'str']
    raise PseudoCythonTypeCheckError("wrong types for modulo : %s and %s" % (serialize_type(l), serialize_type(r)))
def and_(l, r):
    """Type-check logical 'and': both operands must be bool."""
    if l == r == 'bool':
        return 'bool'
    raise PseudoCythonTypeCheckError("wrong types for and: %s and %s" % (serialize_type(l), serialize_type(r)))
def or_(l, r):
    """Type-check logical 'or': both operands must be bool."""
    if l == r == 'bool':
        return 'bool'
    raise PseudoCythonTypeCheckError("wrong types for or: %s and %s" % (serialize_type(l), serialize_type(r)))
def binary_and(l, r):
    """Type-check bitwise '&': defined only for int & int."""
    if l == 'int' and r == 'int':
        return l
    raise PseudoCythonTypeCheckError("wrong types for &: %s and %s" % (serialize_type(l), serialize_type(r)))
def binary_or(l, r):
    """Type-check '|': int | int (bitwise) or Set | Set (union)."""
    if l == r and l in ('int', 'Set'):
        return l
    raise PseudoCythonTypeCheckError("wrong types for |: %s and %s" % (serialize_type(l), serialize_type(r)))
# Signature table for builtin namespaces.
# Each function entry is a list of expected argument types followed by the
# return type as the LAST element.  A single entry whose type starts with '*'
# marks a variadic signature (every argument is checked against that type).
# '@t' / '@k' / '@v' are generic placeholders resolved against the receiver
# by builtin_type_check()/simplify().  'operators' entries map to the checker
# functions above, each returning [left_type, right_type, result_type].
#
# Fix: the 'math' table defined the 'abs' key twice with the same value
# (a silent duplicate -- the second literal overwrote the first); the
# duplicate has been removed.  Behavior is unchanged.
TYPED_API = {
    'system':{
        "max":['Number','Number']
    },
    'math': {
        'abs': ["Number", "Number"],
        'tan': ['Number', 'double'],
        'atan': ['Number', 'double'],
        'sin': ['Number', 'double'],
        'asin': ['Number', 'double'],
        'cos': ['double', 'double'],
        'acos': ['double', 'double'],
        'log': ['double', 'double', 'double'],
        'ln': ["Number", "double"],
        'sqrt': ['Number', 'double'],
        'ceil': ['double', 'int'],
        'exp': ['double','double'],
        'PI': 'PI',  # constant, not a callable signature
        'Max': ['Number','Number'],
        'Min': ['Number','Number'],
        'Round': ['double','double'],
        'floor': ["double", "int"],
    },
    'operators': {
        '+': add,
        '-': sub,
        '*': mul,
        '/': div,
        '**': pow_,
        '%': mod,
        '&': binary_and,
        '|': binary_or,
    },
    'List': {
        'append': ['@t', ['List', '@t']],
        'sum': [['List', '@t'], '@t'],
        'extend': [["List",'@t'], ['List', '@t']],
        'contains?': ["@t", "bool"],
        'Remove': ['@t','bool'],
        'Insert': ['int','@t', ["List", '@t']],
        'len': ['int'],
        'index': ['@t','int'],
    },
    # 'array' deliberately mirrors 'List' (same methods, same signatures).
    'array': {
        'append': ['@t', ['List', '@t']],
        'sum': [['List', '@t'], '@t'],
        'extend': [["List",'@t'], ['List', '@t']],
        'contains?': ["@t", "bool"],
        'Remove': ['@t','bool'],
        'Insert': ['int','@t', ["List", '@t']],
        'len': ['int'],
        'index': ['@t','int'],
    },
}
# Pseudo method names mapped to the Python idiom they correspond to, keyed by
# receiver type.  Values are human-readable display strings (e.g. for error
# messages or documentation), not callables.
# NOTE(review): purpose inferred from the mapping contents; no caller is
# visible in this chunk -- confirm against the rest of the transpiler.
ORIGINAL_METHODS = {
    'List': {
        'Add': 'Add(element)',
        'pop': 'pop',
        'insert': 'insert(element)',
        'insert_at': 'insert(element, index)',
        'index': 'index(element)',
        'concat': '+',
        'repeat': '*',
        'extend': 'extend(other)',
        'remove': 'remove',
        'length': 'len',
        'copy':'copy',
        'map': 'list comprehension / map',
        'filter': 'list comprehension / filter'
    },
    'dict': {
        'keys': 'keys',
        'values': 'values',
        'length': 'len',
        'copy':'copy',
        'get': 'get(element)'
    },
    'int': {
        'int': 'int',
        'float': 'float'
    },
    'float': {
        'int': 'int',
        'float': 'float'
    },
    'str': {
        'find': 'index(substr)',
        'join': 'join(elements)',
        'split': 'split(delimiter)',
        'c_format': '%',
        'format': 'format(*elements)',
        'upper': 'upper',
        'lower': 'lower',
        'title': 'title',
        'center': 'center',
        'find_from': 'index(substr, index)',
        'int': 'int'
    },
    'array': {
        'length': 'len',
        'find': 'find(element)',
        'count': 'count(element)',
        'append': 'append(element)'
    },
    'tuple': {
        'length': 'len'
    }
}
# NOTE(review): the groupings below are inferred from the constant names;
# confirm against their usages elsewhere in the transpiler.
# Source-language builtin type names -> pseudo type names.
BUILTIN_TYPES = {
    'int': 'int',
    'float': 'float',
    'object': 'Object',
    'str': 'str',
    'List': 'List',
    'dict': 'dict',
    'tuple': 'tuple',
    'bool': 'bool',
    'array': 'array',
    "datetime": 'datetime'
}
# Reverse lookup: pseudo type name -> builtin type name.
PSEUDON_BUILTIN_TYPES = {v: k for k, v in BUILTIN_TYPES.items()}
# Scalar (non-container) types only.
BUILTIN_SIMPLE_TYPES = {
    'int': 'int',
    'float': 'float',
    'str': 'str',
    'boolean': 'bool',
    'double':'double'
}
# Types presumably allowed as dict keys.
KEY_TYPES = {'str', 'int', 'float', 'bool','double'}
PSEUDO_KEY_TYPES = {'str', 'int', 'float', 'bool','double'}
# Free functions the checker recognizes as builtins.
BUILTIN_FUNCTIONS = {'print', 'input', 'str', 'set', 'int','float', 'len', 'any', 'all', 'sum', 'min', 'max', 'abs','pow', "mean", "count", "copy", "integr"}
# Functions rejected at top level (must appear inside an expression).
FORBIDDEN_TOP_LEVEL_FUNCTIONS = {'map', 'filter'}
# Types that can appear in a for-loop / iteration context.
ITERABLE_TYPES = {'str', "List", 'dict', 'array'}
# Type required in boolean/test positions (if/while conditions).
TESTABLE_TYPE = 'bool'
# Types supporting subscripting with [].
INDEXABLE_TYPES = {'str', "List", 'dict', 'array', 'tuple'}
# Types supporting ordering comparisons (<, <=, >, >=).
COMPARABLE_TYPES = {'int', 'float', 'str'}
# Types for which len() is defined.
TYPES_WITH_LENGTH = {'str', "List", 'dict', 'array', 'tuple', 'Set'}
# Numeric scalar types.
NUMBER_TYPES = {'int', 'float','double'}
##############################################
| StarcoderdataPython |
33687 | <gh_stars>0
from django.conf.urls import include, url
from django.contrib import admin
from httpproxy.views import HttpProxy
from django.views.generic.base import RedirectView
from django.http import HttpResponse
# Route table for the Nuremberg Trials Project site.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^transcripts/', include('nuremberg.transcripts.urls')),
    url(r'^documents/', include('nuremberg.documents.urls')),
    url(r'^photographs/', include('nuremberg.photographs.urls')),
    url(r'^search/', include('nuremberg.search.urls')),
    # Catch-all: content app handles the site root and everything unmatched above.
    url(r'^', include('nuremberg.content.urls')),
    # Proxy image requests to the upstream Harvard server -- presumably so
    # images are served same-origin; the commented RedirectView alternative
    # was left in by the original author.  TODO confirm rationale.
    url(r'^proxy_image/printing/(?P<url>.*)$',
        # RedirectView.as_view(url='http://nuremberg.law.harvard.edu/imagedir/HLSL_NUR_printing/%(url)s')),
        HttpProxy.as_view(base_url='http://nuremberg.law.harvard.edu/imagedir/HLSL_NUR_printing')),
    url(r'^proxy_image/(?P<url>.*)$',
        # RedirectView.as_view(url='http://s3.amazonaws.com/nuremberg-documents/%(url)s'))
        HttpProxy.as_view(base_url='http://s3.amazonaws.com/nuremberg-documents')),
    # Inline robots.txt: keeps crawlers out of /search/.
    url(r'^robots.txt$', lambda r: HttpResponse("User-agent: *\nDisallow: /search/", content_type="text/plain")),
]
# Custom error views (dotted paths resolved by Django).
handler400 = 'nuremberg.core.views.handler400'
handler403 = 'nuremberg.core.views.handler403'
handler404 = 'nuremberg.core.views.handler404'
handler500 = 'nuremberg.core.views.handler500'
| StarcoderdataPython |
#!/usr/bin/python
# -*- coding:utf-8 -*-
from gsiot.v3 import *
from gsiot.v3.file.jsonfile import gsJsonFile
class dbFile(gsJsonFile):
    """JSON-backed data file: loads the file's contents into ``self.data``
    on construction and persists them with :meth:`Savefile`.

    NOTE(review): relies on names pulled in by ``from gsiot.v3 import *``
    (``os``, ``json``, ``edict``) and on the ``gsJsonFile`` base class for
    ``Open``/``Read``/``Write``/``Close``/``Clear`` and ``self.filename`` /
    ``self.data`` -- confirm against the gsiot package.
    """
    def __init__(self,filename):
        gsJsonFile.__init__(self,filename)
        self.fields=[]    # field names; not populated anywhere in this chunk
        self.flag=False   # forwarded to toJsonString() when saving -- semantics unclear here, verify
        self.Readfile()   # eagerly load the file (or create it if missing)
    def Savefile(self):
        """Serialize ``self.data`` and overwrite the backing file.

        A failed serialization is silently swallowed (bare except) and
        nothing is written.
        """
        try:data=self.data.toJsonString(self.flag)
        except:data=""
        if data!="":
            self.Open("w")
            self.Write(data)
            self.Close()
    def Readfile(self):
        """Load the backing file into ``self.data`` and return it.

        Missing file: creates it via :meth:`Savefile`.  Empty file:
        resets ``self.data`` to a fresh ``edict``.
        """
        if os.path.isfile(self.filename)==True:
            self.Open("r")
            data=self.Read()
            if data!="":
                self.Clear()
                # SECURITY: eval() executes arbitrary code if the file is
                # attacker-controlled; json.loads is only the fallback when
                # eval fails.  Consider parsing with json first.
                try:self.data.update(eval(data))
                except:self.data.readdict(json.loads(data))
            else:self.data=edict()
            self.Close()
        else:
            self.Savefile()
        return self.data
# Fix: the original line was fused with dataset-separator residue
# ("| StarcoderdataPython |"), making it a syntax error; restored the
# intended empty subclass.
class dbRecord(gsJsonFile):
    """Placeholder record type; currently identical to gsJsonFile."""
    pass
from django.contrib import admin
from froide.account.models import Profile
class ProfileAdmin(admin.ModelAdmin):
    """Admin configuration for the account Profile model."""
    # Raw-ID widget for the user FK avoids rendering every user in a select box.
    raw_id_fields = ('user',)
    # Admin search box matches on the related user's identity fields.
    search_fields = ['user__username', 'user__first_name', 'user__last_name', 'user__email']
    list_display = ('user', 'address')
# Register Profile with the customized options above.
admin.site.register(Profile, ProfileAdmin)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.