id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
3246014 | <gh_stars>10-100
# -*- coding: utf-8 -*-
# pylint: disable=E0401
import logging
from cryptoauthlib import constant as ATCA_CONSTANTS
log = logging.getLogger("ateccX08a.tests_selftest")
def run(device=None):
    """Run the ATECCx08A on-chip self tests and log each result.

    Args:
        device: object exposing ``atcab_selftest(mode)``; must not be None.

    Raises:
        ValueError: if no device is supplied.
        RuntimeError: if any self test reports failure.
    """
    if not device:
        raise ValueError("device")
    # (mode constant, human-readable label) for each supported self test.
    tests = (
        (ATCA_CONSTANTS.SELFTEST_MODE_RNG, "RNG"),
        (ATCA_CONSTANTS.SELFTEST_MODE_ECDSA_SIGN_VERIFY, "ECDSA_SIGN_VERIFY"),
        (ATCA_CONSTANTS.SELFTEST_MODE_ECDH, "ECDH"),
        (ATCA_CONSTANTS.SELFTEST_MODE_AES, "AES"),
        (ATCA_CONSTANTS.SELFTEST_MODE_SHA, "SHA"),
        (ATCA_CONSTANTS.SELFTEST_MODE_ALL, "ALL")
    )
    for mode, mode_str in tests:
        status = device.atcab_selftest(mode)
        # Explicit check instead of `assert`: asserts are stripped under
        # `python -O`, which would silently skip the verification.
        if not status:
            raise RuntimeError("atcab_selftest %s failed" % mode_str)
        log.debug("atcab_selftest %s: %s", mode_str, status)
| StarcoderdataPython |
3115 | # pyRasp
# Copyright (c) <NAME> 2020. Licensed under MIT.
# requirement :
# Python 3
# pip install pyyaml
# pip install requests
# pip install f90nml
from downloadGFSA import downloadGFSA
from prepare_wps import prepare_wps
from ungrib import ungrib
from metgrid import metgrid
from prepare_wrf import prepare_wrf
from real import real
from wrf import wrf
# Run the full WPS/WRF pipeline end to end: download input data, run the
# preprocessing stages in order, then the model itself.
result = downloadGFSA(True)  # download GFS analysis data; True presumably enables a download option -- confirm
prepare_wps(result)          # configure the WPS namelist from the downloaded data
ungrib()                     # extract meteorological fields from GRIB files
metgrid()                    # horizontally interpolate fields onto the model grid
prepare_wrf(result)          # configure the WRF namelist
real()                       # generate initial/boundary conditions
wrf()                        # run the WRF model
| StarcoderdataPython |
3391841 | import sys
from collections import deque
import matplotlib as mpl
import numpy as np
from PyQt5 import QtCore, QtWidgets
import Runge_Kutta as Rk
from ui import Ui
# Constants
rc0 = 0.6            # core-radius parameter of the potential
m = 1                # particle mass
a = 4.3              # potential strength
t0 = 0               # initial time
t = t0               # current simulation time
dt = 0.025           # integration time step
tmaxn = 200          # number of history samples kept for the time plots
ti = 0               # iteration counter
r0 = 1               # initial position
Pr0 = 1              # initial radial momentum
r = r0               # current position
Pr = Pr0             # current radial momentum
r_range = (-6, 6)    # plotted position range
Pr_range = (-6, 6)   # plotted momentum range
V_max = (-6, 6)      # plotted range for the potential axis
# Global Objects
r_rt = deque(maxlen=tmaxn)   # position history, newest first
Pr_rt = deque(maxlen=tmaxn)  # momentum history, newest first
# Time axis for the history plots; 200 == tmaxn here -- presumably intentional, confirm.
t_rt = np.linspace(0, -tmaxn * dt, 200)
rS = np.linspace(*r_range, 1000)  # position samples for plotting V(r)
# Cached matplotlib artists (replaced on redraw) and blit backgrounds.
stream = None
Vgraph1 = None
Vgraph2 = None
bgCache_rPr = None
bgCache_rV = None
bgCache_rt = None
bgCache_Prt = None
# Problem Functions
V = lambda x: -a / x + a * rc0**2 / (3 * x**3)  # potential V(r)
rDotf = lambda Pr: Pr / m                       # dr/dt
PrDotf = lambda r: a * rc0**2 / r**4 - a / r**2 # dPr/dt = -dV/dr
def PrepareFigure():
    """Create the 2x2 subplot grid and style/label each axis.

    Exposes the four axes as module-level globals used by the other
    plotting helpers.
    """
    global ax_rPr, ax_rV, ax_rt, ax_Prt
    axes = MyUI.canvas.fig.subplots(2, 2)
    ((ax_rPr, ax_rV), (ax_rt, ax_Prt)) = axes
    MyUI.canvas.fig.tight_layout(h_pad=2, w_pad=1)
    # Common grid / origin styling for every axis (axes.flat iterates the
    # same four axes the original reshape(4, 1) loop did).
    for axis in axes.flat:
        axis.cla()
        axis.yaxis.grid(color='gainsboro',
                        linestyle='dotted',
                        linewidth=1.5)
        axis.xaxis.grid(color='gainsboro',
                        linestyle='dotted',
                        linewidth=0.8)
        axis.axhline(0, linestyle='dotted', color='grey')
        axis.axvline(0, linestyle='dotted', color='grey')
    ax_rPr.set_title('Position vs Momentum')
    ax_rPr.set_xlabel(r'$r$', loc='right')
    ax_rPr.set_ylabel(r'$P_r$', loc='top', rotation=0)
    ax_rPr.set_xlim(r_range)
    ax_rPr.set_ylim(Pr_range)
    ax_rV.set_title('Position vs Potential')
    ax_rV.set_xlabel(r'$r$', loc='right')
    ax_rV.set_ylabel(r'$V(r)$', loc='top', rotation=0)
    ax_rV.set_xlim(r_range)
    ax_rV.set_ylim(V_max)
    ax_rt.set_title('Time vs Position')
    ax_rt.set_xlabel(r'$t$', loc='right')
    ax_rt.set_ylabel(r'$r(t)$', loc='top', rotation=0)
    ax_rt.set_xlim((-tmaxn * dt, tmaxn * dt / 5))
    ax_rt.set_ylim(r_range)
    ax_Prt.set_title('Time vs Momentum')
    ax_Prt.set_xlabel(r'$t$', loc='right')
    ax_Prt.set_ylabel(r'$Pr(t)$', loc='top', rotation=0)
    ax_Prt.set_xlim((-tmaxn * dt, tmaxn * dt / 5))
    # Was r_range (copy-paste); the momentum axis should use Pr_range.
    # (The two ranges are currently equal, so the plot is unchanged.)
    ax_Prt.set_ylim(Pr_range)
def plot_fields():
    """Draw the phase-space flow field and the potential curve.

    Removes any previously drawn streamplot/potential artists first so the
    function can be called repeatedly (e.g. after parameter changes).
    """
    global stream, Vgraph1, Vgraph2
    density = 50
    # r vs Pr Plot
    if stream is not None:  # idiomatic None test (was `not (stream == None)`)
        stream.lines.remove()
        # streamplot arrows are FancyArrowPatch children of the axes and are
        # not removed with the lines collection; remove them individually.
        for art in ax_rPr.get_children():
            if not isinstance(art, mpl.patches.FancyArrowPatch):
                continue
            art.remove()
    rMesh, PrMesh = np.meshgrid(rS, np.linspace(*Pr_range, density))
    u = rDotf(PrMesh)
    v = PrDotf(rMesh)
    stream = ax_rPr.streamplot(rMesh,
                               PrMesh,
                               u,
                               v,
                               density=1.5,
                               arrowsize=0.7,
                               linewidth=0.5,
                               color='blue')
    # r vs V plot
    if Vgraph1 is not None:
        Vgraph1.remove()
        Vgraph2.remove()
    # Plot the potential in two halves to skip the singularity at r = 0.
    Vgraph1, = ax_rV.plot(rS[:499], V(rS[:499]), color='blue', linewidth=0.7)
    Vgraph2, = ax_rV.plot(rS[500:], V(rS[500:]), color='blue', linewidth=0.7)
    MyUI.canvas.draw()
def plot_points(animated=False):
    """Create the moving artists: state markers in the phase/potential plots
    and the history lines in the time plots.

    animated=True marks the artists for blitting (they are then drawn
    manually via draw_artist instead of on every canvas.draw).
    """
    global pointr, pointV, linert, linePrt
    pointr, = ax_rPr.plot(r,
                          Pr,
                          marker='o',
                          color='r',
                          markersize=4,
                          animated=animated)
    pointV, = ax_rV.plot(r,
                         V(r),
                         marker='o',
                         color='r',
                         markersize=4,
                         animated=animated)
    linert, = ax_rt.plot(t_rt[:len(r_rt)],
                         r_rt,
                         color='r',
                         marker='o',
                         markevery=[0],
                         markersize=4,
                         linewidth=0.8,
                         animated=animated)
    # NOTE(review): x data is sliced by len(r_rt) but y data is Pr_rt; the two
    # deques are appended in lockstep so the lengths match -- confirm.
    linePrt, = ax_Prt.plot(t_rt[:len(r_rt)],
                           Pr_rt,
                           color='g',
                           marker='o',
                           markevery=[0],
                           markersize=4,
                           linewidth=0.8,
                           animated=animated)
def animate():
    """Timer callback: advance the integrator one step, record the new state
    in the history deques and redraw via blitting."""
    global t, r, Pr, r_rt, Pr_rt, ti
    t, r, Pr = next(rk4)
    r_rt.appendleft(r)
    Pr_rt.appendleft(Pr)
    update_points(blit=True)
    ti += 1
def start_animation():
    """(Re)build the RK4 integrator for the current state, switch the moving
    artists to animated mode, cache the static backgrounds and start the
    animation timer."""
    global ti
    solve_rk4()
    refresh_plots(animated=True)
    cache_bg()
    update_points(blit=True)
    ti = 0
    timer.start()
def cache_bg():
    """Snapshot each axis' static background so blitting can restore it
    before redrawing only the moving artists."""
    global bgCache_rPr, bgCache_rV, bgCache_rt, bgCache_Prt
    bgCache_rPr = MyUI.canvas.copy_from_bbox(ax_rPr.bbox)
    bgCache_rV = MyUI.canvas.copy_from_bbox(ax_rV.bbox)
    bgCache_rt = MyUI.canvas.copy_from_bbox(ax_rt.bbox)
    bgCache_Prt = MyUI.canvas.copy_from_bbox(ax_Prt.bbox)
def stop_animation():
    """Stop the animation timer and redraw the plots in non-animated mode."""
    timer.stop()
    refresh_plots()
def Redraw_fields():
    """Re-read the physical parameters m, a, rc0 from the UI spin boxes and
    redraw the flow field, potential curve and moving artists."""
    global m, a, rc0
    m = MyUI.txt_m.value()
    a = MyUI.txt_a.value()
    rc0 = MyUI.txt_rc0.value()
    plot_fields()
    refresh_plots()
def refresh_plots(animated=False):
    """Replace the moving artists (state markers and history lines) and
    redraw the whole canvas."""
    pointr.remove()
    pointV.remove()
    linert.remove()
    linePrt.remove()
    plot_points(animated=animated)
    MyUI.canvas.draw()
def update_points(blit=False):
    """Push the current (r, Pr) state into the plot artists and redraw.

    With blit=True only the cached axis backgrounds are restored and the
    moving artists redrawn (fast path used by the animation timer); with
    blit=False the whole canvas is redrawn.
    """
    # Wrap scalars in lists: Line2D data setters expect sequences
    # (scalars are deprecated in newer matplotlib releases).
    pointr.set_xdata([r])
    pointr.set_ydata([Pr])
    pointV.set_xdata([r])
    pointV.set_ydata([V(r)])
    linert.set_xdata(t_rt[:len(r_rt)])
    linert.set_ydata(r_rt)
    linePrt.set_xdata(t_rt[:len(Pr_rt)])
    linePrt.set_ydata(Pr_rt)
    if blit:  # was `blit == True`; direct truth test is the Python idiom
        MyUI.canvas.restore_region(bgCache_rPr)
        ax_rPr.draw_artist(pointr)
        MyUI.canvas.blit(ax_rPr.bbox)
        MyUI.canvas.restore_region(bgCache_rV)
        ax_rV.draw_artist(pointV)
        MyUI.canvas.blit(ax_rV.bbox)
        MyUI.canvas.restore_region(bgCache_rt)
        ax_rt.draw_artist(linert)
        MyUI.canvas.blit(ax_rt.bbox)
        MyUI.canvas.restore_region(bgCache_Prt)
        ax_Prt.draw_artist(linePrt)
        MyUI.canvas.blit(ax_Prt.bbox)
        MyUI.canvas.flush_events()
    else:
        MyUI.canvas.draw()
def on_click(event: mpl.backend_bases.MouseEvent):
    """Mouse handler: clicking on the phase or potential plot re-seeds the
    simulation state.

    A click in the r-Pr axes sets both position and momentum; a click in the
    r-V axes sets only the position.  Either click clears the recorded
    trajectories.  Any running animation is stopped first.
    """
    global r_rt, Pr_rt, r, Pr
    stop_animation()
    if event.inaxes not in (ax_rPr, ax_rV):
        return
    # Reset the history for the new initial condition (logic previously
    # duplicated across the two branches).
    r_rt = deque(maxlen=tmaxn)
    Pr_rt = deque(maxlen=tmaxn)
    r = event.xdata
    if event.inaxes is ax_rPr:
        Pr = event.ydata
    update_points()
def solve_rk4():
    """(Re)create the classic 4th-order Runge-Kutta generator for the current
    initial state (r, Pr); stored in the module-level `rk4`."""
    global rk4

    def f(t, r, Pr):
        # dr/dt = Pr / m
        return rDotf(Pr)

    def g(t, r, Pr):
        # dPr/dt = -dV/dr
        return PrDotf(r)

    rk4 = Rk.RKG_Generator(F=[f, g],
                           xi=0,
                           yi=[r, Pr],
                           h=dt,
                           Bt=Rk.Butcher_Tableau('Classic-4th'))
if __name__ == '__main__':
    # Build the Qt application, wire the UI controls to the plot/animation
    # handlers, draw the initial figure and enter the event loop.
    app = QtWidgets.QApplication(sys.argv)
    MyUI = Ui()
    MyUI.Button_Run.clicked.connect(start_animation)
    MyUI.Button_Pause.clicked.connect(stop_animation)
    MyUI.Button_Redraw.clicked.connect(Redraw_fields)
    MyUI.canvas.mpl_connect("button_press_event", on_click)
    # Seed the parameter spin boxes with the default constants.
    MyUI.txt_rc0.setValue(rc0)
    MyUI.txt_m.setValue(m)
    MyUI.txt_a.setValue(a)
    PrepareFigure()
    plot_fields()
    plot_points()
    # Animation timer: one integration step every 50 ms.
    timer = QtCore.QTimer()
    timer.setInterval(50)
    timer.timeout.connect(animate)
    MyUI.showMaximized()
    app.exec_()
3295386 | #!/bin/python3
# The MIT License (MIT)
# Copyright © 2021 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""GPT2 Language Modelling miner
The genesis miner.
Example:
$ python miners/gpt2_genesis.py
To run with a config file:
$ python miners/gpt2_genesis.py --config <path to config file>
"""
import argparse
import math
import os
import sys
import time
import torch
import time
import bittensor
import torch.nn.functional as F
from termcolor import colored
from munch import Munch
from loguru import logger
from torch.utils.tensorboard import SummaryWriter
from bittensor.utils.model_utils import ModelToolbox
from synapses.gpt2 import GPT2Synapse
from torch.nn.utils import clip_grad_norm_
from transformers import AdamW
from qqdm import qqdm, format_str
from bittensor.dataloaders.text_dataloader import GenesisTextDataloader
class Miner( bittensor.miner.Miner ):
    """GPT2 genesis miner.

    Trains a GPT2 synapse on the genesis text dataset, serves it on the
    bittensor network, emits row weights to the chain after each epoch and
    checkpoints the model whenever the average training loss improves.
    """

    def __init__(self, config: Munch = None, **kwargs):
        if config is None:  # was `config == None`; identity test is correct for None
            config = Miner.default_config()
        bittensor.config.Config.update_with_kwargs(config.miner, kwargs)
        Miner.check_config(config)
        self.config = config
        # ---- Model ----
        self.model = GPT2Synapse( self.config )
        # ---- Model Load/Save tools ----
        self.model_toolbox = ModelToolbox(GPT2Synapse, AdamW)
        # ---- Optimizer ----
        self.optimizer = self.configure_optimizers()
        self.lr = self.config.miner.learning_rate
        # Running loss statistics: total and the remote/local/distillation
        # components, all initialized to +inf so the first epoch checkpoints.
        self.training_loss = math.inf
        self.best_train_loss = math.inf
        self.rloss = math.inf
        self.lloss = math.inf
        self.dloss = math.inf
        # ---- Dataset ----
        # The Genesis Dataset:
        # The dataset used to train Adam and his first 100 children.
        self.dataset = GenesisTextDataloader(self.config.miner.batch_size_train, self.model.get_block_size())
        self.tokens = 0  # tokens processed so far (drives LR decay)
        super( Miner, self ).__init__( self.config, **kwargs )

    @staticmethod
    def default_config() -> Munch:
        """Build a config from this miner's argparse defaults."""
        parser = argparse.ArgumentParser()
        Miner.add_args(parser)
        config = bittensor.config.Config.to_config(parser)
        return config

    @staticmethod
    def add_args(parser: argparse.ArgumentParser):
        """Register this miner's command-line arguments on *parser*."""
        parser.add_argument(
            '--miner.learning_rate',
            default=3e-2,
            type=float,
            help='Training initial learning rate.'
        )
        parser.add_argument(
            '--miner.weight_decay',
            default=0.25,
            type=float,
            help='Model parameter weight decay.'
        )
        parser.add_argument(
            '--miner.lr_decay',
            default=True,
            type=bool,
            help='learning rate decay params: linear warmup followed by cosine decay to 10%% of original.'
        )
        parser.add_argument(
            '--miner.warmup_tokens',
            default=375e6,
            type=float,
            help='A linear LR warmup over the first miner.warmup_tokens tokens (default is 365 million)'
        )
        parser.add_argument(
            '--miner.final_tokens',
            default=260e9,
            type=float,
            help='At what point we reach 10%% of original LR'
        )
        parser.add_argument(
            '--miner.clip_gradients',
            default=1.0,
            type=float,
            help='Implement gradient clipping to avoid exploding loss on smaller architectures.'
        )
        parser.add_argument(
            '--miner.n_epochs',
            default=int(sys.maxsize),
            type=int,
            help='Number of training epochs.'
        )
        parser.add_argument(
            '--miner.epoch_length',
            default=500,
            type=int,
            help='Iterations of training per epoch'
        )
        parser.add_argument(
            '--miner.batch_size_train',
            default=2,
            type=int,
            help='Training batch size.'
        )
        parser.add_argument('--miner.name', default='gpt2_genesis', type=str, help='Trials for this miner go in miner.root / (wallet_cold - wallet_hot) / miner.name ')
        GPT2Synapse.add_args( parser )
        bittensor.miner.Miner.add_args( parser )
        GenesisTextDataloader.add_args( parser )

    @staticmethod
    def check_config(config: Munch):
        """Validate the miner-specific config values, then delegate."""
        assert config.miner.batch_size_train > 0, "batch_size_train must a positive value"
        assert config.miner.learning_rate > 0, "learning_rate must be a positive value."
        bittensor.miner.Miner.check_config( config )
        GenesisTextDataloader.check_config( config )

    def configure_optimizers(self):
        """
        This long function is unfortunately doing something very simple and is being very defensive:
        We are separating out all parameters of the model into two buckets: those that will experience
        weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
        We are then returning the PyTorch optimizer object.
        """
        # separate out all parameters to those that will and won't experience regularizing weight decay
        decay = set()
        no_decay = set()
        whitelist_weight_modules = (torch.nn.Linear, )
        blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding, torch.nn.Tanh)
        for mn, m in self.model.named_modules():
            for pn, p in m.named_parameters():
                fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
                if pn.endswith('bias'):
                    # all biases will not be decayed
                    no_decay.add(fpn)
                elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
                    # weights of whitelist modules will be weight decayed
                    decay.add(fpn)
                elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
                    # weights of blacklist modules will NOT be weight decayed
                    no_decay.add(fpn)
        # special case the position embedding parameter in the root GPT module as not decayed
        no_decay.add('pos_emb')
        # validate that we considered every parameter
        param_dict = {pn: p for pn, p in self.model.named_parameters()}
        inter_params = decay & no_decay
        union_params = decay | no_decay
        assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), )
        assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \
            % (str(param_dict.keys() - union_params), )
        # create the pytorch optimizer object
        optim_groups = [
            {"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": self.config.miner.weight_decay},
            {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
        ]
        optimizer = torch.optim.AdamW(optim_groups, lr=self.config.miner.learning_rate, betas=(0.9, 0.95))
        return optimizer

    # --- Main loop ----
    def run (self):
        """Top-level mining loop: serve the model, train an epoch, emit row
        weights to the chain and checkpoint on loss improvement."""
        # ---- Subscribe ----
        with self:
            # ---- Weights ----
            self.row = self.metagraph.row.to(self.model.device)
            # --- Run state ---
            self.global_step = 0
            # --- Loop for epochs ---
            for self.epoch in range(self.config.miner.n_epochs):
                # ---- Serve ----
                self.axon.serve( self.model )
                # ---- Train Model ----
                self.train()
                # If model has borked for some reason, we need to make sure it doesn't emit weights
                # Instead, reload into previous version of model
                if torch.any(torch.isnan(torch.cat([param.view(-1) for param in self.model.parameters()]))):
                    self.model, self.optimizer = self.model_toolbox.load_model(self.config)
                    continue
                # ---- Emitting weights ----
                try:
                    self.metagraph.set_weights(self.row, wait_for_inclusion = True) # Sets my row-weights on the chain.
                    # ---- Sync metagraph ----
                    self.metagraph.sync() # Pulls the latest metagraph state (with my update.)
                # Was a bare `except:`, which also swallowed KeyboardInterrupt
                # and SystemExit; narrow it to Exception.
                except Exception:
                    logger.error("Failed to set weights and sync metagraph! Could be a connection issue.")
                self.row = self.metagraph.row.to(self.model.device)
                # ---- Update Tensorboard ----
                self.dendrite.__to_tensorboard__(self.tensorboard, self.global_step)
                self.metagraph.__to_tensorboard__(self.tensorboard, self.global_step)
                self.axon.__to_tensorboard__(self.tensorboard, self.global_step)
                # ---- Save best loss and model ----
                if self.training_loss < self.best_train_loss: #self.epoch % 10 == 0:
                    self.best_train_loss = self.training_loss # update best train loss
                    self.model_toolbox.save_model(
                        self.config.miner.full_path,
                        {
                            'epoch': self.epoch,
                            'model_state_dict': self.model.state_dict(),
                            # /3: training_loss sums three components (local,
                            # remote, distillation); this stores the average.
                            'loss': self.best_train_loss/3,
                            'optimizer_state_dict': self.optimizer.state_dict(),
                            'rloss' : self.rloss,
                            'lloss': self.lloss,
                            'dloss': self.dloss,
                        }
                    )
                self.tensorboard.add_scalar('Neuron/Train_loss', self.training_loss, self.global_step)
                logger.info("This epoch's training losses: L-Loss: {:.2f} | R-Loss: {:.2f} | D-Loss: {:.2f} | avg: {:.2f} ... Current best average training loss: {:.2f}".format(self.lloss, self.rloss, self.dloss, self.training_loss/3, self.best_train_loss/3))

    def decay_learning_rate(self, batch):
        """Decay the learning rate based on the progress thus far.
        Adjusts self.lr (and every optimizer param group) according to the
        number of tokens processed so far: linear warmup over the first
        miner.warmup_tokens tokens, then cosine decay down to 10%.

        Args:
            batch (torch.Tensor): the current token batch; non-padding
                (>= 0) entries are counted towards self.tokens.
        """
        if self.config.miner.lr_decay:
            # number of tokens processed this step
            self.tokens += (batch >= 0).sum()
            if self.tokens < self.config.miner.warmup_tokens:
                # linear warmup
                lr_mult = float(self.tokens) / float(max(1, self.config.miner.warmup_tokens))
            else:
                # cosine learning rate decay
                progress = float(self.tokens - self.config.miner.warmup_tokens) / float(max(1, self.config.miner.final_tokens - self.config.miner.warmup_tokens))
                lr_mult = max(0.1, 0.5 * (1.0 + math.cos(math.pi * progress)))
            self.lr = self.config.miner.learning_rate * lr_mult
            for param_group in self.optimizer.param_groups:
                param_group['lr'] = self.lr
        else:
            self.lr = self.config.miner.learning_rate

    def get_lr(self):
        """Return the learning rate of the first optimizer param group
        (all groups are kept in sync by decay_learning_rate)."""
        for param_group in self.optimizer.param_groups:
            return param_group['lr']

    # ---- Train Epoch ----
    def train(self):
        """Run one training epoch, updating the model, the row weights and
        the running loss statistics."""

        def run_epoch():
            self.model.train(True)
            losses = []
            rlosses = []
            llosses = []
            dlosses = []
            # we train for an epoch.
            logger.info("Preparing dataset batch...")
            # Set up the dataloader
            dataloader = self.dataset.dataloader(self.config.miner.epoch_length)
            pbar = qqdm(enumerate(dataloader), total=len(dataloader), desc=format_str('blue', f'Epoch Progress'))
            for it, batch in pbar:  # was `for it, (batch)`; parentheses were redundant
                # ---- Forward pass ----
                batch = batch.to(self.model.device)
                output = self.model.remote_forward(self, batch, training=True)
                # ---- Backward pass ----
                loss = output.local_target_loss + output.distillation_loss + output.remote_target_loss
                loss.backward()
                # ---- Gradient Step ----
                clip_grad_norm_(self.model.parameters(), self.config.miner.clip_gradients)
                self.optimizer.step()
                self.optimizer.zero_grad()
                self.decay_learning_rate(batch)
                # Add losses up
                losses.append(loss.item())
                llosses.append(output.local_target_loss.item())
                rlosses.append(output.remote_target_loss.item())
                dlosses.append(output.distillation_loss.item())
                # ---- Train row weights ----
                batch_weights = torch.mean(output.router.weights, axis = 0).to(self.model.device) # Average over batch.
                self.row = (1 - 0.03) * self.row + 0.03 * batch_weights # Moving avg update.
                self.row = F.normalize(self.row, p = 1, dim = 0) # Ensure normalization.
                # ---- Logging ----
                index = self.metagraph.state.index_for_uid[self.metagraph.uid]
                pbar.set_infos({
                    'GS': colored('{}'.format(self.global_step), 'red'),
                    'LS': colored('{}'.format(it), 'blue'),
                    'Epoch': colored('{}'.format(self.epoch+1), 'green'),
                    'L-loss': colored('{:.5f}'.format(output.local_target_loss.item()), 'red'),
                    'R-loss': colored('{:.5f}'.format(output.remote_target_loss.item()), 'blue'),
                    'D-loss': colored('{:.5f}'.format(output.distillation_loss.item()), 'green'),
                    'lr': colored('{:e}'.format(self.lr), 'white'),
                    'nPeers': self.metagraph.n,
                    'Stake(\u03C4)': float(self.metagraph.S[index]),
                    'Rank(\u03C4)': float(self.metagraph.R[index]),
                    'Incentive(\u03C4/block)': float(self.metagraph.I[index]),
                    'Axon': self.axon.__str__(),
                    'Dendrite': self.dendrite.__str__(),
                })
                self.tensorboard.add_scalar('Neuron/Rloss', output.remote_target_loss.item(), self.global_step)
                self.tensorboard.add_scalar('Neuron/Lloss', output.local_target_loss.item(), self.global_step)
                self.tensorboard.add_scalar('Neuron/Dloss', output.distillation_loss.item(), self.global_step)
                self.global_step += 1
            # Per-epoch averages of each loss component.
            avg_loss = sum(losses) / len(losses)
            self.rloss = sum(rlosses) / len(rlosses)
            self.lloss = sum(llosses) / len(llosses)
            self.dloss = sum(dlosses) / len(dlosses)
            self.training_loss = avg_loss

        run_epoch()
if __name__ == "__main__":
    # ---- Build and Run ----
    # Construct the miner from command-line args/defaults, log the resolved
    # config and enter the mining loop.
    miner = Miner()
    logger.info(bittensor.config.Config.toString(miner.config))
    miner.run()
| StarcoderdataPython |
1640325 | """ Tests for validation report results, relies on test for loaders passing """
from decimal import DivisionByZero
from pathlib import Path
import os
from pytest import MonkeyPatch
import pytest
from dp_tools.bulkRNASeq.entity import BulkRNASeqSample
from dp_tools.bulkRNASeq.loaders import (
load_BulkRNASeq_STAGE_00,
load_BulkRNASeq_STAGE_01,
)
from dp_tools.bulkRNASeq.vv_protocols import STAGE, BulkRNASeq_VVProtocol
@pytest.fixture(autouse=True)
def mock_dev_exceptions(monkeypatch):
    """Shrink the set of exceptions the check model tolerates so that any
    unhandled developer exception propagates during the tests.

    Note: ``(DivisionByZero)`` was not a tuple — just a parenthesized class;
    the trailing comma makes it the intended one-element tuple (both forms
    are accepted by ``except`` clauses, so behavior is compatible).
    """
    monkeypatch.setattr(
        "dp_tools.core.check_model.ALLOWED_DEV_EXCEPTIONS", (DivisionByZero,)
    )  # ensure unhandled developer exceptions are raised
def test_bulkRNASeq_STAGE00_validation_paired(caplog, glds194_dataSystem_STAGE00):
    """This tests validation as it would be run on dataset after demultiplexing"""
    CAPLEVEL = 20  # logging.INFO
    caplog.set_level(CAPLEVEL)
    ds = glds194_dataSystem_STAGE00
    vv_protocol = BulkRNASeq_VVProtocol(
        dataset=ds.dataset, dry_run=True, protocol_name="only raw"
    )
    with caplog.at_level(CAPLEVEL):
        vv_protocol.validate_all()
    # Flag containers are keyed by entity level.
    assert isinstance(vv_protocol.flags["dataset"], dict)
    assert isinstance(vv_protocol.flags["sample"], dict)
    assert isinstance(vv_protocol.flags["component"], dict)
    # second, run with full validation
    with caplog.at_level(CAPLEVEL):
        caplog.clear()
        with MonkeyPatch.context() as m:
            vv_protocol.validate_all()
            df = vv_protocol.flags_to_df()
            # df_verbose is currently unused -- presumably kept for debugging.
            df_verbose = vv_protocol.flags_to_df(schema="verbose")
    # assert that no failing flags were raised
    # assert df["flag_code"].max() == 20 # not needed as this tests the truncated data rather than the logic
    # check if appropriate number of flags are raised
    # Currently:
    #   Dataset check : 2
    #   Sample check : 1 per sample
    #   Component checks :
    #     Reads : 1 per component
    assert len(df) == 41
    assert [0] == list(
        df["flag_code"].unique()
    )  # only the dry run code should be returned
def test_bulkRNASeq_STAGE00_validation_paired_no_dry_run(
    caplog, glds194_dataSystem_STAGE00
):
    """This tests validation as it would be run on dataset after demultiplexing"""
    CAPLEVEL = 20  # logging.INFO
    caplog.set_level(CAPLEVEL)
    ds = glds194_dataSystem_STAGE00
    vv_protocol = BulkRNASeq_VVProtocol(dataset=ds.dataset, protocol_name="only raw")
    with caplog.at_level(CAPLEVEL):
        vv_protocol.validate_all()
    # Flag containers are keyed by entity level.
    assert isinstance(vv_protocol.flags["dataset"], dict)
    assert isinstance(vv_protocol.flags["sample"], dict)
    assert isinstance(vv_protocol.flags["component"], dict)
    # second, run with full validation
    with caplog.at_level(CAPLEVEL):
        caplog.clear()
        with MonkeyPatch.context() as m:
            vv_protocol.validate_all()
            df = vv_protocol.flags_to_df()
            # df_verbose is currently unused -- presumably kept for debugging.
            df_verbose = vv_protocol.flags_to_df(schema="verbose")
    # assert that no failing flags were raised
    # assert df["flag_code"].max() == 20 # not needed as this tests the truncated data rather than the logic
    # check if appropriate number of flags are raised
    # Currently:
    #   Dataset check : 2
    #   Sample check : 1 per sample
    #   Component checks :
    #     Reads : 1 per component
    assert len(df) == 41
def test_bulkRNASeq_STAGE00_validation_paired_with_skips(
    caplog, glds194_dataSystem_STAGE00
):
    """This tests validation as it would be run on dataset after demultiplexing"""
    CAPLEVEL = 20  # logging.INFO
    caplog.set_level(CAPLEVEL)
    ds = glds194_dataSystem_STAGE00
    vv_protocol = BulkRNASeq_VVProtocol(
        dataset=ds.dataset,
        protocol_name="only raw",
        dry_run=True,
        skip_these_checks={"DATASET_RAWREADS_0001"},
    )
    with caplog.at_level(CAPLEVEL):
        vv_protocol.validate_all()
    # Flag containers are keyed by entity level.
    assert isinstance(vv_protocol.flags["dataset"], dict)
    assert isinstance(vv_protocol.flags["sample"], dict)
    assert isinstance(vv_protocol.flags["component"], dict)
    # second, run with full validation
    with caplog.at_level(CAPLEVEL):
        caplog.clear()
        with MonkeyPatch.context() as m:
            vv_protocol.validate_all()
            df = vv_protocol.flags_to_df()
            # df_verbose is currently unused -- presumably kept for debugging.
            df_verbose = vv_protocol.flags_to_df(schema="verbose")
    # assert that no failing flags were raised
    # assert df["flag_code"].max() == 20 # not needed as this tests the truncated data rather than the logic
    # check if appropriate number of flags are raised
    # Currently:
    #   Dataset check : 2
    #   Sample check : 1 per sample
    #   Component checks :
    #     Reads : 1 per component
    assert len(df) == 41
    assert 0 in df["flag_code"].values  # ensure dry run flag codes returned
    assert 1 in df["flag_code"].values  # ensure skip flag codes returned
def test_bulkRNASeq_STAGE00_validation_paired_with_config(
    caplog, glds194_dataSystem_STAGE00
):
    """This tests validation as it would be run on dataset after demultiplexing"""
    CAPLEVEL = 20  # logging.INFO
    caplog.set_level(CAPLEVEL)
    ds = glds194_dataSystem_STAGE00
    vv_protocol = BulkRNASeq_VVProtocol(
        dataset=ds.dataset, config=("bulkRNASeq", "0"), protocol_name="only raw"
    )
    with caplog.at_level(CAPLEVEL):
        vv_protocol.validate_all()
    # Flag containers are keyed by entity level.
    assert isinstance(vv_protocol.flags["dataset"], dict)
    assert isinstance(vv_protocol.flags["sample"], dict)
    assert isinstance(vv_protocol.flags["component"], dict)
    # second, run with full validation
    with caplog.at_level(CAPLEVEL):
        caplog.clear()
        with MonkeyPatch.context() as m:
            vv_protocol.validate_all()
            df = vv_protocol.flags_to_df()
            # df_verbose is currently unused -- presumably kept for debugging.
            df_verbose = vv_protocol.flags_to_df(schema="verbose")
    # assert that no failing flags were raised
    # assert df["flag_code"].max() == 20 # not needed as this tests the truncated data rather than the logic
    # check if appropriate number of flags are raised
    # Currently:
    #   Dataset check : 2
    #   Sample check : 1 per sample
    #   Component checks :
    #     Reads : 1 per component
    assert len(df) == 41
    assert 0 not in df["flag_code"].values  # ensure dry run flag codes returned
    assert 1 not in df["flag_code"].values  # ensure skip flag codes returned
def test_bulkRNASeq_STAGE00_validation_single(caplog, glds48_dataSystem_STAGE00):
    """Dry-run validation of a single-end dataset straight after demultiplexing."""
    caplog.set_level(20)  # logging.INFO
    protocol = BulkRNASeq_VVProtocol(
        dataset=glds48_dataSystem_STAGE00.dataset,
        protocol_name="only raw",
        dry_run=True,
    )
    with MonkeyPatch.context() as m:
        protocol.validate_all()
        flag_table = protocol.flags_to_df()
        # Verbose table is generated but currently unused (debug aid).
        flag_table_verbose = protocol.flags_to_df(schema="verbose")
    # Expected flag count:
    #   dataset checks: 2
    #   sample checks:  1 per sample
    #   component (reads) checks: 1 per component (1 per sample, single-end)
    assert len(flag_table) == 30
"""
def test_bulkRNASeq_STAGE01_validation_paired(glds194_dataSystem_STAGE01):
ds = glds194_dataSystem_STAGE01
vv_protocol = BulkRNASeq_VVProtocol(
dataset=ds.dataset, stage_names=STAGE.Reads_PreProcessed, dry_run=True
)
vv_protocol.validate_all()
df = vv_protocol.flags_to_df()
assert len(df) == 81
def test_bulkRNASeq_STAGE01_validation_single(glds48_dataSystem_STAGE01):
ds = glds48_dataSystem_STAGE01
vv_protocol = BulkRNASeq_VVProtocol(
dataset=ds.dataset, stage_names=STAGE.Reads_PreProcessed, dry_run=True
)
vv_protocol.validate_all()
df = vv_protocol.flags_to_df()
assert len(df) == 59
def test_bulkRNASeq_STAGE02_validation_paired(glds194_dataSystem_STAGE02):
ds = glds194_dataSystem_STAGE02
vv_protocol = BulkRNASeq_VVProtocol(
dataset=ds.dataset, stage_names=STAGE.GenomeAligned, dry_run=True
)
vv_protocol.validate_all()
df = vv_protocol.flags_to_df()
assert len(df) == 95
def test_bulkRNASeq_STAGE02_validation_single(glds48_dataSystem_STAGE02):
ds = glds48_dataSystem_STAGE02
vv_protocol = BulkRNASeq_VVProtocol(
dataset=ds.dataset, stage_names=STAGE.GenomeAligned, dry_run=True
)
vv_protocol.validate_all()
df = vv_protocol.flags_to_df()
assert len(df) == 74
def test_bulkRNASeq_STAGE03_validation_paired(glds194_dataSystem_STAGE03):
ds = glds194_dataSystem_STAGE03
vv_protocol = BulkRNASeq_VVProtocol(
dataset=ds.dataset, stage_names=STAGE.GeneCounted, dry_run=True
)
vv_protocol.validate_all()
df = vv_protocol.flags_to_df()
assert len(df) == 97
def test_bulkRNASeq_STAGE03_validation_single(glds48_dataSystem_STAGE03):
ds = glds48_dataSystem_STAGE03
vv_protocol = BulkRNASeq_VVProtocol(
dataset=ds.dataset, stage_names=STAGE.GeneCounted, dry_run=True
)
vv_protocol.validate_all()
df = vv_protocol.flags_to_df()
assert len(df) == 76
""" # DISABLED PENDING REWORK OF ARGS + CONFIG APPROACH
def test_bulkRNASeq_STAGE04_validation_paired(glds194_dataSystem_STAGE04):
    """Full-protocol dry-run validation of the paired-end STAGE04 dataset."""
    protocol = BulkRNASeq_VVProtocol(
        dataset=glds194_dataSystem_STAGE04.dataset,
        protocol_name="full",
        dry_run=True,
    )
    protocol.validate_all()
    flag_table = protocol.flags_to_df()
    assert len(flag_table) == 98
    # No halting-level (>= 90) flag codes should be raised.
    assert flag_table["flag_code"].max() < 90
def test_bulkRNASeq_STAGE04_validation_single(glds48_dataSystem_STAGE04):
    """Full-protocol dry-run validation of the single-end STAGE04 dataset."""
    protocol = BulkRNASeq_VVProtocol(
        dataset=glds48_dataSystem_STAGE04.dataset,
        protocol_name="full",
        dry_run=True,
    )
    protocol.validate_all()
    flag_table = protocol.flags_to_df()
    assert len(flag_table) == 77
    # No halting-level (>= 90) flag codes should be raised.
    assert flag_table["flag_code"].max() < 90
| StarcoderdataPython |
1602467 | <gh_stars>0
def sort(l, n):
    """Insertion-sort the first n items of l in place, largest first.

    Args:
        l: list of mutually comparable items; modified in place.
        n: number of leading elements to sort (normally len(l)).
    """
    for i in range(1, n):
        key = l[i]
        j = i - 1
        # Shift smaller elements one slot right until the insertion
        # point for `key` is found (descending order).
        while j >= 0 and l[j] < key:
            l[j + 1] = l[j]
            j -= 1
        l[j + 1] = key
if __name__ == "__main__":
    # Inputs
    numberList = [23, -6, -3, 7, 34, 9, 66, 23, 55, 1]
    n = len(numberList)
    # Unsorted List Output
    print(numberList)
    # Sorting List (in place, descending order)
    sort(numberList, n)
    # Sorted List Output
    print(numberList)
| StarcoderdataPython |
3372477 | <reponame>younhapan/ystdoc
# coding=utf8
class UserContact():
    """Plain data holder for one address-book contact record.

    NOTE(review): attributes are class-level defaults shared by all
    instances; assign per-instance values before use.
    """
    phone = ''
    owner_id = ''
    created_time = None
    phone_location = ''
    created_at = None
    name = ''
    call_count = ''
    device_id = ''
class UserPhoneCall():
    """Plain data holder for one phone-call log entry."""
    phone = ''
    phone_location = ''
    location = ''
    owner_id = ''
    name = ''
    created_time = None
    calling_time = None
    # unit: second
    calling_duration = -1
    # 0: incoming, 1: outgoing, 2: missed, 3: hung up
    type = -1
    device_id = ''
class UserShortMessage():
    """Plain data holder for one SMS log entry."""
    phone = ''
    phone_location = ''
    source = ''
    owner_id = ''
    created_time = None
    sending_time = None
    name = ''
    content = ''
    # 0: received, 1: sent, 2: send failed or still sending
    type = -1
    device_id = ''
1756649 | <gh_stars>1-10
""" Coiflets 1 wavelet """
import numpy as np
class Coiflets1:
    """Coiflet-1 (coif1) wavelet filter bank.

    Properties
    ----------
    near symmetric, orthogonal, biorthogonal.
    Coefficients match http://wavelets.pybytes.com/wavelet/coif1/.

    Only the decomposition low-pass filter is written out explicitly; the
    other three filters are derived from it by the standard quadrature
    mirror relations (time reversal and alternating sign flips), which are
    bitwise-exact operations.
    """
    __name__ = "Coiflets Wavelet 1"
    __motherWaveletLength__ = 6  # length of the mother wavelet
    __transformWaveletLength__ = 2  # minimum wavelength of input signal

    # decomposition low-pass filter (scaling coefficients)
    decompositionLowFilter = np.array([
        -0.01565572813546454,
        -0.0727326195128539,
        0.38486484686420286,
        0.8525720202122554,
        0.3378976624578092,
        -0.0727326195128539,
    ])

    # decomposition high-pass filter: reversed low-pass with alternating
    # signs, h[k] = (-1)**(k+1) * g[N-1-k]
    decompositionHighFilter = (
        decompositionLowFilter[::-1] * (-1.0) ** np.arange(1, 7)
    )

    # reconstruction filters are the time-reversed decomposition filters
    reconstructionLowFilter = decompositionLowFilter[::-1].copy()
    reconstructionHighFilter = decompositionHighFilter[::-1].copy()
| StarcoderdataPython |
66841 | <reponame>tristan/blockies
from setuptools import setup
# Read the long description and dependencies up front, using context
# managers so the file handles are closed promptly (the original bare
# open() calls leaked them), and split requirements into the list form
# setuptools documents for install_requires.
with open('README.md') as readme_file:
    _long_description = readme_file.read()
with open('requirements.txt') as requirements_file:
    _install_requires = requirements_file.read().splitlines()

setup(
    name='blockies',
    version='0.0.3',
    author='<NAME>',
    author_email='<EMAIL>',
    py_modules=['blockies'],
    url='http://github.com/tristan/blockies',
    description='A tiny library for generating blocky identicons.',
    long_description_content_type="text/markdown",
    long_description=_long_description,
    install_requires=_install_requires,
)
| StarcoderdataPython |
1776684 | import functools
import operator
from chainer import initializers
from chainer import link
from chainer import variable
import chainer.functions as F
from persistent_memory_function import persistent_memory
class PersistentMemory(link.Chain):
    """Persistent memory layer (chainer Chain).

    Holds a learnable memory matrix and a projection matrix; given a hidden
    state it computes softmax attention weights over the memory slots and
    returns the attention-weighted sum over memory entries.
    """

    def __init__(self, in_size, slot_size, memory_size, initialW=None):
        """
        Args:
            in_size: hidden_state h_size (parameters are actually sized
                lazily on first call, so this argument is informational).
            slot_size: number of memory slots (columns of the memory).
            memory_size: dimensionality of each memory entry (rows).
            initialW: optional weight initializer.
        """
        super().__init__()
        self.slot_size = slot_size
        self.memory_size = memory_size
        with self.init_scope():
            W_initializer = initializers._get_initializer(initialW)
            # Created without shapes; shapes are fixed in _initialize_params
            # once the flattened input size is known.
            self.memory = variable.Parameter(W_initializer)
            self.projection_matrix = variable.Parameter(W_initializer)

    def _initialize_params(self, in_size):
        # memory: (memory_size, slot_size); projection: (in_size, memory_size)
        self.memory.initialize((self.memory_size, self.slot_size))
        self.projection_matrix.initialize((in_size, self.memory_size))

    def calculate_memory_weight(self, in_size, hidden_state):
        """Return softmax attention weights over memory slots.

        NOTE(review): `in_size` is unused here; only `hidden_state` drives
        the computation.
        """
        # print("hidden_state", hidden_state)
        DM = F.matmul(self.projection_matrix, self.memory) # (in_size, slot_size)
        # print("DM----", DM)
        sim = F.matmul(hidden_state, DM) # (batch_size, slot_size)
        # print("sim----", sim)
        n_batch, n_slot = sim.shape
        normed_hidden = F.reshape(F.batch_l2_norm_squared(hidden_state), (-1, 1))
        # Normalize the similarity in log space by (1 + squared norm) of the
        # hidden state and of each projected memory column.
        sim = F.exp(F.log(1+sim) - F.log(1+F.tile(normed_hidden, (1, n_slot))))
        # sim /= F.tile(normed_hidden, (1, n_slot)) # (batch_size, slot_size)/(batch_size,)
        sim = F.exp(F.log(1+sim) - F.log(1+F.tile(F.sum(DM*DM, axis=0), (n_batch, 1))))
        # sim /= F.tile(
        #     F.sum(DM*DM, axis=0), (n_batch, 1)) # (batch_size, slot_size)/(slot_size,)
        return F.softmax(sim) # (batch_size, slot_size)

    def __call__(self, x):
        """Return the attention-weighted combination of memory entries for x."""
        in_size = None
        if self.memory.data is None:
            # Lazily size the parameters from the flattened input dimensions.
            in_size = functools.reduce(operator.mul, x.shape[1:], 1)
            self._initialize_params(in_size)
        self.weight = self.calculate_memory_weight(in_size, x)
        n_batch, n_slot = self.weight.shape
        n_memory, _ = self.memory.shape
        # (batch_size, slot_size)*(memory_size, slot_size)
        wm = F.reshape(
            F.tile(self.weight, (1, n_memory)), (-1, n_memory, n_slot)) \
            * F.tile(self.memory, (n_batch, 1, 1))
        return F.sum(wm, axis=2)
| StarcoderdataPython |
120299 | <filename>tests/test_upload_and_restore.py
import filecmp
import os
import tempfile
import threading
from concurrent import futures
import grpc
import pytest
from pysrbup.backup_system_pb2_grpc import (BackupStub,
add_BackupServicer_to_server)
from pysrbup.client import BackupClient
from pysrbup.server import BackupServicer, create_dictionary
# gRPC channel options: allow messages up to 1 GiB in either direction.
OPTIONS = [('grpc.max_send_message_length', 1024**3),
           ('grpc.max_receive_message_length', 1024**3)]
SERVER_ADDRESS = '127.0.0.1:50000'  # loopback endpoint used by the test server
THREADS = 2  # worker threads for the server executor and the upload
def start_server():
    """Run a BackupServicer gRPC server until terminated.

    Uses a temporary directory as the backup store. This call blocks
    (wait_for_termination), so it is meant to run in a daemon thread.
    """
    with tempfile.TemporaryDirectory() as backup_dir:
        dictionary_file = create_dictionary(backup_dir)
        server = grpc.server(futures.ThreadPoolExecutor(max_workers=THREADS),
                             options=OPTIONS)
        add_BackupServicer_to_server(
            BackupServicer(backup_dir, dictionary_file), server)
        server.add_insecure_port(SERVER_ADDRESS)
        server.start()
        server.wait_for_termination()
@pytest.fixture
def server_fixture():
    """Start the gRPC backup server in a background daemon thread."""
    server_thread = threading.Thread(target=start_server, daemon=True)
    server_thread.start()
@pytest.fixture
def client_fixture():
    """Return a BackupClient connected to the test server (insecure channel)."""
    channel = grpc.insecure_channel(SERVER_ADDRESS, options=OPTIONS)
    stub = BackupStub(channel)
    return BackupClient(stub)
# pylint: disable=unused-argument,redefined-outer-name
def test_upload_and_restore(server_fixture, client_fixture):
    """Round-trip test: upload the local 'data' dir, restore it, compare trees."""
    key = client_fixture.generate_key()
    dir_to_backup = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                 'data')
    backup_id = client_fixture.upload_backup(dir_to_backup, key, THREADS)
    with tempfile.TemporaryDirectory() as restore_to_dir:
        client_fixture.restore_backup(backup_id, restore_to_dir, key)
        # The restore recreates the backed-up dir under restore_to_dir.
        assert are_equal_dirs(
            dir_to_backup,
            os.path.join(restore_to_dir, os.path.basename(dir_to_backup)))
def are_equal_dirs(dir1, dir2):
    """Recursively check that two directory trees match.

    Trees match when neither side has entries the other lacks, all common
    files compare equal (shallow filecmp comparison), and every shared
    subdirectory matches recursively.
    """
    comparison = filecmp.dircmp(dir1, dir2)
    # Any entry present on only one side means the trees differ.
    if comparison.left_only or comparison.right_only:
        return False
    # Compare common files; mismatches or unreadable files fail the check.
    _, mismatched, errors = filecmp.cmpfiles(dir1, dir2, comparison.common_files)
    if mismatched or errors:
        return False
    # Recurse into every shared subdirectory.
    return all(
        are_equal_dirs(os.path.join(dir1, sub), os.path.join(dir2, sub))
        for sub in comparison.common_dirs
    )
| StarcoderdataPython |
115325 | # -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
#
# (c) Copyright IBM Corp. 2010, 2018. All Rights Reserved.
#
"""
MlBernoulliNB
-------------
A machine learning model that uses the scikit-learn Bernoulli Naive Bayes algorithm.
https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.BernoulliNB.html
Good for binary/boolean features.
Note on imbalanced-dataset handling: BernoulliNB does not support class_weight;
use upsampling instead.
"""
from sklearn.naive_bayes import BernoulliNB
from fn_machine_learning.lib.ml_model_common import MlModelCommon
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
import pandas as pds
class MlBernoulliNB(MlModelCommon, BernoulliNB):
    """Bernoulli Naive Bayes model with optional Bagging/AdaBoost ensembling.

    When an ensemble ``method`` is chosen, predictions are delegated to
    self.ensemble_method; otherwise the inherited BernoulliNB is used
    directly.
    """
    def __init__(self, imbalance_upsampling=None, class_weight=None, method=None, c=100.0, random_state=1, log=None):
        # NOTE(review): the ``c`` parameter is unused in this class --
        # confirm whether it is kept only for signature parity with the
        # other Ml* models before removing it.
        MlModelCommon.__init__(self,
                               imbalance_upsampling=imbalance_upsampling,
                               class_weight=class_weight,
                               method=method,
                               log=log)
        if method == "Bagging":
            model = BernoulliNB()
            self.ensemble_method = BaggingClassifier(base_estimator=model,
                                                     n_estimators=10,
                                                     random_state=random_state)
        elif method == "Adaptive Boosting":
            model = BernoulliNB()
            self.ensemble_method = AdaBoostClassifier(base_estimator=model,
                                                      n_estimators=10,
                                                      random_state=random_state)
        else:
            #
            # BernoulliNB does not support class_weight
            #
            BernoulliNB.__init__(self)
            self.ensemble_method = None
    @staticmethod
    def get_name():
        """Return the display name of this algorithm."""
        return "BernoulliNB"
    def build(self, csv_file, features, prediction, test_prediction, unwanted_values=None):
        """
        This method builds a Bernoulli Naive Bayes
        http://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.BernoulliNB.html
        model.
        :param csv_file: CSV file with samples
        :param features: features to use
        :param prediction: field to predict
        :param test_prediction: how to split training/testing samples
        :param unwanted_values: Unwanted values for samples. Those samples will be removed
        :return: None; accuracy is recorded via compute_accuracy
        """
        try:
            self.extract_csv(csv_file, features, prediction)
            #
            # Cleanup samples by removing samples with empty
            # features and unwanted values
            #
            self.cleanup_samples(unwanted_values=unwanted_values)
            self.transform_numerical()
            self.split_samples(test_prediction)
            #
            # One way to compensate imbalance class is to do upsampling. Do
            # it if user specified this in
            #
            self.upsample_if_necessary()
            if len(self.y_train) > 0:
                self.config.number_samples = len(self.y_train) + len(self.y_test)
                self.log.info("Using {} samples to train. ".format(len(self.y_train)))
                # Fit either the ensemble wrapper or the plain BernoulliNB.
                if self.ensemble_method is not None:
                    self.ensemble_method.fit(self.X_train, self.y_train)
                else:
                    self.fit(self.X_train, self.y_train)
                #
                # Test model
                #
                if self.ensemble_method is not None:
                    y_predict = self.ensemble_method.predict(self.X_test)
                else:
                    y_predict = self.predict(self.X_test)
                self.compute_accuracy(predict=y_predict,
                                      actual=self.y_test)
            else:
                self.log.info("No samples to train the model")
        except Exception as e:
            self.log.exception(str(e))
            raise e
    def predict_result(self, input):
        """
        Predict the label for a single sample.
        :param input: dict mapping feature name -> value
        :return: prediction array from the underlying estimator
        """
        df = pds.DataFrame([input])
        #
        # We only care about the features
        #
        df = df[self.config.selected_features]
        df = self.transform_for_prediction(df)
        self.log.info("Using df {} to predict. ".format(str(df)))
        if self.ensemble_method is not None:
            ret = self.ensemble_method.predict(df)
        else:
            ret = self.predict(df)
        return ret
| StarcoderdataPython |
27513 | import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
import geocat.viz.util as gvutil
# Load ERSSTv5 monthly-mean SST and build monthly anomalies relative to
# the 1971-2000 climatology.
path = r'H:\Python project 2021\climate_data_analysis_with_python\data\sst.mnmean.nc'
ds= xr.open_dataset(path)
# time slicing
sst = ds.sst.sel(time=slice('1920-01-01','2020-12-01'))
# anomaly with respect to 1971-2000 period
clm = ds.sst.sel(time=slice('1971-01-01','2000-12-01')).groupby('time.month').mean(dim='time')
anm = (sst.groupby('time.month') - clm)
# time axis plus a plain integer index used for the bar plots below
time = anm.time
itime=np.arange(time.size)
def wgt_areaave(indat, latS, latN, lonW, lonE):
    """Cos-latitude area-weighted mean of *indat* over a lat/lon box.

    :param indat: xarray DataArray with 'lat' and 'lon' coordinates
    :param latS, latN: southern / northern box edges (degrees)
    :param lonW, lonE: western / eastern box edges (degrees)
    :return: DataArray averaged over lat/lon (NaNs skipped)

    NOTE(review): the wrap test ``lon.values.min() > -1`` assumes a
    0..360 longitude grid when negative box longitudes are requested --
    confirm for other datasets.
    """
    lat=indat.lat
    lon=indat.lon
    # Wrap a 0..360 longitude grid to -180..180 when the box uses negative lons.
    if ( ((lonW < 0) or (lonE < 0 )) and (lon.values.min() > -1) ):
        anm=indat.assign_coords(lon=( (lon + 180) % 360 - 180) )
        lon=( (lon + 180) % 360 - 180)
    else:
        anm=indat
    # Select the box, then average with cos(lat) area weights.
    iplat = lat.where( (lat >= latS ) & (lat <= latN), drop=True)
    iplon = lon.where( (lon >= lonW ) & (lon <= lonE), drop=True)
    # print(iplat)
    # print(iplon)
    wgt = np.cos(np.deg2rad(lat))
    odat=anm.sel(lat=iplat,lon=iplon).weighted(wgt).mean(("lon", "lat"), skipna=True)
    return(odat)
# --- Section: Bay of Bengal SSTA bar chart with 7-month running mean ---
# bob sst
bob_anm = wgt_areaave(anm,5,25,80,100)
bob_ranm = bob_anm.rolling(time=7, center=True).mean('time')
##
# Create a list of colors based on the color bar values
colors = ['C1' if (value > 0) else 'C0' for value in bob_anm]
fig = plt.figure(figsize=[8,5])
ax1 = fig.add_subplot(111)
# Plot bar chart
ax1.bar(itime, bob_anm, align='edge', edgecolor="none", color=colors, width=1.0)
ax1.plot(itime, bob_ranm, color="black", linewidth=1.5)
ax1.legend(['7-month running mean'],fontsize=12)
# Use geocat.viz.util convenience function to add minor and major tick lines
gvutil.add_major_minor_ticks(ax1,
                             x_minor_per_major=4,
                             y_minor_per_major=5,
                             labelsize=12)
# Use geocat.viz.util convenience function to set axes parameters
ystr = 1920
yend = 2020
dyr = 20
# Indices of the first and last tick years on the monthly time axis.
ist, = np.where(time == pd.Timestamp(year=ystr, month=1, day=1) )
iet, = np.where(time == pd.Timestamp(year=yend, month=1, day=1) )
gvutil.set_axes_limits_and_ticks(ax1,
                                 ylim=(-1.5, 1),
                                 yticks=np.linspace(-1.5, 1, 6),
                                 yticklabels=np.linspace(-1.5, 1, 6),
                                 xlim=(itime[0], itime[-1]),
                                 xticks=itime[ist[0]:iet[0]+1:12*dyr],
                                 xticklabels=np.arange(ystr, yend+1, dyr))
# Use geocat.viz.util convenience function to set titles and labels
gvutil.set_titles_and_labels(ax1,
                             maintitle="SSTA in BoB (ERSST)",
                             ylabel='Anomalies',
                             xlabel= 'Year',
                             maintitlefontsize=18,
                             labelfontsize=15)
plt.tight_layout()
plt.savefig("bob_anomalies.png",dpi = 300)
########## BoB SST with respect to ENSO and IOD (ERSST)
# --- Section: compare the BoB anomaly against the Nino3.4 index and the
# --- IOD Dipole Mode Index (both smoothed with a 7-month running mean) ---
#nino 3.4 and dipole mode index plot together
nino = wgt_areaave(anm,-5,5,-170,-120)
nino = nino.rolling(time=7, center=True).mean('time')
#IOD west: 50 ° E to 70 ° E and 10 ° S to 10 ° N.
iod_west = wgt_areaave(anm,-10,10,50,70)
# IOD east: 90 ° E to 110 ° E and 10 ° S to 0 ° S.
iod_east = wgt_areaave(anm,-10,0,90,110)
dmi = iod_west - iod_east
dmi = dmi.rolling(time=7, center=True).mean('time')
### Figure Plot
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8))
ax1.set_title('BoB anomaly with repect to ENSO')
ax1.plot(time, bob_ranm, '-', linewidth=1)
ax1.plot(time, nino, '-', linewidth=1)
ax1.tick_params(length = 7,right=True,labelsize=12)
ax1.legend(['BoB anomaly','Nino3.4 Index'],fontsize=12,frameon=False)
ax1.set_ylabel('SSTA (°C)',fontsize=12)
ax2.set_title('BoB anomaly with respect to IOD')
ax2.plot(time, bob_ranm, '-', linewidth=1)
ax2.plot(time, dmi, '-', linewidth=1)
ax2.tick_params(length = 7,right=True,labelsize=12)
ax2.legend(['BoB anomaly','Dipole Mode Index'],fontsize=12,frameon=False)
ax2.set_ylabel('SSTA (°C)',fontsize=12)
# Show the plot
plt.draw()
plt.tight_layout()
plt.savefig("nino-bob-dmi.png",dpi = 300)
####################### (Ploting Nino 3.4 Index)
# --- Section: raw and standardized Nino3.4 SSTA as stacked bar charts ---
nino = wgt_areaave(anm,-5,5,-170,-120)
rnino = nino.rolling(time=7, center=True).mean('time')
#nino standard
ninoSD=nino/nino.std(dim='time')
rninoSD=ninoSD.rolling(time=7, center=True).mean('time')
# -- -- -- -- -- -- -- -- - -- - -- --- -- - -- - -- - - -- - -
# -- figure plot
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8))
# Create a list of colors based on the color bar values
colors = ['C1' if (value > 0) else 'C0' for value in ninoSD]
# Plot bar chart
ax1.bar(itime, nino, align='edge', edgecolor="none", color=colors, width=1.0)
ax1.plot(itime, rnino, color="black", linewidth=1.5)
ax1.legend(['7-month running mean'],fontsize=12,frameon=False)
ax2.bar(itime, ninoSD, align='edge', edgecolor="none", color=colors, width=1.0)
ax2.plot(itime, rninoSD, color="black", linewidth=1.5)
# Use geocat.viz.util convenience function to set axes parameters
ystr = 1920
yend = 2020
dyr = 20
ist, = np.where(time == pd.Timestamp(year=ystr, month=1, day=1) )
iet, = np.where(time == pd.Timestamp(year=yend, month=1, day=1) )
gvutil.set_axes_limits_and_ticks(ax1,
                                 ylim=(-3, 3.5),
                                 yticks=np.linspace(-3, 3, 7),
                                 yticklabels=np.linspace(-3, 3, 7),
                                 xlim=(itime[0], itime[-1]),
                                 xticks=itime[ist[0]:iet[0]+1:12*dyr],
                                 xticklabels=np.arange(ystr, yend+1, dyr) )
gvutil.set_axes_limits_and_ticks(ax2,
                                 ylim=(-3, 3.5),
                                 yticks=np.linspace(-3, 3, 7),
                                 yticklabels=np.linspace(-3, 3, 7),
                                 xlim=(itime[0], itime[-1]),
                                 xticks=itime[ist[0]:iet[0]+1:12*dyr],
                                 xticklabels=np.arange(ystr, yend+1, dyr) )
# Use geocat.viz.util convenience function to add minor and major tick lines
gvutil.add_major_minor_ticks(ax1,
                             x_minor_per_major=4,
                             y_minor_per_major=5,
                             labelsize=12)
gvutil.add_major_minor_ticks(ax2,
                             x_minor_per_major=4,
                             y_minor_per_major=5,
                             labelsize=12)
# Use geocat.viz.util convenience function to set titles and labels
gvutil.set_titles_and_labels(ax1,
                             maintitle="SSTA in Nino3.4 region",
                             ylabel='Anomalies',
                             maintitlefontsize=18,
                             labelfontsize=15)
gvutil.set_titles_and_labels(ax2,
                             maintitle="Nino3.4 Index",
                             ylabel='Standardized',
                             xlabel='Year',
                             maintitlefontsize=18,
                             labelfontsize=15)
plt.draw()
plt.tight_layout()
plt.savefig("nino3.4_ERSST.png",dpi=300)
############### (Ploting DMI Index)
# --- Section: Dipole Mode Index (west-minus-east IOD boxes) bar chart ---
iod_west = wgt_areaave(anm,-10,10,50,70)
# IOD east: 90 ° E to 110 ° E and 10 ° S to 0 ° S.
iod_east = wgt_areaave(anm,-10,0,90,110)
dmi = iod_west - iod_east
rdmi = dmi.rolling(time=7, center=True).mean('time')
colors = ['C1' if (value > 0) else 'C0' for value in dmi]
fig = plt.figure(figsize=[8,5])
ax1 = fig.add_subplot(111)
# Plot bar chart
ax1.bar(itime, dmi, align='edge', edgecolor="none", color=colors, width=1.0)
ax1.plot(itime, rdmi, color="black", linewidth=1.5)
ax1.legend(['7-month running mean'],fontsize=12,frameon=False)
# Use geocat.viz.util convenience function to add minor and major tick lines
gvutil.add_major_minor_ticks(ax1,
                             x_minor_per_major=4,
                             y_minor_per_major=5,
                             labelsize=12)
# Use geocat.viz.util convenience function to set axes parameters
ystr = 1920
yend = 2020
dyr = 20
ist, = np.where(time == pd.Timestamp(year=ystr, month=1, day=1) )
iet, = np.where(time == pd.Timestamp(year=yend, month=1, day=1) )
gvutil.set_axes_limits_and_ticks(ax1,
                                 ylim=(-1.5, 1.90),
                                 yticks=np.linspace(-1, 1.5, 6),
                                 yticklabels=np.linspace(-1, 1.5, 6),
                                 xlim=(itime[0], itime[-1]),
                                 xticks=itime[ist[0]:iet[0]+1:12*dyr],
                                 xticklabels=np.arange(ystr, yend+1, dyr))
# Use geocat.viz.util convenience function to set titles and labels
gvutil.set_titles_and_labels(ax1,
                             maintitle=" Dipole Mode Index",
                             ylabel='Anomalies',
                             xlabel= 'Year',
                             maintitlefontsize=18,
                             labelfontsize=15)
plt.tight_layout()
plt.savefig("dmi_ersst.png",dpi = 300)
### (Global vs BoB time Series -ERSST v5)
# --- Section: 12-month smoothed global-mean SSTA vs BoB-mean SSTA ---
# global vs bob sst anomaly
# NOTE(review): this global mean is unweighted (no cos-lat weighting),
# unlike wgt_areaave -- confirm that is intentional.
glob_anom = anm.mean(('lon','lat'),skipna = True)
glob_anom_ra = glob_anom.rolling(time=12, center=True).mean('time')
bob_anm = wgt_areaave(anm,5,25,80,100)
bob_anm_ra = bob_anm.rolling(time=12, center=True).mean('time')
# NOTE(review): the correlation below is computed but its result is
# discarded; the 0.89 in the annotation text is hard-coded.
xr.corr(glob_anom_ra,bob_anm_ra)
# plot
fig = plt.figure(figsize=[8,5])
ax1 = fig.add_subplot(111)
ax1.set_title('Global SSTA & BOB SSTA with 1 year moving average (ERSST v5)')
ax1.plot(time, glob_anom_ra, '-', linewidth=1)
ax1.plot(time, bob_anm_ra, '-', linewidth=1)
ax1.tick_params(length = 7,right=True,labelsize=12)
ax1.legend(['Globally averaged','BoB averaged'],fontsize=12,frameon=False)
ax1.set_ylabel('SSTA (°C)',fontsize=12)
ax1.set_xlabel('Year',fontsize=12)
ax1.text(pd.to_datetime('1975-01-01'),-0.8,'Correlation Coefficient = 0.89',fontsize=12)
#ax1.axis(xmin=pd.Timestamp("1982-01"), xmax=pd.Timestamp("2020-12"))
# Show the plot
plt.draw()
plt.tight_layout()
plt.savefig("bobvsgloobalanom_ersst.png",dpi = 300)
| StarcoderdataPython |
3211149 | import os, csv, json, shutil
from data_tools.coco_tools import read_json
from PIL import Image
def reduce_data(oidata, catmid2name, keep_classes=None):
    """
    Reduce the dataset by keeping only annotations whose image contains at
    least one object of a wanted class. All annotations of a kept image are
    retained, including those for other classes (matching the original
    behaviour).
    :param oidata: oidata, as outputted by parse_open_images
    :param catmid2name: catMID -> name dict, as produced by read_catMIDtoname
    :param keep_classes: List of class names to be kept (default: keep none).
    :return: filtered list of annotation dicts
    """
    # None default instead of the mutable-default-argument anti-pattern.
    if keep_classes is None:
        keep_classes = []
    print("    Reducing the dataset. Initial dataset has length", len(oidata))
    keep_set = set(keep_classes)  # O(1) membership tests
    # IDs of images containing at least one wanted class.
    keep_imageids = {
        dd['ImageID'] for dd in oidata
        if catmid2name[dd['LabelName']] in keep_set
    }
    # Keep every annotation belonging to a kept image.
    returned_data = [dd for dd in oidata if dd['ImageID'] in keep_imageids]
    print("    Reducing the dataset. Final dataset has length", len(returned_data))
    return returned_data
def openimages2coco(oidata, catmid2name, img_dir, desc="", output_class_ids=None,
                    max_size=None, min_ann_size=None, min_ratio=0.0, min_width_for_ratio=400):
    """
    Converts open images annotations into COCO format.
    :param oidata: list of data items, as produced by parse_open_images
    :param catmid2name: dict mapping category MID -> class name
    :param img_dir: directory (or list of directories) holding the images;
        used to read each image's width/height
    :param desc: free-text description stored in the output 'info' field
    :param output_class_ids: dict mapping class name -> COCO category id;
        classes absent from this dict are dropped
    :param max_size: if set, annotation sizes are evaluated as if the image
        were resized so that its longest side equals max_size
    :param min_ann_size: optional (min_w, min_h); smaller boxes are dropped
    :param min_ratio: if > 0, drop annotations belonging to wide images
        (aspect ratio >= min_ratio and width >= min_width_for_ratio)
    :param min_width_for_ratio: width threshold used together with min_ratio
    :return: COCO style dict with 'images', 'annotations', 'categories'
    """
    output = {'info':
              "Annotations produced from OpenImages. %s" % desc,
              'licenses': [],
              'images': [],
              'annotations': [],
              'categories': []}  # Prepare output
    # Get categories in this dataset
    all_cats = []
    for dd in oidata:
        if dd['LabelName'] not in all_cats:
            all_cats.append(dd['LabelName'])
    categories = []
    for mid in all_cats:
        cat_name = catmid2name[mid]
        if cat_name in output_class_ids:
            categories.append({"id": output_class_ids[cat_name], "name": cat_name, "supercategory": 'object'})
    output['categories'] = categories
    # Get images
    image_filename_to_id = {}  # To store found images.
    current_img_index = 0  # To incrementally add image IDs.
    imgid2wh = {}  # To store width and height
    intermediate_images = []  # To store as if output
    for dd in oidata:
        filename = dd['ImageID'] + '.jpg'
        if filename not in image_filename_to_id:
            img_entry = _oidata_entry_to_image_dict(filename, current_img_index, img_dir)
            image_filename_to_id[filename] = current_img_index
            imgid2wh[current_img_index] = (img_entry['width'], img_entry['height'])
            intermediate_images.append(img_entry)
            current_img_index += 1
    # Get annotations
    ann_id = 1
    imgid2_has_new_ann = {}  # Use this to make sure that our images have valid annotations
    new_anns_raw = []  # list of candidate annotations
    for dd in oidata:
        filename = dd['ImageID'] + '.jpg'
        imgid = image_filename_to_id[filename]
        cat_name = catmid2name[dd['LabelName']]
        if cat_name in output_class_ids:
            catid = output_class_ids[cat_name]
            w, h = imgid2wh[imgid]
            bbox, area, seg = _ann2bbox(dd, w, h)
            ann_entry = {'id': ann_id, 'image_id': imgid, 'category_id': catid,
                         'segmentation': seg,
                         'area': area,
                         'bbox': bbox,
                         'iscrowd': 0}
            # Check if we want to include this annotation
            include_this_annotation = True
            x, y, ann_w, ann_h = bbox
            if max_size:
                # Scale the box as if the image were resized to max_size.
                maxdim = max(w, h)
                ann_w = ann_w * (max_size / float(maxdim))
                ann_h = ann_h * (max_size / float(maxdim))
            if min_ann_size is not None:
                if ann_w < min_ann_size[0]:
                    include_this_annotation = False
                if ann_h < min_ann_size[1]:
                    include_this_annotation = False
            # Now check whether this annotation exceeds the ratio requirements, if any.
            if min_ratio > 0:
                try:
                    ratio = float(w) / float(h)
                except ZeroDivisionError:
                    include_this_annotation = False
                else:
                    if ratio >= min_ratio and w >= min_width_for_ratio:
                        include_this_annotation = False
            if include_this_annotation:
                new_anns_raw.append(ann_entry)
                imgid2_has_new_ann[imgid] = True
                ann_id += 1
    # Now we must review all of the images and only keep those where imgid2_has_new_ann[imgid] = True
    new_imgs_raw = []
    for img in intermediate_images:
        if img['id'] in imgid2_has_new_ann:
            new_imgs_raw.append(img)
    # Now we assign new image_ids to the images, mapping old to new
    old_img2new_img = {}
    new_imgs = []
    for indx, img in enumerate(new_imgs_raw):
        old_img2new_img[img['id']] = indx + 1
        img['id'] = indx + 1
        new_imgs.append(img)
    output['images'] = new_imgs
    # Now we assign new ann_ids to the annotations, also updating the image ID
    new_anns = []
    for indx, ann in enumerate(new_anns_raw):
        ann['id'] = indx + 1
        ann['image_id'] = old_img2new_img[ann['image_id']]
        new_anns.append(ann)
    output['annotations'] = new_anns
    return output
def read_catMIDtoname(csv_file):
    """Read a two-column CSV mapping category MIDs to class names.

    :param csv_file: path to a CSV whose rows are ``MID,name``
    :return: dict mapping MID -> class name
    """
    assert os.path.isfile(csv_file), "File %s does not exist." % csv_file
    catmid2name = {}
    rows_read = 0
    with open(csv_file) as csvfile:
        for row in csv.reader(csvfile):
            catmid2name[row[0]] = row[1]
            rows_read += 1
    print(" Read", rows_read, "rows from category csv", csv_file)
    return catmid2name
def parse_open_images(annotation_csv):
    """
    Parse an OpenImages bounding-box CSV into a list of annotation dicts.
    The header must match the expected 13-column OpenImages layout exactly
    (asserted below); each row is converted by parse_open_images_row.
    :param annotation_csv: path to the annotations CSV file
    :return: list of annotation dicts
    """
    annotations = []
    assert os.path.isfile(annotation_csv), "File %s does not exist." % annotation_csv
    expected_header = ['ImageID', 'Source', 'LabelName', 'Confidence', 'XMin', 'XMax', 'YMin', 'YMax', 'IsOccluded', 'IsTruncated', 'IsGroupOf', 'IsDepiction', 'IsInside']
    rows_read = 0
    with open(annotation_csv) as csvfile:
        reader = csv.reader(csvfile)
        header = next(reader)
        # Fail fast on an unexpected file layout.
        for ii, hh in enumerate(header):
            assert hh == expected_header[ii], "File header is not as expected."
        for row in reader:
            ann = parse_open_images_row(row, header)
            annotations.append(ann)
            rows_read += 1
            # if rows_read > 10:
            #     print("DEBUG: Only reading 11 rows.")
            #     break
    print(" Read", rows_read, "rows from annotation csv", annotation_csv)
    return annotations
def parse_open_images_row(row, header):
    """Parse one OpenImages CSV row into a dict keyed by the header fields.

    Box-coordinate columns (XMin/XMax/YMin/YMax, normalized image
    coordinates in [0, 1]) become floats; flag columns (Confidence,
    IsOccluded, IsTruncated, IsGroupOf, IsDepiction, IsInside; values
    1 = present, 0 = not present, -1 = unknown) become ints; every other
    column (ImageID, Source, LabelName MID) is kept as a string.
    """
    float_fields = {'XMin', 'XMax', 'YMin', 'YMax'}
    int_fields = {'Confidence', 'IsOccluded', 'IsTruncated', 'IsGroupOf',
                  'IsDepiction', 'IsInside'}
    ann = {}
    for position, field in enumerate(header):
        raw = row[position]
        if field in float_fields:
            ann[field] = float(raw)
        elif field in int_fields:
            ann[field] = int(raw)
        else:  # keep as str
            ann[field] = raw
    return ann
def copy_images(json_file, original_image_dirs, new_image_dir):
    """Copy every image listed in a COCO JSON file into new_image_dir.

    :param json_file: COCO-style JSON whose 'images' entries name the files
    :param original_image_dirs: directory or list of directories to search
    :param new_image_dir: destination directory

    NOTE(review): when a filename exists in several source dirs, it is
    copied from each in turn, so the last directory wins -- confirm intent.
    Files missing from every source dir are silently skipped.
    """
    if type(original_image_dirs) is not list:
        original_image_dirs = [original_image_dirs]
    # Open JSON file and get list of images
    annotations = read_json(json_file, verbose=False)
    image_filenames = [ann['file_name'] for ann in annotations['images']]
    for img in image_filenames:
        for img_d in original_image_dirs:
            orig = os.path.join(img_d, img)
            if not os.path.isfile(orig):
                continue
            new = os.path.join(new_image_dir, img)
            # Copy
            shutil.copy(orig, new)
    print("All %i images in %s copied to %s" % (len(image_filenames), json_file, new_image_dir))
def _oidata_entry_to_image_dict(filename, indx, img_dir):
    """Build a COCO 'images' entry for *filename* with id *indx*.

    Width/height are read from the image file via _get_img_width_height;
    the license/url/date fields are left as None.
    """
    width, height = _get_img_width_height(filename, img_dir)
    return {'id': indx, 'width': width, 'height': height, 'file_name': filename,
            'license': None, 'flickr_url': None, 'coco_url': None, 'date_captured': None}
def _get_img_width_height(filename, img_dir):
    """Return (width, height) of *filename*, searching each dir in img_dir.

    :param filename: image file name
    :param img_dir: directory or list of directories to search in order
    :raises FileNotFoundError: if the image is in none of the directories
    """
    # Modified to deal with img_dir as a list.
    if not type(img_dir) == list:
        img_dir = [img_dir]
    for img_d in img_dir:
        filepath = os.path.join(img_d, filename)
        try:
            image = Image.open(filepath).convert("RGB")
        except FileNotFoundError:
            pass  # try the next directory
        else:
            return image.size
    raise FileNotFoundError("Image %s not found in any of img_dir" % filename)
def _ann2bbox(dd, img_width, img_height):
xmin = dd['XMin'] * img_width
xmax = dd['XMax'] * img_width
ymin = dd['YMin'] * img_height
ymax = dd['YMax'] * img_height
seg = [xmin, ymin, xmin, ymax, xmax, ymax, xmax, ymin]
w = xmax - xmin
h = ymax - ymin
bbox = [xmin, ymin, w, h]
return bbox, w * h, seg
| StarcoderdataPython |
141620 | from PyQt5 import QtCore
from PyQt5.Qt3DCore import *
from PyQt5.Qt3DExtras import *
from PyQt5.Qt3DRender import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
class RenderWidget(QWidget):
    """Qt widget embedding a Qt3DWindow with a camera and scene root.

    The 3D view is wrapped in a window container and laid out to fill the
    widget; setScene() swaps the scene contents and attaches an orbit
    camera controller.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.view = Qt3DWindow()
        # Embed the Qt3D window inside this plain QWidget.
        self.widget = QWidget.createWindowContainer(self.view, self)
        self.scene = QEntity()
        # self.picker = QObjectPicker(self.scene)
        # self.picker.setHoverEnabled(True)
        # self.picker.setDragEnabled(True)
        # self.scene.addComponent(self.picker)
        # camera
        self.camera: QCamera = self.view.camera()
        self.camera.lens().setPerspectiveProjection(45.0, 16.0 / 9.0, 0.1, 1000)
        self.camera.setPosition(QVector3D(0, 0, 1))
        self.camera.setNearPlane(0.01)
        self.camera.setViewCenter(QVector3D(0, 0, 0))
        # for camera control
        camController = QFirstPersonCameraController(self.scene)
        camController.setCamera(self.camera)
        self.view.setRootEntity(self.scene)
        layout = QVBoxLayout()
        layout.addWidget(self.widget)
        self.setLayout(layout)
        # Debug output of the active render settings/capabilities.
        renderSettings: QRenderSettings = self.view.renderSettings()
        renderCapabilities: QRenderCapabilities = renderSettings.renderCapabilities()
        print("renderSettings :", renderSettings.activeFrameGraph())
        print("renderPolicy :", renderSettings.renderPolicy())
        print("renderCapabilities.profile:", renderCapabilities.profile())
        # picking_settings: QPickingSettings = render_settings.pickingSettings()
        # picking_settings.setFaceOrientationPickingMode(QPickingSettings.FrontFace)
        # picking_settings.setPickMethod(QPickingSettings.BoundingVolumePicking)
        # picking_settings.setPickResultMode(QPickingSettings.NearestPick)
        # self.picker.pressed.connect(self.clicked)
        # self.picker.clicked.connect(self.clicked)
        # self.picker.moved.connect(self.clicked)
    def setScene(self, sceneFn):
        """Clear the scene and rebuild it via sceneFn(scene_root).

        Also attaches a fresh QOrbitCameraController to the new contents.
        """
        c = self.scene.children()
        for e in c:
            e.deleteLater()
        sceneFn(self.scene)
        camController = QOrbitCameraController(self.scene)
        camController.setLinearSpeed(2.0)
        camController.setLookSpeed(2.0)
        camController.setZoomInLimit(0.25)
        camController.setAcceleration(5)
        camController.setDeceleration(10)
        camController.setCamera(self.camera)
    def sizeHint(self) -> QtCore.QSize:
        """Preferred widget size for layout purposes."""
        return QSize(800, 600)
def clicked(self, event: QPickEvent, *args, **kwargs):
print("clicked - not implemented", event.objectName()) | StarcoderdataPython |
3267279 | <reponame>godontop/python-work<gh_stars>0
import re
# Demonstrate re.match with '.' (matches any single character) and with
# the ^...$ anchors. Each matching case prints its label; the output is
# Match 1, 2, 3 (since '.' also matches '$'), then Match 5 and 6
# ("stingray" fails because re.match anchors at the start of the string).
examples = [
    (r"gr.y", "grey", "Match 1"),
    (r"gr.y", "gray", "Match 2"),
    (r"gr.y", "gr$y", "Match 3"),
    (r"gr.y", "blue", "Match 4"),
    (r"^gr.y$", "grey", "Match 5"),
    (r"^gr.y$", "gray", "Match 6"),
    (r"^gr.y$", "stingray", "Match 7"),
]
for pattern, candidate, label in examples:
    if re.match(pattern, candidate):
        print(label)
| StarcoderdataPython |
1714930 | import sys
from util import print_error,print_log,print_result
# Action name forwarded to the load-balancer script (first CLI argument).
command = sys.argv[1]

if __name__ == '__main__':
    try:
        print("Calling creating loadbalancer service script")
        from google_load_balancer import main
        main(command)
    except Exception as e:
        # NOTE: `print e` is Python 2 statement syntax; this script targets
        # Python 2. On failure, leave a FAILURE marker file with the error
        # so the calling process can detect it.
        print e
        f = open('FAILURE', 'w')
        f.write(str(e))
        f.close()
| StarcoderdataPython |
96681 | <reponame>hockeyprincess/google-api-dfp-python
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates custom handler for SOAPpy.Client.HTTPTransport."""
__author__ = '<EMAIL> (<NAME>)'
from SOAPpy import __version__
from SOAPpy import Client
from SOAPpy import SOAPAddress
from SOAPpy import Utilities
from SOAPpy.Config import Config
from SOAPpy.Errors import HTTPError
import base64
import httplib
class HTTPTransportHandler(Client.HTTPTransport):
    """Custom SOAPpy HTTP transport that can rewrite the outgoing SOAP XML.

    Each (old, new) pair in the class attribute ``data_injects`` is applied
    as a string replacement to the request body before it is sent.
    (Python 2 code; uses the legacy httplib.HTTP interface.)
    """
    # (old, new) replacement pairs applied to the outgoing payload.
    data_injects = ()
    def call(self, addr, data, namespace, soapaction=None, encoding=None,
             http_proxy=None, config=Config):
        """Send a SOAP request and return (response_data, new_namespace)."""
        if not isinstance(addr, SOAPAddress):
            addr = SOAPAddress(addr, config)
        # Build a request.
        if http_proxy:
            real_addr = http_proxy
            real_path = addr.proto + "://" + addr.host + addr.path
        else:
            real_addr = addr.host
            real_path = addr.path
        # Pick the connection class by protocol (httpg = Globus GSI).
        if addr.proto == 'httpg':
            from pyGlobus.io import GSIHTTP
            r = GSIHTTP(real_addr, tcpAttr = config.tcpAttr)
        elif addr.proto == 'https':
            r = httplib.HTTPS(real_addr)
        else:
            r = httplib.HTTP(real_addr)
        # Intercept outgoing XML message and inject data.
        for old, new in HTTPTransportHandler.data_injects:
            data = data.replace(old, new)
        r.putrequest('POST', real_path)
        r.putheader('Host', addr.host)
        r.putheader('User-agent', Client.SOAPUserAgent())
        t = 'text/xml';
        if encoding != None:
            t += '; charset="%s"' % encoding
        r.putheader('Content-type', t)
        r.putheader('Content-length', str(len(data)))
        # If user is not a user:passwd format we'll receive a failure from the
        # server. . .I guess (??)
        if addr.user != None:
            val = base64.encodestring(addr.user)
            r.putheader('Authorization', 'Basic ' + val.replace('\012',''))
        # This fixes sending either "" or "None".
        if soapaction is None or len(soapaction) == 0:
            r.putheader('SOAPAction', '')
        else:
            r.putheader('SOAPAction', '"%s"' % soapaction)
        if config.dumpHeadersOut:
            s = 'Outgoing HTTP headers'
            Utilities.debugHeader(s)
            print 'POST %s %s' % (real_path, r._http_vsn_str)
            print 'Host:', addr.host
            print 'User-agent: SOAPpy %s (http://pywebsvcs.sf.net)' % __version__
            print 'Content-type:', t
            print 'Content-length:', len(data)
            print 'SOAPAction: "%s"' % soapaction
            Utilities.debugFooter(s)
        r.endheaders()
        if config.dumpSOAPOut:
            s = 'Outgoing SOAP'
            Utilities.debugHeader(s)
            print data,
            if data[-1] != '\n':
                print
            Utilities.debugFooter(s)
        # Send the payload.
        r.send(data)
        # Read response line.
        code, msg, headers = r.getreply()
        if headers:
            content_type = headers.get('content-type', 'text/xml')
            content_length = headers.get('Content-length')
        else:
            content_type=None
            content_length=None
        # Work around OC4J bug which does '<len>, <len>' for some reaason.
        if content_length:
            comma=content_length.find(',')
            if comma>0:
                content_length = content_length[:comma]
        # attempt to extract integer message size
        try:
            message_len = int(content_length)
        except:
            message_len = -1
        if message_len < 0:
            # Content-Length missing or invalid; just read the whole socket. This
            # won't work with HTTP/1.1 chunked encoding.
            data = r.getfile().read()
            message_len = len(data)
        else:
            data = r.getfile().read(message_len)
        if(config.debug):
            print 'code=', code
            print 'msg=', msg
            print 'headers=', headers
            print 'content-type=', content_type
            print 'data=', data
        if config.dumpHeadersIn:
            s = 'Incoming HTTP headers'
            Utilities.debugHeader(s)
            if headers.headers:
                print 'HTTP/1.? %d %s' % (code, msg)
                print '\n'.join(map (lambda x: x.strip(), headers.headers))
            else:
                print 'HTTP/0.9 %d %s' % (code, msg)
            Utilities.debugFooter(s)
        def startswith(string, val):
            return string[0:len(val)] == val
        # A 500 without a text/xml body is a transport error, not a SOAP fault.
        if (code == 500 and
                not (startswith(content_type, 'text/xml') and message_len > 0)):
            raise HTTPError(code, msg)
        if config.dumpSOAPIn:
            s = 'Incoming SOAP'
            Utilities.debugHeader(s)
            print data,
            if (len(data)>0) and (data[-1] != '\n'):
                print
            Utilities.debugFooter(s)
        if code not in (200, 500):
            raise HTTPError(code, msg)
        # Get the new namespace.
        if namespace is None:
            new_ns = None
        else:
            new_ns = self.getNS(namespace, data)
        # Return response payload.
        return data, new_ns
| StarcoderdataPython |
75805 | from importlib import import_module
from os import environ
# Resolve the active environment once at import time (defaults to "dev")
# and load the matching app.config.<env> module's CONFIG object.
environment_name = environ.get('ENVIRONMENT_NAME', 'dev')
config = import_module('app.config.%s' % environment_name).CONFIG


def get_config():
    """Return the CONFIG object for the currently selected environment."""
    return config
| StarcoderdataPython |
88823 | '''
Title : Day 10: Binary Numbers
Domain : Tutorials
Author : <NAME>
Created : 03 April 2019
'''
#!/bin/python3
import math
import os
import random
import re
import sys
def max_consecutive_ones(n):
    """Return the length of the longest run of consecutive 1-bits in n.

    Args:
        n: a non-negative integer.

    Returns:
        Length of the longest run of 1s in n's binary representation;
        0 when n == 0 (its binary form "0" contains no 1-bits).
    """
    # bin(n) is e.g. "0b1101"; strip the "0b" prefix.  Every maximal run of
    # 1s then appears as one chunk of the string split on "0".
    return max(len(run) for run in bin(n)[2:].split("0"))


if __name__ == '__main__':
    n = int(input())
    print(max_consecutive_ones(n))
| StarcoderdataPython |
33410 | <filename>examples/example_interactive_prefix/main.py
"""This example read the settings and print them out, so you can check how they get loaded.
"""
# noinspection PyUnresolvedReferences,PyPackageRequirements
from settings import my_settings as settings
# Echo each loaded setting so the reader can verify how values were resolved.
print(f"Redis Host: {settings.redis_host}")
print(f"Redis Port: {settings.redis_port}")
print(f"Redis Port / 2: {settings.redis_port / 2}")  # arithmetic works, so the port loaded as int
print(f"API Port: {settings.api_port}")
print(f"API Port * 2: {settings.api_port * 2}")  # arithmetic works, so the port loaded as int
print(f"API Password: {settings.api_password}")
print(f"API Token: {settings.api_token}")
| StarcoderdataPython |
3374880 | <gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# _version.py
"""Package initialization for DVHA-Analytics."""
# Copyright (c) 2020 <NAME>
# This file is part of DVH Analytics
# See the file LICENSE included with this distribution, also
# available at https://github.com/cutright/DVH-Analytics
# Keep the version in ONE place (__version_info__) and derive the string
# forms from it, so the three version values can never drift apart.
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__version_info__ = (0, 9, 7)
__version__ = ".".join(str(part) for part in __version_info__)
__release__ = __version__
| StarcoderdataPython |
1716555 | # -*- coding: utf-8 -*-
"""BioImageIT formats reader service provider.
This module implement the runner service provider
Classes
-------
RunnerServiceProvider
"""
from ._plugins._csv import (TableCSVServiceBuilder, ArrayCSVServiceBuilder, NumberCSVServiceBuilder)
from ._plugins._imagetiff import ImagetiffServiceBuilder
from ._plugins._movietxt import MovietxtServiceBuilder
from ._plugins._trackmate import TrackmateModelServiceBuilder
class ObjectFactory:
    """Generic registry-based factory (factory design pattern).

    Builders are registered under a key and invoked with the keyword
    arguments passed to :meth:`create`.
    """

    def __init__(self):
        # Maps a service key to the builder callable that constructs it.
        self._builders = {}

    def register_builder(self, key, builder):
        """Register *builder* (a callable) under *key*, replacing any previous one."""
        self._builders[key] = builder

    def create(self, key, **kwargs):
        """Build and return the service registered under *key*.

        Raises:
            ValueError: if no builder was registered for *key*.
        """
        # EAFP lookup: a plain .get() + truthiness test would also reject a
        # registered-but-falsy builder; only a truly missing key is an error.
        try:
            builder = self._builders[key]
        except KeyError:
            raise ValueError(key)
        return builder(**kwargs)
class FormatsReaderServiceProvider(ObjectFactory):
    """Factory specialised for BioImageIT format-reader services."""

    def get(self, service_id, **kwargs):
        """Build and return the reader service registered as *service_id*."""
        return self.create(service_id, **kwargs)
# Module-level singleton registry: map each supported data format key to the
# builder that creates its reader service.  Consumers obtain a reader via
# formatsServices.get('<format key>', **kwargs).
formatsServices = FormatsReaderServiceProvider()
formatsServices.register_builder('imagetiff', ImagetiffServiceBuilder())
formatsServices.register_builder('movietxt', MovietxtServiceBuilder())
formatsServices.register_builder('tablecsv', TableCSVServiceBuilder())
formatsServices.register_builder('arraycsv', ArrayCSVServiceBuilder())
formatsServices.register_builder('numbercsv', NumberCSVServiceBuilder())
formatsServices.register_builder('trackmatemodel', TrackmateModelServiceBuilder())
| StarcoderdataPython |
3231134 | #
# Autogenerated by Thrift Compiler (0.9.3)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py:new_style
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
import sentry_common_service.ttypes
import sentry_policy_service.ttypes
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
  from thrift.protocol import fastbinary
except ImportError:
  # The C accelerator is optional; fall back to the pure-Python
  # (de)serialization paths guarded by "fastbinary is not None" below.
  # Narrowed from a bare "except:" so unrelated failures are not swallowed.
  fastbinary = None
class TSentryGrantOption(object):
  # Generated Thrift "enum": tri-state value used by
  # TSentryPrivilege.grantOption (UNSET leaves the choice to the server).
  TRUE = 1
  FALSE = 0
  UNSET = -1

  # Lookup tables mapping between wire values and symbolic names.
  _VALUES_TO_NAMES = {
    1: "TRUE",
    0: "FALSE",
    -1: "UNSET",
  }

  _NAMES_TO_VALUES = {
    "TRUE": 1,
    "FALSE": 0,
    "UNSET": -1,
  }
class TAuthorizable(object):
  """
  Generated Thrift struct: one (type, name) element of the resource
  hierarchy that privileges are granted on.  Both fields are required
  (enforced by validate(), not by __init__).

  Attributes:
   - type
   - name
  """

  # Thrift field metadata, indexed by field id:
  # (id, wire type, field name, nested type spec, default value)
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'type', None, None, ), # 1
    (2, TType.STRING, 'name', None, None, ), # 2
  )

  def __init__(self, type=None, name=None,):
    self.type = type
    self.name = name

  def read(self, iprot):
    """Deserialize this struct from *iprot*, preferring the C fast path."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Pure-Python fallback: read field-by-field; unknown fields are skipped
    # so newer peers stay wire-compatible.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.type = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.name = iprot.readString()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*, preferring the C fast path."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TAuthorizable')
    if self.type is not None:
      oprot.writeFieldBegin('type', TType.STRING, 1)
      oprot.writeString(self.type)
      oprot.writeFieldEnd()
    if self.name is not None:
      oprot.writeFieldBegin('name', TType.STRING, 2)
      oprot.writeString(self.name)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if any required field is unset."""
    if self.type is None:
      raise TProtocol.TProtocolException(message='Required field type is unset!')
    if self.name is None:
      raise TProtocol.TProtocolException(message='Required field name is unset!')
    return

  def __hash__(self):
    # 31-based accumulation over all fields (mirrors Java's hashCode idiom).
    value = 17
    value = (value * 31) ^ hash(self.type)
    value = (value * 31) ^ hash(self.name)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class TSentryPrivilege(object):
  """
  Generated Thrift struct describing one Sentry privilege grant.
  Required fields (per validate()): component, serviceName, authorizables,
  action.  createTime and grantorPrincipal are optional; grantOption
  defaults to 0 (TSentryGrantOption.FALSE).

  Attributes:
   - component
   - serviceName
   - authorizables
   - action
   - createTime
   - grantorPrincipal
   - grantOption
  """

  # Thrift field metadata, indexed by field id:
  # (id, wire type, field name, nested type spec, default value)
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'component', None, None, ), # 1
    (2, TType.STRING, 'serviceName', None, None, ), # 2
    (3, TType.LIST, 'authorizables', (TType.STRUCT,(TAuthorizable, TAuthorizable.thrift_spec)), None, ), # 3
    (4, TType.STRING, 'action', None, None, ), # 4
    (5, TType.I64, 'createTime', None, None, ), # 5
    (6, TType.STRING, 'grantorPrincipal', None, None, ), # 6
    (7, TType.I32, 'grantOption', None, 0, ), # 7
  )

  def __init__(self, component=None, serviceName=None, authorizables=None, action=None, createTime=None, grantorPrincipal=None, grantOption=thrift_spec[7][4],):
    self.component = component
    self.serviceName = serviceName
    self.authorizables = authorizables
    self.action = action
    self.createTime = createTime
    self.grantorPrincipal = grantorPrincipal
    self.grantOption = grantOption

  def read(self, iprot):
    """Deserialize this struct from *iprot*, preferring the C fast path."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Pure-Python fallback: read field-by-field; unknown fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.component = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.serviceName = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.LIST:
          # Decode list<TAuthorizable> element by element.
          self.authorizables = []
          (_etype3, _size0) = iprot.readListBegin()
          for _i4 in xrange(_size0):
            _elem5 = TAuthorizable()
            _elem5.read(iprot)
            self.authorizables.append(_elem5)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.STRING:
          self.action = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.I64:
          self.createTime = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 6:
        if ftype == TType.STRING:
          self.grantorPrincipal = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 7:
        if ftype == TType.I32:
          self.grantOption = iprot.readI32()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*, preferring the C fast path."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TSentryPrivilege')
    if self.component is not None:
      oprot.writeFieldBegin('component', TType.STRING, 1)
      oprot.writeString(self.component)
      oprot.writeFieldEnd()
    if self.serviceName is not None:
      oprot.writeFieldBegin('serviceName', TType.STRING, 2)
      oprot.writeString(self.serviceName)
      oprot.writeFieldEnd()
    if self.authorizables is not None:
      oprot.writeFieldBegin('authorizables', TType.LIST, 3)
      oprot.writeListBegin(TType.STRUCT, len(self.authorizables))
      for iter6 in self.authorizables:
        iter6.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.action is not None:
      oprot.writeFieldBegin('action', TType.STRING, 4)
      oprot.writeString(self.action)
      oprot.writeFieldEnd()
    if self.createTime is not None:
      oprot.writeFieldBegin('createTime', TType.I64, 5)
      oprot.writeI64(self.createTime)
      oprot.writeFieldEnd()
    if self.grantorPrincipal is not None:
      oprot.writeFieldBegin('grantorPrincipal', TType.STRING, 6)
      oprot.writeString(self.grantorPrincipal)
      oprot.writeFieldEnd()
    if self.grantOption is not None:
      oprot.writeFieldBegin('grantOption', TType.I32, 7)
      oprot.writeI32(self.grantOption)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if any required field is unset."""
    if self.component is None:
      raise TProtocol.TProtocolException(message='Required field component is unset!')
    if self.serviceName is None:
      raise TProtocol.TProtocolException(message='Required field serviceName is unset!')
    if self.authorizables is None:
      raise TProtocol.TProtocolException(message='Required field authorizables is unset!')
    if self.action is None:
      raise TProtocol.TProtocolException(message='Required field action is unset!')
    return

  def __hash__(self):
    # 31-based accumulation; the mutable list is wrapped in frozenset to
    # make it hashable.
    value = 17
    value = (value * 31) ^ hash(self.component)
    value = (value * 31) ^ hash(self.serviceName)
    value = (value * 31) ^ hash(frozenset(self.authorizables))
    value = (value * 31) ^ hash(self.action)
    value = (value * 31) ^ hash(self.createTime)
    value = (value * 31) ^ hash(self.grantorPrincipal)
    value = (value * 31) ^ hash(self.grantOption)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class TCreateSentryRoleRequest(object):
  """
  Generated Thrift request struct for creating a Sentry role.
  All four fields are required (per validate()); protocol_version
  defaults to 2.

  Attributes:
   - protocol_version
   - requestorUserName
   - roleName
   - component
  """

  # Thrift field metadata, indexed by field id:
  # (id, wire type, field name, nested type spec, default value)
  thrift_spec = (
    None, # 0
    (1, TType.I32, 'protocol_version', None, 2, ), # 1
    (2, TType.STRING, 'requestorUserName', None, None, ), # 2
    (3, TType.STRING, 'roleName', None, None, ), # 3
    (4, TType.STRING, 'component', None, None, ), # 4
  )

  def __init__(self, protocol_version=thrift_spec[1][4], requestorUserName=None, roleName=None, component=None,):
    self.protocol_version = protocol_version
    self.requestorUserName = requestorUserName
    self.roleName = roleName
    self.component = component

  def read(self, iprot):
    """Deserialize this struct from *iprot*, preferring the C fast path."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Pure-Python fallback: read field-by-field; unknown fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I32:
          self.protocol_version = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.requestorUserName = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.roleName = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.STRING:
          self.component = iprot.readString()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*, preferring the C fast path."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TCreateSentryRoleRequest')
    if self.protocol_version is not None:
      oprot.writeFieldBegin('protocol_version', TType.I32, 1)
      oprot.writeI32(self.protocol_version)
      oprot.writeFieldEnd()
    if self.requestorUserName is not None:
      oprot.writeFieldBegin('requestorUserName', TType.STRING, 2)
      oprot.writeString(self.requestorUserName)
      oprot.writeFieldEnd()
    if self.roleName is not None:
      oprot.writeFieldBegin('roleName', TType.STRING, 3)
      oprot.writeString(self.roleName)
      oprot.writeFieldEnd()
    if self.component is not None:
      oprot.writeFieldBegin('component', TType.STRING, 4)
      oprot.writeString(self.component)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if any required field is unset."""
    if self.protocol_version is None:
      raise TProtocol.TProtocolException(message='Required field protocol_version is unset!')
    if self.requestorUserName is None:
      raise TProtocol.TProtocolException(message='Required field requestorUserName is unset!')
    if self.roleName is None:
      raise TProtocol.TProtocolException(message='Required field roleName is unset!')
    if self.component is None:
      raise TProtocol.TProtocolException(message='Required field component is unset!')
    return

  def __hash__(self):
    # 31-based accumulation over all fields (mirrors Java's hashCode idiom).
    value = 17
    value = (value * 31) ^ hash(self.protocol_version)
    value = (value * 31) ^ hash(self.requestorUserName)
    value = (value * 31) ^ hash(self.roleName)
    value = (value * 31) ^ hash(self.component)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class TCreateSentryRoleResponse(object):
  """
  Generated Thrift response struct for role creation; carries only the
  required status struct.

  Attributes:
   - status
  """

  # Thrift field metadata: field 1 is a nested TSentryResponseStatus struct.
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'status', (sentry_common_service.ttypes.TSentryResponseStatus, sentry_common_service.ttypes.TSentryResponseStatus.thrift_spec), None, ), # 1
  )

  def __init__(self, status=None,):
    self.status = status

  def read(self, iprot):
    """Deserialize this struct from *iprot*, preferring the C fast path."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Pure-Python fallback: read field-by-field; unknown fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.status = sentry_common_service.ttypes.TSentryResponseStatus()
          self.status.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*, preferring the C fast path."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TCreateSentryRoleResponse')
    if self.status is not None:
      oprot.writeFieldBegin('status', TType.STRUCT, 1)
      self.status.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if the required status field is unset."""
    if self.status is None:
      raise TProtocol.TProtocolException(message='Required field status is unset!')
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.status)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class TDropSentryRoleRequest(object):
  """
  Generated Thrift request struct for dropping a Sentry role.
  All four fields are required (per validate()); protocol_version
  defaults to 2.

  Attributes:
   - protocol_version
   - requestorUserName
   - roleName
   - component
  """

  # Thrift field metadata, indexed by field id:
  # (id, wire type, field name, nested type spec, default value)
  thrift_spec = (
    None, # 0
    (1, TType.I32, 'protocol_version', None, 2, ), # 1
    (2, TType.STRING, 'requestorUserName', None, None, ), # 2
    (3, TType.STRING, 'roleName', None, None, ), # 3
    (4, TType.STRING, 'component', None, None, ), # 4
  )

  def __init__(self, protocol_version=thrift_spec[1][4], requestorUserName=None, roleName=None, component=None,):
    self.protocol_version = protocol_version
    self.requestorUserName = requestorUserName
    self.roleName = roleName
    self.component = component

  def read(self, iprot):
    """Deserialize this struct from *iprot*, preferring the C fast path."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Pure-Python fallback: read field-by-field; unknown fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I32:
          self.protocol_version = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.requestorUserName = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.roleName = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.STRING:
          self.component = iprot.readString()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*, preferring the C fast path."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TDropSentryRoleRequest')
    if self.protocol_version is not None:
      oprot.writeFieldBegin('protocol_version', TType.I32, 1)
      oprot.writeI32(self.protocol_version)
      oprot.writeFieldEnd()
    if self.requestorUserName is not None:
      oprot.writeFieldBegin('requestorUserName', TType.STRING, 2)
      oprot.writeString(self.requestorUserName)
      oprot.writeFieldEnd()
    if self.roleName is not None:
      oprot.writeFieldBegin('roleName', TType.STRING, 3)
      oprot.writeString(self.roleName)
      oprot.writeFieldEnd()
    if self.component is not None:
      oprot.writeFieldBegin('component', TType.STRING, 4)
      oprot.writeString(self.component)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if any required field is unset."""
    if self.protocol_version is None:
      raise TProtocol.TProtocolException(message='Required field protocol_version is unset!')
    if self.requestorUserName is None:
      raise TProtocol.TProtocolException(message='Required field requestorUserName is unset!')
    if self.roleName is None:
      raise TProtocol.TProtocolException(message='Required field roleName is unset!')
    if self.component is None:
      raise TProtocol.TProtocolException(message='Required field component is unset!')
    return

  def __hash__(self):
    # 31-based accumulation over all fields (mirrors Java's hashCode idiom).
    value = 17
    value = (value * 31) ^ hash(self.protocol_version)
    value = (value * 31) ^ hash(self.requestorUserName)
    value = (value * 31) ^ hash(self.roleName)
    value = (value * 31) ^ hash(self.component)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class TDropSentryRoleResponse(object):
  """
  Generated Thrift response struct for role removal; carries only the
  required status struct.

  Attributes:
   - status
  """

  # Thrift field metadata: field 1 is a nested TSentryResponseStatus struct.
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'status', (sentry_common_service.ttypes.TSentryResponseStatus, sentry_common_service.ttypes.TSentryResponseStatus.thrift_spec), None, ), # 1
  )

  def __init__(self, status=None,):
    self.status = status

  def read(self, iprot):
    """Deserialize this struct from *iprot*, preferring the C fast path."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Pure-Python fallback: read field-by-field; unknown fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.status = sentry_common_service.ttypes.TSentryResponseStatus()
          self.status.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*, preferring the C fast path."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TDropSentryRoleResponse')
    if self.status is not None:
      oprot.writeFieldBegin('status', TType.STRUCT, 1)
      self.status.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if the required status field is unset."""
    if self.status is None:
      raise TProtocol.TProtocolException(message='Required field status is unset!')
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.status)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class TAlterSentryRoleAddGroupsRequest(object):
  """
  Generated Thrift request struct for adding groups to a Sentry role.
  All five fields are required (per validate()); protocol_version
  defaults to 2; groups is a set of group-name strings.

  Attributes:
   - protocol_version
   - requestorUserName
   - roleName
   - component
   - groups
  """

  # Thrift field metadata, indexed by field id:
  # (id, wire type, field name, nested type spec, default value)
  thrift_spec = (
    None, # 0
    (1, TType.I32, 'protocol_version', None, 2, ), # 1
    (2, TType.STRING, 'requestorUserName', None, None, ), # 2
    (3, TType.STRING, 'roleName', None, None, ), # 3
    (4, TType.STRING, 'component', None, None, ), # 4
    (5, TType.SET, 'groups', (TType.STRING,None), None, ), # 5
  )

  def __init__(self, protocol_version=thrift_spec[1][4], requestorUserName=None, roleName=None, component=None, groups=None,):
    self.protocol_version = protocol_version
    self.requestorUserName = requestorUserName
    self.roleName = roleName
    self.component = component
    self.groups = groups

  def read(self, iprot):
    """Deserialize this struct from *iprot*, preferring the C fast path."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Pure-Python fallback: read field-by-field; unknown fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I32:
          self.protocol_version = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.requestorUserName = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.roleName = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.STRING:
          self.component = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.SET:
          # Decode set<string> element by element.
          self.groups = set()
          (_etype10, _size7) = iprot.readSetBegin()
          for _i11 in xrange(_size7):
            _elem12 = iprot.readString()
            self.groups.add(_elem12)
          iprot.readSetEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*, preferring the C fast path."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TAlterSentryRoleAddGroupsRequest')
    if self.protocol_version is not None:
      oprot.writeFieldBegin('protocol_version', TType.I32, 1)
      oprot.writeI32(self.protocol_version)
      oprot.writeFieldEnd()
    if self.requestorUserName is not None:
      oprot.writeFieldBegin('requestorUserName', TType.STRING, 2)
      oprot.writeString(self.requestorUserName)
      oprot.writeFieldEnd()
    if self.roleName is not None:
      oprot.writeFieldBegin('roleName', TType.STRING, 3)
      oprot.writeString(self.roleName)
      oprot.writeFieldEnd()
    if self.component is not None:
      oprot.writeFieldBegin('component', TType.STRING, 4)
      oprot.writeString(self.component)
      oprot.writeFieldEnd()
    if self.groups is not None:
      oprot.writeFieldBegin('groups', TType.SET, 5)
      oprot.writeSetBegin(TType.STRING, len(self.groups))
      for iter13 in self.groups:
        oprot.writeString(iter13)
      oprot.writeSetEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if any required field is unset."""
    if self.protocol_version is None:
      raise TProtocol.TProtocolException(message='Required field protocol_version is unset!')
    if self.requestorUserName is None:
      raise TProtocol.TProtocolException(message='Required field requestorUserName is unset!')
    if self.roleName is None:
      raise TProtocol.TProtocolException(message='Required field roleName is unset!')
    if self.component is None:
      raise TProtocol.TProtocolException(message='Required field component is unset!')
    if self.groups is None:
      raise TProtocol.TProtocolException(message='Required field groups is unset!')
    return

  def __hash__(self):
    # 31-based accumulation over all fields.  BUGFIX: the generated code
    # hashed self.groups directly, which raises TypeError because sets are
    # unhashable; wrap in frozenset, matching TSentryPrivilege.__hash__'s
    # treatment of its collection field.
    value = 17
    value = (value * 31) ^ hash(self.protocol_version)
    value = (value * 31) ^ hash(self.requestorUserName)
    value = (value * 31) ^ hash(self.roleName)
    value = (value * 31) ^ hash(self.component)
    value = (value * 31) ^ hash(frozenset(self.groups))
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class TAlterSentryRoleAddGroupsResponse(object):
  """
  Generated Thrift response struct for the add-groups call; carries only
  the required status struct.

  Attributes:
   - status
  """

  # Thrift field metadata: field 1 is a nested TSentryResponseStatus struct.
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'status', (sentry_common_service.ttypes.TSentryResponseStatus, sentry_common_service.ttypes.TSentryResponseStatus.thrift_spec), None, ), # 1
  )

  def __init__(self, status=None,):
    self.status = status

  def read(self, iprot):
    """Deserialize this struct from *iprot*, preferring the C fast path."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Pure-Python fallback: read field-by-field; unknown fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.status = sentry_common_service.ttypes.TSentryResponseStatus()
          self.status.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*, preferring the C fast path."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TAlterSentryRoleAddGroupsResponse')
    if self.status is not None:
      oprot.writeFieldBegin('status', TType.STRUCT, 1)
      self.status.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if the required status field is unset."""
    if self.status is None:
      raise TProtocol.TProtocolException(message='Required field status is unset!')
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.status)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class TAlterSentryRoleDeleteGroupsRequest(object):
  """
  Generated Thrift request struct for removing groups from a Sentry role.
  All five fields are required (per validate()); protocol_version
  defaults to 2; groups is a set of group-name strings.

  Attributes:
   - protocol_version
   - requestorUserName
   - roleName
   - component
   - groups
  """

  # Thrift field metadata, indexed by field id:
  # (id, wire type, field name, nested type spec, default value)
  thrift_spec = (
    None, # 0
    (1, TType.I32, 'protocol_version', None, 2, ), # 1
    (2, TType.STRING, 'requestorUserName', None, None, ), # 2
    (3, TType.STRING, 'roleName', None, None, ), # 3
    (4, TType.STRING, 'component', None, None, ), # 4
    (5, TType.SET, 'groups', (TType.STRING,None), None, ), # 5
  )

  def __init__(self, protocol_version=thrift_spec[1][4], requestorUserName=None, roleName=None, component=None, groups=None,):
    self.protocol_version = protocol_version
    self.requestorUserName = requestorUserName
    self.roleName = roleName
    self.component = component
    self.groups = groups

  def read(self, iprot):
    """Deserialize this struct from *iprot*, preferring the C fast path."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Pure-Python fallback: read field-by-field; unknown fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I32:
          self.protocol_version = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.requestorUserName = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.roleName = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.STRING:
          self.component = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.SET:
          # Decode set<string> element by element.
          self.groups = set()
          (_etype17, _size14) = iprot.readSetBegin()
          for _i18 in xrange(_size14):
            _elem19 = iprot.readString()
            self.groups.add(_elem19)
          iprot.readSetEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*, preferring the C fast path."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TAlterSentryRoleDeleteGroupsRequest')
    if self.protocol_version is not None:
      oprot.writeFieldBegin('protocol_version', TType.I32, 1)
      oprot.writeI32(self.protocol_version)
      oprot.writeFieldEnd()
    if self.requestorUserName is not None:
      oprot.writeFieldBegin('requestorUserName', TType.STRING, 2)
      oprot.writeString(self.requestorUserName)
      oprot.writeFieldEnd()
    if self.roleName is not None:
      oprot.writeFieldBegin('roleName', TType.STRING, 3)
      oprot.writeString(self.roleName)
      oprot.writeFieldEnd()
    if self.component is not None:
      oprot.writeFieldBegin('component', TType.STRING, 4)
      oprot.writeString(self.component)
      oprot.writeFieldEnd()
    if self.groups is not None:
      oprot.writeFieldBegin('groups', TType.SET, 5)
      oprot.writeSetBegin(TType.STRING, len(self.groups))
      for iter20 in self.groups:
        oprot.writeString(iter20)
      oprot.writeSetEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if any required field is unset."""
    if self.protocol_version is None:
      raise TProtocol.TProtocolException(message='Required field protocol_version is unset!')
    if self.requestorUserName is None:
      raise TProtocol.TProtocolException(message='Required field requestorUserName is unset!')
    if self.roleName is None:
      raise TProtocol.TProtocolException(message='Required field roleName is unset!')
    if self.component is None:
      raise TProtocol.TProtocolException(message='Required field component is unset!')
    if self.groups is None:
      raise TProtocol.TProtocolException(message='Required field groups is unset!')
    return

  def __hash__(self):
    # 31-based accumulation over all fields.  BUGFIX: the generated code
    # hashed self.groups directly, which raises TypeError because sets are
    # unhashable; wrap in frozenset, matching TSentryPrivilege.__hash__'s
    # treatment of its collection field.
    value = 17
    value = (value * 31) ^ hash(self.protocol_version)
    value = (value * 31) ^ hash(self.requestorUserName)
    value = (value * 31) ^ hash(self.roleName)
    value = (value * 31) ^ hash(self.component)
    value = (value * 31) ^ hash(frozenset(self.groups))
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class TAlterSentryRoleDeleteGroupsResponse(object):
  """
  Thrift struct: result of deleting groups from a Sentry role.

  Attributes:
   - status: TSentryResponseStatus outcome of the request (required)
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'status', (sentry_common_service.ttypes.TSentryResponseStatus, sentry_common_service.ttypes.TSentryResponseStatus.thrift_spec), None, ), # 1
  )
  def __init__(self, status=None,):
    self.status = status
  def read(self, iprot):
    """Deserialize this struct from *iprot*; unknown fields are skipped.

    Takes the accelerated C decoder fast path when available.
    """
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.status = sentry_common_service.ttypes.TSentryResponseStatus()
          self.status.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TAlterSentryRoleDeleteGroupsResponse')
    if self.status is not None:
      oprot.writeFieldBegin('status', TType.STRUCT, 1)
      self.status.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Raise TProtocolException if the required ``status`` is unset."""
    if self.status is None:
      raise TProtocol.TProtocolException(message='Required field status is unset!')
    return
  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.status)
    return value
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TAlterSentryRoleGrantPrivilegeRequest(object):
  """
  Thrift struct: request to grant a privilege to a Sentry role.

  Attributes:
   - protocol_version: i32 protocol version, defaults to 2 (required)
   - requestorUserName: user issuing the request (required)
   - roleName: role receiving the privilege (required)
   - component: authorization component, e.g. service name (required)
   - privilege: TSentryPrivilege to grant (required)
  """
  thrift_spec = (
    None, # 0
    (1, TType.I32, 'protocol_version', None, 2, ), # 1
    (2, TType.STRING, 'requestorUserName', None, None, ), # 2
    (3, TType.STRING, 'roleName', None, None, ), # 3
    (4, TType.STRING, 'component', None, None, ), # 4
    (5, TType.STRUCT, 'privilege', (TSentryPrivilege, TSentryPrivilege.thrift_spec), None, ), # 5
  )
  def __init__(self, protocol_version=thrift_spec[1][4], requestorUserName=None, roleName=None, component=None, privilege=None,):
    self.protocol_version = protocol_version
    self.requestorUserName = requestorUserName
    self.roleName = roleName
    self.component = component
    self.privilege = privilege
  def read(self, iprot):
    """Deserialize this struct from *iprot*; unknown fields are skipped.

    Takes the accelerated C decoder fast path when available.
    """
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I32:
          self.protocol_version = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.requestorUserName = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.roleName = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.STRING:
          self.component = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.STRUCT:
          self.privilege = TSentryPrivilege()
          self.privilege.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TAlterSentryRoleGrantPrivilegeRequest')
    if self.protocol_version is not None:
      oprot.writeFieldBegin('protocol_version', TType.I32, 1)
      oprot.writeI32(self.protocol_version)
      oprot.writeFieldEnd()
    if self.requestorUserName is not None:
      oprot.writeFieldBegin('requestorUserName', TType.STRING, 2)
      oprot.writeString(self.requestorUserName)
      oprot.writeFieldEnd()
    if self.roleName is not None:
      oprot.writeFieldBegin('roleName', TType.STRING, 3)
      oprot.writeString(self.roleName)
      oprot.writeFieldEnd()
    if self.component is not None:
      oprot.writeFieldBegin('component', TType.STRING, 4)
      oprot.writeString(self.component)
      oprot.writeFieldEnd()
    if self.privilege is not None:
      oprot.writeFieldBegin('privilege', TType.STRUCT, 5)
      self.privilege.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Raise TProtocolException if any required field is unset."""
    if self.protocol_version is None:
      raise TProtocol.TProtocolException(message='Required field protocol_version is unset!')
    if self.requestorUserName is None:
      raise TProtocol.TProtocolException(message='Required field requestorUserName is unset!')
    if self.roleName is None:
      raise TProtocol.TProtocolException(message='Required field roleName is unset!')
    if self.component is None:
      raise TProtocol.TProtocolException(message='Required field component is unset!')
    if self.privilege is None:
      raise TProtocol.TProtocolException(message='Required field privilege is unset!')
    return
  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.protocol_version)
    value = (value * 31) ^ hash(self.requestorUserName)
    value = (value * 31) ^ hash(self.roleName)
    value = (value * 31) ^ hash(self.component)
    value = (value * 31) ^ hash(self.privilege)
    return value
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TAlterSentryRoleGrantPrivilegeResponse(object):
  """
  Thrift struct: result of granting a privilege to a Sentry role.

  Attributes:
   - status: TSentryResponseStatus outcome of the request (required)
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'status', (sentry_common_service.ttypes.TSentryResponseStatus, sentry_common_service.ttypes.TSentryResponseStatus.thrift_spec), None, ), # 1
  )
  def __init__(self, status=None,):
    self.status = status
  def read(self, iprot):
    """Deserialize this struct from *iprot*; unknown fields are skipped.

    Takes the accelerated C decoder fast path when available.
    """
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.status = sentry_common_service.ttypes.TSentryResponseStatus()
          self.status.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TAlterSentryRoleGrantPrivilegeResponse')
    if self.status is not None:
      oprot.writeFieldBegin('status', TType.STRUCT, 1)
      self.status.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Raise TProtocolException if the required ``status`` is unset."""
    if self.status is None:
      raise TProtocol.TProtocolException(message='Required field status is unset!')
    return
  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.status)
    return value
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TAlterSentryRoleRevokePrivilegeRequest(object):
  """
  Thrift struct: request to revoke a privilege from a Sentry role.

  Attributes:
   - protocol_version: i32 protocol version, defaults to 2 (required)
   - requestorUserName: user issuing the request (required)
   - roleName: role losing the privilege (required)
   - component: authorization component, e.g. service name (required)
   - privilege: TSentryPrivilege to revoke (required)
  """
  thrift_spec = (
    None, # 0
    (1, TType.I32, 'protocol_version', None, 2, ), # 1
    (2, TType.STRING, 'requestorUserName', None, None, ), # 2
    (3, TType.STRING, 'roleName', None, None, ), # 3
    (4, TType.STRING, 'component', None, None, ), # 4
    (5, TType.STRUCT, 'privilege', (TSentryPrivilege, TSentryPrivilege.thrift_spec), None, ), # 5
  )
  def __init__(self, protocol_version=thrift_spec[1][4], requestorUserName=None, roleName=None, component=None, privilege=None,):
    self.protocol_version = protocol_version
    self.requestorUserName = requestorUserName
    self.roleName = roleName
    self.component = component
    self.privilege = privilege
  def read(self, iprot):
    """Deserialize this struct from *iprot*; unknown fields are skipped.

    Takes the accelerated C decoder fast path when available.
    """
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I32:
          self.protocol_version = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.requestorUserName = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.roleName = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.STRING:
          self.component = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.STRUCT:
          self.privilege = TSentryPrivilege()
          self.privilege.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TAlterSentryRoleRevokePrivilegeRequest')
    if self.protocol_version is not None:
      oprot.writeFieldBegin('protocol_version', TType.I32, 1)
      oprot.writeI32(self.protocol_version)
      oprot.writeFieldEnd()
    if self.requestorUserName is not None:
      oprot.writeFieldBegin('requestorUserName', TType.STRING, 2)
      oprot.writeString(self.requestorUserName)
      oprot.writeFieldEnd()
    if self.roleName is not None:
      oprot.writeFieldBegin('roleName', TType.STRING, 3)
      oprot.writeString(self.roleName)
      oprot.writeFieldEnd()
    if self.component is not None:
      oprot.writeFieldBegin('component', TType.STRING, 4)
      oprot.writeString(self.component)
      oprot.writeFieldEnd()
    if self.privilege is not None:
      oprot.writeFieldBegin('privilege', TType.STRUCT, 5)
      self.privilege.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Raise TProtocolException if any required field is unset."""
    if self.protocol_version is None:
      raise TProtocol.TProtocolException(message='Required field protocol_version is unset!')
    if self.requestorUserName is None:
      raise TProtocol.TProtocolException(message='Required field requestorUserName is unset!')
    if self.roleName is None:
      raise TProtocol.TProtocolException(message='Required field roleName is unset!')
    if self.component is None:
      raise TProtocol.TProtocolException(message='Required field component is unset!')
    if self.privilege is None:
      raise TProtocol.TProtocolException(message='Required field privilege is unset!')
    return
  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.protocol_version)
    value = (value * 31) ^ hash(self.requestorUserName)
    value = (value * 31) ^ hash(self.roleName)
    value = (value * 31) ^ hash(self.component)
    value = (value * 31) ^ hash(self.privilege)
    return value
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TAlterSentryRoleRevokePrivilegeResponse(object):
  """
  Thrift struct: result of revoking a privilege from a Sentry role.

  Attributes:
   - status: TSentryResponseStatus outcome of the request (required)
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'status', (sentry_common_service.ttypes.TSentryResponseStatus, sentry_common_service.ttypes.TSentryResponseStatus.thrift_spec), None, ), # 1
  )
  def __init__(self, status=None,):
    self.status = status
  def read(self, iprot):
    """Deserialize this struct from *iprot*; unknown fields are skipped.

    Takes the accelerated C decoder fast path when available.
    """
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.status = sentry_common_service.ttypes.TSentryResponseStatus()
          self.status.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TAlterSentryRoleRevokePrivilegeResponse')
    if self.status is not None:
      oprot.writeFieldBegin('status', TType.STRUCT, 1)
      self.status.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Raise TProtocolException if the required ``status`` is unset."""
    if self.status is None:
      raise TProtocol.TProtocolException(message='Required field status is unset!')
    return
  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.status)
    return value
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TListSentryRolesRequest(object):
  """
  Thrift struct: request to list Sentry roles, optionally filtered by group.

  Attributes:
   - protocol_version: i32 protocol version, defaults to 2 (required)
   - requestorUserName: user issuing the request (required)
   - groupName: group to filter roles by (optional; not checked by validate)
   - component: authorization component, e.g. service name (required)
  """
  thrift_spec = (
    None, # 0
    (1, TType.I32, 'protocol_version', None, 2, ), # 1
    (2, TType.STRING, 'requestorUserName', None, None, ), # 2
    (3, TType.STRING, 'groupName', None, None, ), # 3
    (4, TType.STRING, 'component', None, None, ), # 4
  )
  def __init__(self, protocol_version=thrift_spec[1][4], requestorUserName=None, groupName=None, component=None,):
    self.protocol_version = protocol_version
    self.requestorUserName = requestorUserName
    self.groupName = groupName
    self.component = component
  def read(self, iprot):
    """Deserialize this struct from *iprot*; unknown fields are skipped.

    Takes the accelerated C decoder fast path when available.
    """
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I32:
          self.protocol_version = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.requestorUserName = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.groupName = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.STRING:
          self.component = iprot.readString()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TListSentryRolesRequest')
    if self.protocol_version is not None:
      oprot.writeFieldBegin('protocol_version', TType.I32, 1)
      oprot.writeI32(self.protocol_version)
      oprot.writeFieldEnd()
    if self.requestorUserName is not None:
      oprot.writeFieldBegin('requestorUserName', TType.STRING, 2)
      oprot.writeString(self.requestorUserName)
      oprot.writeFieldEnd()
    if self.groupName is not None:
      oprot.writeFieldBegin('groupName', TType.STRING, 3)
      oprot.writeString(self.groupName)
      oprot.writeFieldEnd()
    if self.component is not None:
      oprot.writeFieldBegin('component', TType.STRING, 4)
      oprot.writeString(self.component)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Raise TProtocolException if a required field is unset.

    ``groupName`` is optional and deliberately not checked here.
    """
    if self.protocol_version is None:
      raise TProtocol.TProtocolException(message='Required field protocol_version is unset!')
    if self.requestorUserName is None:
      raise TProtocol.TProtocolException(message='Required field requestorUserName is unset!')
    if self.component is None:
      raise TProtocol.TProtocolException(message='Required field component is unset!')
    return
  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.protocol_version)
    value = (value * 31) ^ hash(self.requestorUserName)
    value = (value * 31) ^ hash(self.groupName)
    value = (value * 31) ^ hash(self.component)
    return value
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TSentryRole(object):
  """
  Thrift struct: a Sentry role and the set of groups assigned to it.

  Attributes:
   - roleName: name of the role (required)
   - groups: set of group-name strings assigned to the role (required)
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'roleName', None, None, ), # 1
    (2, TType.SET, 'groups', (TType.STRING,None), None, ), # 2
  )
  def __init__(self, roleName=None, groups=None,):
    self.roleName = roleName
    self.groups = groups
  def read(self, iprot):
    """Deserialize this struct from *iprot*; unknown fields are skipped.

    Takes the accelerated C decoder fast path when available.
    """
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.roleName = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.SET:
          self.groups = set()
          (_etype24, _size21) = iprot.readSetBegin()
          for _i25 in xrange(_size21):
            _elem26 = iprot.readString()
            self.groups.add(_elem26)
          iprot.readSetEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TSentryRole')
    if self.roleName is not None:
      oprot.writeFieldBegin('roleName', TType.STRING, 1)
      oprot.writeString(self.roleName)
      oprot.writeFieldEnd()
    if self.groups is not None:
      oprot.writeFieldBegin('groups', TType.SET, 2)
      oprot.writeSetBegin(TType.STRING, len(self.groups))
      for iter27 in self.groups:
        oprot.writeString(iter27)
      oprot.writeSetEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Raise TProtocolException if any required field is unset."""
    if self.roleName is None:
      raise TProtocol.TProtocolException(message='Required field roleName is unset!')
    if self.groups is None:
      raise TProtocol.TProtocolException(message='Required field groups is unset!')
    return
  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.roleName)
    # frozenset: the mutable set itself is unhashable
    value = (value * 31) ^ hash(frozenset(self.groups))
    return value
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TListSentryRolesResponse(object):
  """
  Thrift struct: result of listing Sentry roles.

  Attributes:
   - status: TSentryResponseStatus outcome of the request (required)
   - roles: set of TSentryRole structs (optional; not checked by validate)
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'status', (sentry_common_service.ttypes.TSentryResponseStatus, sentry_common_service.ttypes.TSentryResponseStatus.thrift_spec), None, ), # 1
    (2, TType.SET, 'roles', (TType.STRUCT,(TSentryRole, TSentryRole.thrift_spec)), None, ), # 2
  )
  def __init__(self, status=None, roles=None,):
    self.status = status
    self.roles = roles
  def read(self, iprot):
    """Deserialize this struct from *iprot*; unknown fields are skipped.

    Takes the accelerated C decoder fast path when available.
    """
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.status = sentry_common_service.ttypes.TSentryResponseStatus()
          self.status.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.SET:
          self.roles = set()
          (_etype31, _size28) = iprot.readSetBegin()
          for _i32 in xrange(_size28):
            _elem33 = TSentryRole()
            _elem33.read(iprot)
            self.roles.add(_elem33)
          iprot.readSetEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TListSentryRolesResponse')
    if self.status is not None:
      oprot.writeFieldBegin('status', TType.STRUCT, 1)
      self.status.write(oprot)
      oprot.writeFieldEnd()
    if self.roles is not None:
      oprot.writeFieldBegin('roles', TType.SET, 2)
      oprot.writeSetBegin(TType.STRUCT, len(self.roles))
      for iter34 in self.roles:
        iter34.write(oprot)
      oprot.writeSetEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Raise TProtocolException if the required ``status`` is unset."""
    if self.status is None:
      raise TProtocol.TProtocolException(message='Required field status is unset!')
    return
  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.status)
    # BUG FIX: roles is a mutable set (unhashable); hash an immutable
    # frozenset snapshot instead, as TSentryRole.__hash__ does for its
    # set field. The generated code hashed the set directly (TypeError).
    value = (value * 31) ^ hash(frozenset(self.roles) if self.roles is not None else self.roles)
    return value
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TListSentryPrivilegesRequest(object):
  """
  Thrift struct: request to list the privileges of a Sentry role,
  optionally scoped to an authorizable hierarchy.

  Attributes:
   - protocol_version: i32 protocol version, defaults to 2 (required)
   - requestorUserName: user issuing the request (required)
   - roleName: role whose privileges are listed (required)
   - component: authorization component, e.g. service name (required)
   - serviceName: service the privileges apply to (required)
   - authorizables: list of TAuthorizable scoping the query (optional;
     not checked by validate)
  """
  thrift_spec = (
    None, # 0
    (1, TType.I32, 'protocol_version', None, 2, ), # 1
    (2, TType.STRING, 'requestorUserName', None, None, ), # 2
    (3, TType.STRING, 'roleName', None, None, ), # 3
    (4, TType.STRING, 'component', None, None, ), # 4
    (5, TType.STRING, 'serviceName', None, None, ), # 5
    (6, TType.LIST, 'authorizables', (TType.STRUCT,(TAuthorizable, TAuthorizable.thrift_spec)), None, ), # 6
  )
  def __init__(self, protocol_version=thrift_spec[1][4], requestorUserName=None, roleName=None, component=None, serviceName=None, authorizables=None,):
    self.protocol_version = protocol_version
    self.requestorUserName = requestorUserName
    self.roleName = roleName
    self.component = component
    self.serviceName = serviceName
    self.authorizables = authorizables
  def read(self, iprot):
    """Deserialize this struct from *iprot*; unknown fields are skipped.

    Takes the accelerated C decoder fast path when available.
    """
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I32:
          self.protocol_version = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.requestorUserName = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.roleName = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.STRING:
          self.component = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.STRING:
          self.serviceName = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 6:
        if ftype == TType.LIST:
          self.authorizables = []
          (_etype38, _size35) = iprot.readListBegin()
          for _i39 in xrange(_size35):
            _elem40 = TAuthorizable()
            _elem40.read(iprot)
            self.authorizables.append(_elem40)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TListSentryPrivilegesRequest')
    if self.protocol_version is not None:
      oprot.writeFieldBegin('protocol_version', TType.I32, 1)
      oprot.writeI32(self.protocol_version)
      oprot.writeFieldEnd()
    if self.requestorUserName is not None:
      oprot.writeFieldBegin('requestorUserName', TType.STRING, 2)
      oprot.writeString(self.requestorUserName)
      oprot.writeFieldEnd()
    if self.roleName is not None:
      oprot.writeFieldBegin('roleName', TType.STRING, 3)
      oprot.writeString(self.roleName)
      oprot.writeFieldEnd()
    if self.component is not None:
      oprot.writeFieldBegin('component', TType.STRING, 4)
      oprot.writeString(self.component)
      oprot.writeFieldEnd()
    if self.serviceName is not None:
      oprot.writeFieldBegin('serviceName', TType.STRING, 5)
      oprot.writeString(self.serviceName)
      oprot.writeFieldEnd()
    if self.authorizables is not None:
      oprot.writeFieldBegin('authorizables', TType.LIST, 6)
      oprot.writeListBegin(TType.STRUCT, len(self.authorizables))
      for iter41 in self.authorizables:
        iter41.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Raise TProtocolException if a required field is unset.

    ``authorizables`` is optional and deliberately not checked here.
    """
    if self.protocol_version is None:
      raise TProtocol.TProtocolException(message='Required field protocol_version is unset!')
    if self.requestorUserName is None:
      raise TProtocol.TProtocolException(message='Required field requestorUserName is unset!')
    if self.roleName is None:
      raise TProtocol.TProtocolException(message='Required field roleName is unset!')
    if self.component is None:
      raise TProtocol.TProtocolException(message='Required field component is unset!')
    if self.serviceName is None:
      raise TProtocol.TProtocolException(message='Required field serviceName is unset!')
    return
  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.protocol_version)
    value = (value * 31) ^ hash(self.requestorUserName)
    value = (value * 31) ^ hash(self.roleName)
    value = (value * 31) ^ hash(self.component)
    value = (value * 31) ^ hash(self.serviceName)
    # BUG FIX: authorizables is a mutable list (unhashable); hash an
    # immutable tuple snapshot instead. The generated code hashed the
    # list directly, which raises TypeError.
    value = (value * 31) ^ hash(tuple(self.authorizables) if self.authorizables is not None else self.authorizables)
    return value
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TListSentryPrivilegesResponse(object):
  """
  Thrift struct: result of listing the privileges of a Sentry role.

  Attributes:
   - status: TSentryResponseStatus outcome of the request (required)
   - privileges: set of TSentryPrivilege structs (optional; not checked
     by validate)
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'status', (sentry_common_service.ttypes.TSentryResponseStatus, sentry_common_service.ttypes.TSentryResponseStatus.thrift_spec), None, ), # 1
    (2, TType.SET, 'privileges', (TType.STRUCT,(TSentryPrivilege, TSentryPrivilege.thrift_spec)), None, ), # 2
  )
  def __init__(self, status=None, privileges=None,):
    self.status = status
    self.privileges = privileges
  def read(self, iprot):
    """Deserialize this struct from *iprot*; unknown fields are skipped.

    Takes the accelerated C decoder fast path when available.
    """
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.status = sentry_common_service.ttypes.TSentryResponseStatus()
          self.status.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.SET:
          self.privileges = set()
          (_etype45, _size42) = iprot.readSetBegin()
          for _i46 in xrange(_size42):
            _elem47 = TSentryPrivilege()
            _elem47.read(iprot)
            self.privileges.add(_elem47)
          iprot.readSetEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TListSentryPrivilegesResponse')
    if self.status is not None:
      oprot.writeFieldBegin('status', TType.STRUCT, 1)
      self.status.write(oprot)
      oprot.writeFieldEnd()
    if self.privileges is not None:
      oprot.writeFieldBegin('privileges', TType.SET, 2)
      oprot.writeSetBegin(TType.STRUCT, len(self.privileges))
      for iter48 in self.privileges:
        iter48.write(oprot)
      oprot.writeSetEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Raise TProtocolException if the required ``status`` is unset."""
    if self.status is None:
      raise TProtocol.TProtocolException(message='Required field status is unset!')
    return
  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.status)
    # BUG FIX: privileges is a mutable set (unhashable); hash an
    # immutable frozenset snapshot instead, as TSentryRole.__hash__
    # does. The generated code hashed the set directly (TypeError).
    value = (value * 31) ^ hash(frozenset(self.privileges) if self.privileges is not None else self.privileges)
    return value
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TDropPrivilegesRequest(object):
  """
  Thrift struct: request to drop a privilege from all roles.

  Attributes:
   - protocol_version: i32 protocol version, defaults to 2 (required)
   - requestorUserName: user issuing the request (required)
   - privilege: TSentryPrivilege to drop (required)
   - component: authorization component, e.g. service name (required)
  """
  thrift_spec = (
    None, # 0
    (1, TType.I32, 'protocol_version', None, 2, ), # 1
    (2, TType.STRING, 'requestorUserName', None, None, ), # 2
    (3, TType.STRUCT, 'privilege', (TSentryPrivilege, TSentryPrivilege.thrift_spec), None, ), # 3
    (4, TType.STRING, 'component', None, None, ), # 4
  )
  def __init__(self, protocol_version=thrift_spec[1][4], requestorUserName=None, privilege=None, component=None,):
    self.protocol_version = protocol_version
    self.requestorUserName = requestorUserName
    self.privilege = privilege
    self.component = component
  def read(self, iprot):
    """Deserialize this struct from *iprot*; unknown fields are skipped.

    Takes the accelerated C decoder fast path when available.
    """
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I32:
          self.protocol_version = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.requestorUserName = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRUCT:
          self.privilege = TSentryPrivilege()
          self.privilege.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.STRING:
          self.component = iprot.readString()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TDropPrivilegesRequest')
    if self.protocol_version is not None:
      oprot.writeFieldBegin('protocol_version', TType.I32, 1)
      oprot.writeI32(self.protocol_version)
      oprot.writeFieldEnd()
    if self.requestorUserName is not None:
      oprot.writeFieldBegin('requestorUserName', TType.STRING, 2)
      oprot.writeString(self.requestorUserName)
      oprot.writeFieldEnd()
    if self.privilege is not None:
      oprot.writeFieldBegin('privilege', TType.STRUCT, 3)
      self.privilege.write(oprot)
      oprot.writeFieldEnd()
    if self.component is not None:
      oprot.writeFieldBegin('component', TType.STRING, 4)
      oprot.writeString(self.component)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Raise TProtocolException if any required field is unset."""
    if self.protocol_version is None:
      raise TProtocol.TProtocolException(message='Required field protocol_version is unset!')
    if self.requestorUserName is None:
      raise TProtocol.TProtocolException(message='Required field requestorUserName is unset!')
    if self.privilege is None:
      raise TProtocol.TProtocolException(message='Required field privilege is unset!')
    if self.component is None:
      raise TProtocol.TProtocolException(message='Required field component is unset!')
    return
  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.protocol_version)
    value = (value * 31) ^ hash(self.requestorUserName)
    value = (value * 31) ^ hash(self.privilege)
    value = (value * 31) ^ hash(self.component)
    return value
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TDropPrivilegesResponse(object):
  """
  Response for a drop-privilege request; carries only the call status.
  NOTE(review): appears to be Thrift-compiler-generated code.

  Attributes:
   - status: TSentryResponseStatus reporting success or failure
  """
  # Per-field wire spec: (field id, thrift type, name, nested spec, default); index 0 unused.
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'status', (sentry_common_service.ttypes.TSentryResponseStatus, sentry_common_service.ttypes.TSentryResponseStatus.thrift_spec), None, ), # 1
  )
  def __init__(self, status=None,):
    self.status = status
  def read(self, iprot):
    # Deserialize: fast C path when available, else skip-tolerant field loop.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.status = sentry_common_service.ttypes.TSentryResponseStatus()
          self.status.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Serialize: fast C path when available, else manual field writes.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TDropPrivilegesResponse')
    if self.status is not None:
      oprot.writeFieldBegin('status', TType.STRUCT, 1)
      self.status.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # status is required.
    if self.status is None:
      raise TProtocol.TProtocolException(message='Required field status is unset!')
    return
  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.status)
    return value
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TRenamePrivilegesRequest(object):
  """
  Request to rename privileges: move privileges attached to one authorizable
  hierarchy (oldAuthorizables) onto another (newAuthorizables).
  NOTE(review): appears to be Thrift-compiler-generated code.

  Attributes:
   - protocol_version: client/server protocol version (defaults to 2)
   - requestorUserName: user issuing the rename request
   - component: component the privileges belong to
   - serviceName: service the privileges belong to
   - oldAuthorizables: list of TAuthorizable describing the source hierarchy
   - newAuthorizables: list of TAuthorizable describing the target hierarchy
  """
  # Per-field wire spec: (field id, thrift type, name, nested spec, default); index 0 unused.
  thrift_spec = (
    None, # 0
    (1, TType.I32, 'protocol_version', None, 2, ), # 1
    (2, TType.STRING, 'requestorUserName', None, None, ), # 2
    (3, TType.STRING, 'component', None, None, ), # 3
    (4, TType.STRING, 'serviceName', None, None, ), # 4
    (5, TType.LIST, 'oldAuthorizables', (TType.STRUCT,(TAuthorizable, TAuthorizable.thrift_spec)), None, ), # 5
    (6, TType.LIST, 'newAuthorizables', (TType.STRUCT,(TAuthorizable, TAuthorizable.thrift_spec)), None, ), # 6
  )
  def __init__(self, protocol_version=thrift_spec[1][4], requestorUserName=None, component=None, serviceName=None, oldAuthorizables=None, newAuthorizables=None,):
    self.protocol_version = protocol_version
    self.requestorUserName = requestorUserName
    self.component = component
    self.serviceName = serviceName
    self.oldAuthorizables = oldAuthorizables
    self.newAuthorizables = newAuthorizables
  def read(self, iprot):
    # Deserialize: fast C path when available, else skip-tolerant field loop.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I32:
          self.protocol_version = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.requestorUserName = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.component = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.STRING:
          self.serviceName = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.LIST:
          self.oldAuthorizables = []
          (_etype52, _size49) = iprot.readListBegin()
          for _i53 in xrange(_size49):
            _elem54 = TAuthorizable()
            _elem54.read(iprot)
            self.oldAuthorizables.append(_elem54)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 6:
        if ftype == TType.LIST:
          self.newAuthorizables = []
          (_etype58, _size55) = iprot.readListBegin()
          for _i59 in xrange(_size55):
            _elem60 = TAuthorizable()
            _elem60.read(iprot)
            self.newAuthorizables.append(_elem60)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Serialize: fast C path when available, else manual field writes;
    # unset (None) fields are omitted from the wire.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TRenamePrivilegesRequest')
    if self.protocol_version is not None:
      oprot.writeFieldBegin('protocol_version', TType.I32, 1)
      oprot.writeI32(self.protocol_version)
      oprot.writeFieldEnd()
    if self.requestorUserName is not None:
      oprot.writeFieldBegin('requestorUserName', TType.STRING, 2)
      oprot.writeString(self.requestorUserName)
      oprot.writeFieldEnd()
    if self.component is not None:
      oprot.writeFieldBegin('component', TType.STRING, 3)
      oprot.writeString(self.component)
      oprot.writeFieldEnd()
    if self.serviceName is not None:
      oprot.writeFieldBegin('serviceName', TType.STRING, 4)
      oprot.writeString(self.serviceName)
      oprot.writeFieldEnd()
    if self.oldAuthorizables is not None:
      oprot.writeFieldBegin('oldAuthorizables', TType.LIST, 5)
      oprot.writeListBegin(TType.STRUCT, len(self.oldAuthorizables))
      for iter61 in self.oldAuthorizables:
        iter61.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.newAuthorizables is not None:
      oprot.writeFieldBegin('newAuthorizables', TType.LIST, 6)
      oprot.writeListBegin(TType.STRUCT, len(self.newAuthorizables))
      for iter62 in self.newAuthorizables:
        iter62.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # All six fields are required.
    if self.protocol_version is None:
      raise TProtocol.TProtocolException(message='Required field protocol_version is unset!')
    if self.requestorUserName is None:
      raise TProtocol.TProtocolException(message='Required field requestorUserName is unset!')
    if self.component is None:
      raise TProtocol.TProtocolException(message='Required field component is unset!')
    if self.serviceName is None:
      raise TProtocol.TProtocolException(message='Required field serviceName is unset!')
    if self.oldAuthorizables is None:
      raise TProtocol.TProtocolException(message='Required field oldAuthorizables is unset!')
    if self.newAuthorizables is None:
      raise TProtocol.TProtocolException(message='Required field newAuthorizables is unset!')
    return
  def __hash__(self):
    # NOTE(review): hash() of the list-valued fields raises TypeError, so
    # populated instances are effectively unhashable — confirm callers never hash this.
    value = 17
    value = (value * 31) ^ hash(self.protocol_version)
    value = (value * 31) ^ hash(self.requestorUserName)
    value = (value * 31) ^ hash(self.component)
    value = (value * 31) ^ hash(self.serviceName)
    value = (value * 31) ^ hash(self.oldAuthorizables)
    value = (value * 31) ^ hash(self.newAuthorizables)
    return value
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TRenamePrivilegesResponse(object):
  """
  Response for a rename-privileges request; carries only the call status.
  NOTE(review): appears to be Thrift-compiler-generated code.

  Attributes:
   - status: TSentryResponseStatus reporting success or failure
  """
  # Per-field wire spec: (field id, thrift type, name, nested spec, default); index 0 unused.
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'status', (sentry_common_service.ttypes.TSentryResponseStatus, sentry_common_service.ttypes.TSentryResponseStatus.thrift_spec), None, ), # 1
  )
  def __init__(self, status=None,):
    self.status = status
  def read(self, iprot):
    # Deserialize: fast C path when available, else skip-tolerant field loop.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.status = sentry_common_service.ttypes.TSentryResponseStatus()
          self.status.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Serialize: fast C path when available, else manual field writes.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TRenamePrivilegesResponse')
    if self.status is not None:
      oprot.writeFieldBegin('status', TType.STRUCT, 1)
      self.status.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # status is required.
    if self.status is None:
      raise TProtocol.TProtocolException(message='Required field status is unset!')
    return
  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.status)
    return value
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TSentryActiveRoleSet(object):
  """
  Selector for which roles a query applies to: either all roles
  (all=True) or an explicit set of role names.
  NOTE(review): appears to be Thrift-compiler-generated code.

  Attributes:
   - all: when True, every role is considered active
   - roles: explicit set of role-name strings
  """
  # Per-field wire spec: (field id, thrift type, name, nested spec, default); index 0 unused.
  thrift_spec = (
    None, # 0
    (1, TType.BOOL, 'all', None, None, ), # 1
    (2, TType.SET, 'roles', (TType.STRING,None), None, ), # 2
  )
  def __init__(self, all=None, roles=None,):
    self.all = all
    self.roles = roles
  def read(self, iprot):
    # Deserialize: fast C path when available, else skip-tolerant field loop.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.BOOL:
          self.all = iprot.readBool()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.SET:
          self.roles = set()
          (_etype66, _size63) = iprot.readSetBegin()
          for _i67 in xrange(_size63):
            _elem68 = iprot.readString()
            self.roles.add(_elem68)
          iprot.readSetEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Serialize: fast C path when available, else manual field writes.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TSentryActiveRoleSet')
    if self.all is not None:
      oprot.writeFieldBegin('all', TType.BOOL, 1)
      oprot.writeBool(self.all)
      oprot.writeFieldEnd()
    if self.roles is not None:
      oprot.writeFieldBegin('roles', TType.SET, 2)
      oprot.writeSetBegin(TType.STRING, len(self.roles))
      for iter69 in self.roles:
        oprot.writeString(iter69)
      oprot.writeSetEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # Both fields are required.
    if self.all is None:
      raise TProtocol.TProtocolException(message='Required field all is unset!')
    if self.roles is None:
      raise TProtocol.TProtocolException(message='Required field roles is unset!')
    return
  def __hash__(self):
    # NOTE(review): hash() of the set-valued roles field raises TypeError, so
    # populated instances are effectively unhashable — confirm callers never hash this.
    value = 17
    value = (value * 31) ^ hash(self.all)
    value = (value * 31) ^ hash(self.roles)
    return value
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TListSentryPrivilegesForProviderRequest(object):
  """
  Request to list privileges for an authorization provider, scoped by group
  membership, active role set and (optionally) an authorizable hierarchy.
  NOTE(review): appears to be Thrift-compiler-generated code.

  Attributes:
   - protocol_version: client/server protocol version (defaults to 2)
   - component: component to query
   - serviceName: service to query
   - groups: set of group names to match
   - roleSet: TSentryActiveRoleSet restricting which roles apply
   - authorizables: optional list of TAuthorizable narrowing the scope
  """
  # Per-field wire spec: (field id, thrift type, name, nested spec, default); index 0 unused.
  thrift_spec = (
    None, # 0
    (1, TType.I32, 'protocol_version', None, 2, ), # 1
    (2, TType.STRING, 'component', None, None, ), # 2
    (3, TType.STRING, 'serviceName', None, None, ), # 3
    (4, TType.SET, 'groups', (TType.STRING,None), None, ), # 4
    (5, TType.STRUCT, 'roleSet', (TSentryActiveRoleSet, TSentryActiveRoleSet.thrift_spec), None, ), # 5
    (6, TType.LIST, 'authorizables', (TType.STRUCT,(TAuthorizable, TAuthorizable.thrift_spec)), None, ), # 6
  )
  def __init__(self, protocol_version=thrift_spec[1][4], component=None, serviceName=None, groups=None, roleSet=None, authorizables=None,):
    self.protocol_version = protocol_version
    self.component = component
    self.serviceName = serviceName
    self.groups = groups
    self.roleSet = roleSet
    self.authorizables = authorizables
  def read(self, iprot):
    # Deserialize: fast C path when available, else skip-tolerant field loop.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I32:
          self.protocol_version = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.component = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.serviceName = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.SET:
          self.groups = set()
          (_etype73, _size70) = iprot.readSetBegin()
          for _i74 in xrange(_size70):
            _elem75 = iprot.readString()
            self.groups.add(_elem75)
          iprot.readSetEnd()
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.STRUCT:
          self.roleSet = TSentryActiveRoleSet()
          self.roleSet.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 6:
        if ftype == TType.LIST:
          self.authorizables = []
          (_etype79, _size76) = iprot.readListBegin()
          for _i80 in xrange(_size76):
            _elem81 = TAuthorizable()
            _elem81.read(iprot)
            self.authorizables.append(_elem81)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Serialize: fast C path when available, else manual field writes;
    # unset (None) fields are omitted from the wire.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TListSentryPrivilegesForProviderRequest')
    if self.protocol_version is not None:
      oprot.writeFieldBegin('protocol_version', TType.I32, 1)
      oprot.writeI32(self.protocol_version)
      oprot.writeFieldEnd()
    if self.component is not None:
      oprot.writeFieldBegin('component', TType.STRING, 2)
      oprot.writeString(self.component)
      oprot.writeFieldEnd()
    if self.serviceName is not None:
      oprot.writeFieldBegin('serviceName', TType.STRING, 3)
      oprot.writeString(self.serviceName)
      oprot.writeFieldEnd()
    if self.groups is not None:
      oprot.writeFieldBegin('groups', TType.SET, 4)
      oprot.writeSetBegin(TType.STRING, len(self.groups))
      for iter82 in self.groups:
        oprot.writeString(iter82)
      oprot.writeSetEnd()
      oprot.writeFieldEnd()
    if self.roleSet is not None:
      oprot.writeFieldBegin('roleSet', TType.STRUCT, 5)
      self.roleSet.write(oprot)
      oprot.writeFieldEnd()
    if self.authorizables is not None:
      oprot.writeFieldBegin('authorizables', TType.LIST, 6)
      oprot.writeListBegin(TType.STRUCT, len(self.authorizables))
      for iter83 in self.authorizables:
        iter83.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # authorizables is the only optional field here.
    if self.protocol_version is None:
      raise TProtocol.TProtocolException(message='Required field protocol_version is unset!')
    if self.component is None:
      raise TProtocol.TProtocolException(message='Required field component is unset!')
    if self.serviceName is None:
      raise TProtocol.TProtocolException(message='Required field serviceName is unset!')
    if self.groups is None:
      raise TProtocol.TProtocolException(message='Required field groups is unset!')
    if self.roleSet is None:
      raise TProtocol.TProtocolException(message='Required field roleSet is unset!')
    return
  def __hash__(self):
    # NOTE(review): hash() of the set/list-valued fields raises TypeError, so
    # populated instances are effectively unhashable — confirm callers never hash this.
    value = 17
    value = (value * 31) ^ hash(self.protocol_version)
    value = (value * 31) ^ hash(self.component)
    value = (value * 31) ^ hash(self.serviceName)
    value = (value * 31) ^ hash(self.groups)
    value = (value * 31) ^ hash(self.roleSet)
    value = (value * 31) ^ hash(self.authorizables)
    return value
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TListSentryPrivilegesForProviderResponse(object):
  """
  Response carrying the matching privileges (as strings) for a
  list-privileges-for-provider request.
  NOTE(review): appears to be Thrift-compiler-generated code.

  Attributes:
   - status: TSentryResponseStatus reporting success or failure
   - privileges: set of privilege strings
  """
  # Per-field wire spec: (field id, thrift type, name, nested spec, default); index 0 unused.
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'status', (sentry_common_service.ttypes.TSentryResponseStatus, sentry_common_service.ttypes.TSentryResponseStatus.thrift_spec), None, ), # 1
    (2, TType.SET, 'privileges', (TType.STRING,None), None, ), # 2
  )
  def __init__(self, status=None, privileges=None,):
    self.status = status
    self.privileges = privileges
  def read(self, iprot):
    # Deserialize: fast C path when available, else skip-tolerant field loop.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.status = sentry_common_service.ttypes.TSentryResponseStatus()
          self.status.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.SET:
          self.privileges = set()
          (_etype87, _size84) = iprot.readSetBegin()
          for _i88 in xrange(_size84):
            _elem89 = iprot.readString()
            self.privileges.add(_elem89)
          iprot.readSetEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Serialize: fast C path when available, else manual field writes.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TListSentryPrivilegesForProviderResponse')
    if self.status is not None:
      oprot.writeFieldBegin('status', TType.STRUCT, 1)
      self.status.write(oprot)
      oprot.writeFieldEnd()
    if self.privileges is not None:
      oprot.writeFieldBegin('privileges', TType.SET, 2)
      oprot.writeSetBegin(TType.STRING, len(self.privileges))
      for iter90 in self.privileges:
        oprot.writeString(iter90)
      oprot.writeSetEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # Both fields are required.
    if self.status is None:
      raise TProtocol.TProtocolException(message='Required field status is unset!')
    if self.privileges is None:
      raise TProtocol.TProtocolException(message='Required field privileges is unset!')
    return
  def __hash__(self):
    # NOTE(review): hash() of the set-valued privileges field raises TypeError, so
    # populated instances are effectively unhashable — confirm callers never hash this.
    value = 17
    value = (value * 31) ^ hash(self.status)
    value = (value * 31) ^ hash(self.privileges)
    return value
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TSentryPrivilegeMap(object):
  """
  Wrapper struct mapping a string key to the set of TSentryPrivilege
  objects associated with it.
  NOTE(review): appears to be Thrift-compiler-generated code.

  Attributes:
   - privilegeMap: dict of string -> set of TSentryPrivilege
  """
  # Per-field wire spec: (field id, thrift type, name, nested spec, default); index 0 unused.
  thrift_spec = (
    None, # 0
    (1, TType.MAP, 'privilegeMap', (TType.STRING,None,TType.SET,(TType.STRUCT,(TSentryPrivilege, TSentryPrivilege.thrift_spec))), None, ), # 1
  )
  def __init__(self, privilegeMap=None,):
    self.privilegeMap = privilegeMap
  def read(self, iprot):
    # Deserialize: fast C path when available, else skip-tolerant field loop
    # that rebuilds the nested map-of-sets structure element by element.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.MAP:
          self.privilegeMap = {}
          (_ktype92, _vtype93, _size91 ) = iprot.readMapBegin()
          for _i95 in xrange(_size91):
            _key96 = iprot.readString()
            _val97 = set()
            (_etype101, _size98) = iprot.readSetBegin()
            for _i102 in xrange(_size98):
              _elem103 = TSentryPrivilege()
              _elem103.read(iprot)
              _val97.add(_elem103)
            iprot.readSetEnd()
            self.privilegeMap[_key96] = _val97
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Serialize: fast C path when available, else manual nested map/set writes.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TSentryPrivilegeMap')
    if self.privilegeMap is not None:
      oprot.writeFieldBegin('privilegeMap', TType.MAP, 1)
      oprot.writeMapBegin(TType.STRING, TType.SET, len(self.privilegeMap))
      for kiter104,viter105 in self.privilegeMap.items():
        oprot.writeString(kiter104)
        oprot.writeSetBegin(TType.STRUCT, len(viter105))
        for iter106 in viter105:
          iter106.write(oprot)
        oprot.writeSetEnd()
      oprot.writeMapEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # privilegeMap is required.
    if self.privilegeMap is None:
      raise TProtocol.TProtocolException(message='Required field privilegeMap is unset!')
    return
  def __hash__(self):
    # NOTE(review): hash() of the dict-valued privilegeMap raises TypeError, so
    # populated instances are effectively unhashable — confirm callers never hash this.
    value = 17
    value = (value * 31) ^ hash(self.privilegeMap)
    return value
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TListSentryPrivilegesByAuthRequest(object):
  """
  Request to list privileges keyed by authorizable, optionally filtered by
  groups and an active role set.
  NOTE(review): appears to be Thrift-compiler-generated code.

  Attributes:
   - protocol_version: client/server protocol version (defaults to 2)
   - requestorUserName: user issuing the request
   - component: component to query
   - serviceName: service to query
   - authorizablesSet: set of authorizable strings to look up
   - groups: optional set of group names to filter by
   - roleSet: optional TSentryActiveRoleSet restricting which roles apply
  """
  # Per-field wire spec: (field id, thrift type, name, nested spec, default); index 0 unused.
  thrift_spec = (
    None, # 0
    (1, TType.I32, 'protocol_version', None, 2, ), # 1
    (2, TType.STRING, 'requestorUserName', None, None, ), # 2
    (3, TType.STRING, 'component', None, None, ), # 3
    (4, TType.STRING, 'serviceName', None, None, ), # 4
    (5, TType.SET, 'authorizablesSet', (TType.STRING,None), None, ), # 5
    (6, TType.SET, 'groups', (TType.STRING,None), None, ), # 6
    (7, TType.STRUCT, 'roleSet', (TSentryActiveRoleSet, TSentryActiveRoleSet.thrift_spec), None, ), # 7
  )
  def __init__(self, protocol_version=thrift_spec[1][4], requestorUserName=None, component=None, serviceName=None, authorizablesSet=None, groups=None, roleSet=None,):
    self.protocol_version = protocol_version
    self.requestorUserName = requestorUserName
    self.component = component
    self.serviceName = serviceName
    self.authorizablesSet = authorizablesSet
    self.groups = groups
    self.roleSet = roleSet
  def read(self, iprot):
    # Deserialize: fast C path when available, else skip-tolerant field loop.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I32:
          self.protocol_version = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.requestorUserName = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.component = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.STRING:
          self.serviceName = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.SET:
          self.authorizablesSet = set()
          (_etype110, _size107) = iprot.readSetBegin()
          for _i111 in xrange(_size107):
            _elem112 = iprot.readString()
            self.authorizablesSet.add(_elem112)
          iprot.readSetEnd()
        else:
          iprot.skip(ftype)
      elif fid == 6:
        if ftype == TType.SET:
          self.groups = set()
          (_etype116, _size113) = iprot.readSetBegin()
          for _i117 in xrange(_size113):
            _elem118 = iprot.readString()
            self.groups.add(_elem118)
          iprot.readSetEnd()
        else:
          iprot.skip(ftype)
      elif fid == 7:
        if ftype == TType.STRUCT:
          self.roleSet = TSentryActiveRoleSet()
          self.roleSet.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Serialize: fast C path when available, else manual field writes;
    # unset (None) fields are omitted from the wire.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TListSentryPrivilegesByAuthRequest')
    if self.protocol_version is not None:
      oprot.writeFieldBegin('protocol_version', TType.I32, 1)
      oprot.writeI32(self.protocol_version)
      oprot.writeFieldEnd()
    if self.requestorUserName is not None:
      oprot.writeFieldBegin('requestorUserName', TType.STRING, 2)
      oprot.writeString(self.requestorUserName)
      oprot.writeFieldEnd()
    if self.component is not None:
      oprot.writeFieldBegin('component', TType.STRING, 3)
      oprot.writeString(self.component)
      oprot.writeFieldEnd()
    if self.serviceName is not None:
      oprot.writeFieldBegin('serviceName', TType.STRING, 4)
      oprot.writeString(self.serviceName)
      oprot.writeFieldEnd()
    if self.authorizablesSet is not None:
      oprot.writeFieldBegin('authorizablesSet', TType.SET, 5)
      oprot.writeSetBegin(TType.STRING, len(self.authorizablesSet))
      for iter119 in self.authorizablesSet:
        oprot.writeString(iter119)
      oprot.writeSetEnd()
      oprot.writeFieldEnd()
    if self.groups is not None:
      oprot.writeFieldBegin('groups', TType.SET, 6)
      oprot.writeSetBegin(TType.STRING, len(self.groups))
      for iter120 in self.groups:
        oprot.writeString(iter120)
      oprot.writeSetEnd()
      oprot.writeFieldEnd()
    if self.roleSet is not None:
      oprot.writeFieldBegin('roleSet', TType.STRUCT, 7)
      self.roleSet.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # groups and roleSet are optional; the first five fields are required.
    if self.protocol_version is None:
      raise TProtocol.TProtocolException(message='Required field protocol_version is unset!')
    if self.requestorUserName is None:
      raise TProtocol.TProtocolException(message='Required field requestorUserName is unset!')
    if self.component is None:
      raise TProtocol.TProtocolException(message='Required field component is unset!')
    if self.serviceName is None:
      raise TProtocol.TProtocolException(message='Required field serviceName is unset!')
    if self.authorizablesSet is None:
      raise TProtocol.TProtocolException(message='Required field authorizablesSet is unset!')
    return
  def __hash__(self):
    # NOTE(review): hash() of the set-valued fields raises TypeError, so
    # populated instances are effectively unhashable — confirm callers never hash this.
    value = 17
    value = (value * 31) ^ hash(self.protocol_version)
    value = (value * 31) ^ hash(self.requestorUserName)
    value = (value * 31) ^ hash(self.component)
    value = (value * 31) ^ hash(self.serviceName)
    value = (value * 31) ^ hash(self.authorizablesSet)
    value = (value * 31) ^ hash(self.groups)
    value = (value * 31) ^ hash(self.roleSet)
    return value
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TListSentryPrivilegesByAuthResponse(object):
  """
  Response mapping each queried authorizable string to its
  TSentryPrivilegeMap. privilegesMapByAuth is optional (absent on failure).
  NOTE(review): appears to be Thrift-compiler-generated code.

  Attributes:
   - status: TSentryResponseStatus reporting success or failure
   - privilegesMapByAuth: dict of authorizable string -> TSentryPrivilegeMap
  """
  # Per-field wire spec: (field id, thrift type, name, nested spec, default); index 0 unused.
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'status', (sentry_common_service.ttypes.TSentryResponseStatus, sentry_common_service.ttypes.TSentryResponseStatus.thrift_spec), None, ), # 1
    (2, TType.MAP, 'privilegesMapByAuth', (TType.STRING,None,TType.STRUCT,(TSentryPrivilegeMap, TSentryPrivilegeMap.thrift_spec)), None, ), # 2
  )
  def __init__(self, status=None, privilegesMapByAuth=None,):
    self.status = status
    self.privilegesMapByAuth = privilegesMapByAuth
  def read(self, iprot):
    # Deserialize: fast C path when available, else skip-tolerant field loop.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.status = sentry_common_service.ttypes.TSentryResponseStatus()
          self.status.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.MAP:
          self.privilegesMapByAuth = {}
          (_ktype122, _vtype123, _size121 ) = iprot.readMapBegin()
          for _i125 in xrange(_size121):
            _key126 = iprot.readString()
            _val127 = TSentryPrivilegeMap()
            _val127.read(iprot)
            self.privilegesMapByAuth[_key126] = _val127
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Serialize: fast C path when available, else manual field writes.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TListSentryPrivilegesByAuthResponse')
    if self.status is not None:
      oprot.writeFieldBegin('status', TType.STRUCT, 1)
      self.status.write(oprot)
      oprot.writeFieldEnd()
    if self.privilegesMapByAuth is not None:
      oprot.writeFieldBegin('privilegesMapByAuth', TType.MAP, 2)
      oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.privilegesMapByAuth))
      for kiter128,viter129 in self.privilegesMapByAuth.items():
        oprot.writeString(kiter128)
        viter129.write(oprot)
      oprot.writeMapEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # Only status is required; privilegesMapByAuth may be absent.
    if self.status is None:
      raise TProtocol.TProtocolException(message='Required field status is unset!')
    return
  def __hash__(self):
    # NOTE(review): hash() of the dict-valued privilegesMapByAuth raises TypeError,
    # so populated instances are effectively unhashable — confirm callers never hash this.
    value = 17
    value = (value * 31) ^ hash(self.status)
    value = (value * 31) ^ hash(self.privilegesMapByAuth)
    return value
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
| StarcoderdataPython |
74098 | <filename>client_sdk_python/middleware/normalize_request_parameters.py
from client_sdk_python.packages.eth_utils import (
is_string,
)
from client_sdk_python.utils.formatters import (
apply_formatter_at_index,
apply_formatter_if,
apply_formatters_to_dict,
)
from .formatting import (
construct_formatting_middleware,
)
# Normalize filter parameters: a bare 'address' string is wrapped into a
# single-element list, the shape the node RPC expects.
FILTER_PARAM_NORMALIZERS = apply_formatters_to_dict({
    'address': apply_formatter_if(is_string, lambda x: [x])})

# Apply the filter normalization to the first positional parameter of the
# log/filter RPC methods.
METHOD_NORMALIZERS = {
    'platon_getLogs': apply_formatter_at_index(FILTER_PARAM_NORMALIZERS, 0),
    'platon_newFilter': apply_formatter_at_index(FILTER_PARAM_NORMALIZERS, 0)
}

# Middleware instance that rewrites outgoing request parameters before
# they reach the provider.
request_parameter_normalizer = construct_formatting_middleware(
    request_formatters=METHOD_NORMALIZERS,
)
| StarcoderdataPython |
3320693 | from mindpile.Mapping.types import OutPort
from mindpile.Mapping.utils import MethodCall, Requires, Setup
@Setup
def largeMotorSetup():
    """Shared setup snippet: imports LargeMotor into the generated program."""
    return '''
    from ev3dev2.motor import LargeMotor
    '''
@MethodCall(target="MotorStop.vix", MotorPort=OutPort, BrakeAtEnd=bool)
@Requires(largeMotorSetup)
def largeMotorStop():
return '''
m(MotorPort, LargeMotor).stop(stop_action=("hold" if BrakeAtEnd else "coast"))
'''
@MethodCall(target="MotorUnlimited.vix", MotorPort=OutPort, Speed=float)
@Requires(largeMotorSetup)
def largeMotorUnlimited():
return '''
m(MotorPort, LargeMotor).on(speed=Speed)
'''
@MethodCall(target="MotorTime.vix", MotorPort=OutPort, Speed=float, Seconds=float, BrakeAtEnd=bool)
@Requires(largeMotorSetup)
def largeMotorSeconds():
return '''
m(MotorPort, LargeMotor).on_for_seconds(speed=Speed, seconds=Seconds, brake=BrakeAtEnd)
'''
@MethodCall(target="MotorDistance.vix", MotorPort=OutPort, Speed=float, Degrees=float, BrakeAtEnd=bool)
@Requires(largeMotorSetup)
def largeMotorDegrees():
return '''
m(MotorPort, LargeMotor).on_for_degrees(speed=Speed, degrees=Degrees, brake=BrakeAtEnd)
'''
@MethodCall(target="MotorDistanceRotations.vix", MotorPort=OutPort, Speed=float, Rotations=float, BrakeAtEnd=bool)
@Requires(largeMotorSetup)
def largeMotorRotations():
return '''
m(MotorPort, LargeMotor).on_for_rotations(speed=Speed, rotations=Rotations, brake=BrakeAtEnd)
'''
| StarcoderdataPython |
1751661 | """
Summary => The controller class for initializing the Robotic artist.
Description => Will initialize the sequence of events for robotic artist.
    This involves initializing the GUI and this will then initialize all
    the other requirements in the product.
Author => <NAME> (mah60).
Version =>
    0.1 - 23/02/2018 - This version is the initial set up for the
        controller. It contains a basic initialization setup and
        a way to read any input.
    0.1.1 - 26/02/2018 - corrected the calling of the serial command.
    0.2 - 12/03/2018 - recreated file and renamed it to Application.py. This code
        is now in code_v2. The basic setup has been done to start the gui and
        run the program as a whole.
    0.3 - 15/04/2018 - changed to support python2 instead of python3. This means
        pyqt5 is changed to pyqt4.
"""
import sys
from main_window import MainWindow
from PyQt4.QtGui import QApplication
def main():
    """
    Initialize the robotic artist application.

    Creates the Qt application and the main window, shows the window,
    and hands control to the Qt event loop; the process exits with the
    event loop's return code.

    args => None
    return => None
    """
    app = QApplication(sys.argv)
    window = MainWindow()
    # Removed commented-out Ui_mainWindow() setup left over from the
    # pre-code_v2 layout (MainWindow now builds its own UI).
    window.show()
    sys.exit(app.exec_())


if __name__ == "__main__":
    main()
| StarcoderdataPython |
1697394 | <gh_stars>0
def stations_level_over_threshold(stations, tol):
    """Return stations whose relative water level exceeds *tol*.

    The relative level is ``(latest_level - low) / (high - low)`` where
    ``(low, high)`` is the station's typical range, so 0.0 corresponds
    to the typical low and 1.0 to the typical high.  Stations with
    missing data or an inconsistent range (``high <= low``) are skipped;
    the previous implementation divided by zero when ``high == low``.

    Parameters
    ----------
    stations : iterable
        Objects with ``name``, ``latest_level`` and ``typical_range``
        attributes (``latest_level``/``typical_range`` may be None).
    tol : float
        Relative-level threshold.

    Returns
    -------
    list of (str, float)
        ``(station name, relative level)`` pairs, highest level first.
    """
    results = []
    for station in stations:
        # Skip stations with no range data or no current reading.
        if station.typical_range is None or station.latest_level is None:
            continue
        low, high = station.typical_range
        span = high - low
        # Inconsistent (high < low) or degenerate (high == low) ranges
        # cannot yield a meaningful relative level.
        if span <= 0:
            continue
        rel_level = (station.latest_level - low) / span
        if rel_level > tol:
            results.append((station.name, rel_level))
    return sorted(results, key=lambda pair: -pair[1])
def stations_highest_rel_level(stations, N):
    """Return the *N* stations with the highest relative water level.

    The relative level is ``(latest_level - low) / (high - low)``; see
    ``stations_level_over_threshold``.  Stations with missing data or an
    inconsistent typical range (``high <= low``) are skipped -- the
    previous implementation raised ZeroDivisionError when the range was
    degenerate (``high == low``).

    Parameters
    ----------
    stations : iterable
        Objects with ``name``, ``latest_level`` and ``typical_range``
        attributes.
    N : int
        Maximum number of entries to return.

    Returns
    -------
    list of (str, float)
        Up to *N* ``(station name, relative level)`` pairs, highest
        level first.
    """
    results = []
    for station in stations:
        if station.typical_range is None or station.latest_level is None:
            continue
        low, high = station.typical_range
        span = high - low
        if span <= 0:
            continue
        results.append((station.name, (station.latest_level - low) / span))
    results.sort(key=lambda pair: -pair[1])
    return results[:N]
9805 | import os
# Public hostname Superset is served under (used for URL generation).
SERVER_NAME = os.getenv('DOMAIN_SUPERSET')
# Grant anonymous visitors the same permissions as the Gamma role.
PUBLIC_ROLE_LIKE_GAMMA = True
SESSION_COOKIE_SAMESITE = None # One of [None, 'Lax', 'Strict']
# NOTE(review): HttpOnly disabled here and X-Frame-Options set to
# ALLOWALL below -- presumably to allow embedding dashboards in iframes;
# confirm this is intentional, as both weaken cookie/clickjacking
# protections.
SESSION_COOKIE_HTTPONLY = False
MAPBOX_API_KEY = os.getenv('MAPBOX_API_KEY', '')
# Postgres connection settings, taken from the container environment.
POSTGRES_DB=os.getenv('POSTGRES_DB')
POSTGRES_PASSWORD=os.getenv('POSTGRES_PASSWORD')
POSTGRES_USER=os.getenv('POSTGRES_USER')
# str() keeps the URL concatenation below working; NOTE(review): an
# unset POSTGRES_PORT becomes the literal string "None".
POSTGRES_PORT=str(os.getenv('POSTGRES_PORT'))
HTTP_HEADERS = {'X-Frame-Options': 'ALLOWALL'}
sql_alchemy_string='postgresql+psycopg2://'+POSTGRES_USER+':'+POSTGRES_PASSWORD+'@postgres:'+POSTGRES_PORT+'/'+POSTGRES_DB
# Use Redis database 1 as Superset's cache backend.
CACHE_CONFIG = {
    'CACHE_TYPE': 'redis',
    'CACHE_DEFAULT_TIMEOUT': 300,
    'CACHE_KEY_PREFIX': 'superset_',
    'CACHE_REDIS_HOST': 'redis',
    'CACHE_REDIS_PORT': 6379,
    'CACHE_REDIS_DB': 1,
    'CACHE_REDIS_URL': 'redis://redis:6379/1'}
# Metadata database connection string assembled above.
SQLALCHEMY_DATABASE_URI = \
    sql_alchemy_string
SQLALCHEMY_TRACK_MODIFICATIONS = True
SECRET_KEY = 'thisISaSECRET_1234' | StarcoderdataPython |
1774267 | """
Module containing the code for the add command in the CLI.
"""
import json
import logging
from pathlib import Path
import os
from .operations.environment_manager_operations import EnvironmentManagerOperations
from .common_operations import (
append_requirement, backup_requirements,
rollback_requirement, log_add_library
)
from .settings import SettingsManager
from ..constants import VENV, CONDA
logger = logging.getLogger('gryphon')
# TODO: Think about freeze feature (at time of handover)
# TODO: Check if the provided library is a valid one.
# TODO: Have some library list suggestion for each usage category the user has.
def add(library_name, version=None, cwd=None):
    """
    Add a required library to the project requirements and install it.

    Appends ``library_name`` (pinned to ``version`` when given) to the
    requirements file under *cwd*, then installs it with the environment
    manager configured in the gryphon settings (venv or conda).  If the
    installation fails, the requirements file is rolled back; the
    temporary backup file is removed in every case.

    Parameters
    ----------
    library_name : str
        Name of the library to add.
    version : str, optional
        Exact version to pin (``library==version``).
    cwd : pathlib.Path, optional
        Project folder holding the requirements file.  Defaults to the
        working directory at call time -- the previous
        ``cwd=Path.cwd()`` default was evaluated once at import time, so
        it went stale if the process later changed directory.

    Raises
    ------
    RuntimeError
        If the configured environment manager is invalid or the
        installation fails.
    """
    if cwd is None:
        cwd = Path.cwd()

    logger.info("Adding required lib.")

    requirements_backup = backup_requirements(cwd)

    lib = library_name if version is None else f"{library_name}=={version}"
    append_requirement(lib, location=cwd)

    try:
        with open(SettingsManager.get_config_path(), "r", encoding="UTF-8") as f:
            env_manager = json.load(f)["environment_management"]

        if env_manager == VENV:
            EnvironmentManagerOperations.install_libraries_venv()
        elif env_manager == CONDA:
            EnvironmentManagerOperations.install_libraries_conda()
        else:
            env_list = [VENV, CONDA]
            raise RuntimeError(f"Invalid environment manager on the config file: \"{env_manager}\"."
                               f"Should be one of {env_list}. Restoring the default config file should solve.")
    except RuntimeError:
        # Undo the requirements change so a failed install leaves the
        # project file untouched, then re-raise for the CLI layer.
        rollback_requirement(requirements_backup, location=cwd)
        logger.warning("Rolled back the changes from last command.")
        raise
    else:
        log_add_library([library_name])
    finally:
        os.remove(requirements_backup)
3365152 | # Copyright (c) 2015 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from oslo_config import cfg
from oslo_log import log
from taskflow import task
from poppy.distributed_task.utils import memoized_controllers
from poppy.transport.pecan.models.request import ssl_certificate
LOG = log.getLogger(__name__)
conf = cfg.CONF
conf(project='poppy', prog='poppy', args=[])
class GetCertInfoTask(task.Task):
    """Taskflow task: fetch a stored SSL certificate as a JSON string."""

    # Name under which the result is exposed to downstream tasks.
    default_provides = "cert_obj_json"

    def execute(self, domain_name, cert_type, flavor_id, project_id):
        # Resolve the ssl_certificate controller (and its storage) lazily
        # at execution time rather than at task construction.
        service_controller, self.ssl_certificate_manager = \
            memoized_controllers.task_controllers('poppy', 'ssl_certificate')
        self.storage = self.ssl_certificate_manager.storage
        res = self.storage.get_certs_by_domain(
            domain_name, project_id=project_id,
            flavor_id=flavor_id, cert_type=cert_type)
        # Downstream tasks treat the empty string as "no certificate found".
        if res is None:
            return ""
        return json.dumps(res.to_dict())
class CheckCertStatusTask(task.Task):
    """Taskflow task: poll Akamai for a certificate's provisioning state.

    For 'san' certificates the SPS API is queried; for 'sni'
    certificates the CPS API is queried.  Certificates that are still
    pending are re-enqueued on the san_mapping_queue so a later run can
    check them again.
    """

    # Name under which the result is exposed to downstream tasks.
    default_provides = "status_change_to"

    def __init__(self):
        super(CheckCertStatusTask, self).__init__()
        service_controller, self.providers = \
            memoized_controllers.task_controllers('poppy', 'providers')
        self.akamai_driver = self.providers['akamai'].obj

    def execute(self, cert_obj_json):
        """Return the status the certificate should move to.

        Returns 'deployed', 'failed', 'cancelled', the unchanged current
        status, or '' when there is nothing to report (including when
        *cert_obj_json* is empty, in which case None is returned
        implicitly).
        """
        if cert_obj_json != "":
            cert_obj = ssl_certificate.load_from_json(
                json.loads(cert_obj_json))
            if cert_obj.cert_type == 'san':
                latest_sps_id = cert_obj.\
                    cert_details['Akamai']['extra_info'].get(
                        'akamai_spsId')
                current_status = cert_obj.\
                    cert_details['Akamai']['extra_info'].get(
                        'status')
                # No SPS request was ever filed for this cert: keep status.
                if latest_sps_id is None:
                    return current_status
                resp = self.akamai_driver.akamai_sps_api_client.get(
                    self.akamai_driver.akamai_sps_api_base_url.format(
                        spsId=latest_sps_id
                    )
                )
                if resp.status_code != 200:
                    # Fixed the missing separator between the two
                    # implicitly concatenated literals ("FailedException").
                    raise RuntimeError('SPS API Request Failed. '
                                       'Exception: %s' % resp.text)
                sps_request_info = json.loads(resp.text)['requestList'][0]
                status = sps_request_info['status']
                workFlowProgress = sps_request_info.get(
                    'workflowProgress')
                # This SAN Cert is on pending status
                if status == 'SPS Request Complete':
                    LOG.info("SPS completed for %s...",
                             cert_obj.get_edge_host_name())
                    return "deployed"
                elif status == 'edge host already created or pending':
                    if workFlowProgress is not None and \
                            'error' in workFlowProgress.lower():
                        # Bug fix: the original applied "%" to a format
                        # string with no %s placeholder
                        # ("SPS Pending with Error:" % workFlowProgress),
                        # which raised TypeError instead of logging.
                        LOG.info("SPS Pending with Error: %s",
                                 workFlowProgress)
                        return "failed"
                    else:
                        return "deployed"
                elif status == 'CPS cancelled':
                    return "cancelled"
                else:
                    LOG.info(
                        "SPS Not completed for domain {0}, san_cert {1}. "
                        "Found status {2}. "
                        "Returning certificate object to Queue.".format(
                            cert_obj.domain_name,
                            cert_obj.get_edge_host_name(),
                            status
                        )
                    )
                    # convert cert_obj_json from unicode -> string
                    # before enqueue
                    self.akamai_driver.san_mapping_queue.enqueue_san_mapping(
                        json.dumps(cert_obj.to_dict()))
                    return ""
            elif cert_obj.cert_type == 'sni':
                change_url = cert_obj.cert_details['Akamai']['extra_info'].get(
                    'change_url')
                current_status = cert_obj.\
                    cert_details['Akamai']['extra_info'].get(
                        'status')
                # No pending CPS change: nothing to poll, keep status.
                if change_url is None:
                    return current_status
                enrollment_id = self.akamai_driver.cert_info_storage.\
                    get_cert_enrollment_id(cert_obj.get_edge_host_name())
                headers = {
                    'Accept': (
                        'application/vnd.akamai.cps.enrollment.v1+json')
                }
                resp = self.akamai_driver.akamai_cps_api_client.get(
                    self.akamai_driver.akamai_cps_api_base_url.format(
                        enrollmentId=enrollment_id
                    ),
                    headers=headers
                )
                if resp.status_code not in [200, 202]:
                    # Fixed the missing space between concatenated
                    # literals ("attemptingto update").
                    LOG.error(
                        "Unable to retrieve enrollment while attempting "
                        "to update cert status. Status {0} Body {1}".format(
                            resp.status_code,
                            resp.text
                        )
                    )
                    return current_status
                resp_json = json.loads(resp.text)
                pending_changes = resp_json["pendingChanges"]
                dns_names = (
                    resp_json["networkConfiguration"]["sni"]["dnsNames"]
                )
                if change_url not in pending_changes:
                    # Change completed; success iff our domain made it
                    # into the deployed SNI dns names.
                    if cert_obj.domain_name in dns_names:
                        return "deployed"
                    else:
                        return "failed"
                else:
                    # The change url is still present under pending
                    # changes: re-enqueue so a later run checks again.
                    self.akamai_driver.san_mapping_queue.enqueue_san_mapping(
                        json.dumps(cert_obj.to_dict()))
                    return current_status
class UpdateCertStatusTask(task.Task):
    """Taskflow task: persist a certificate's new status in storage and
    mirror it into the owning service's provider details.
    """

    def __init__(self):
        super(UpdateCertStatusTask, self).__init__()
        service_controller, self.ssl_certificate_manager = \
            memoized_controllers.task_controllers('poppy', 'ssl_certificate')
        self.storage_controller = (
            self.ssl_certificate_manager.storage
        )
        self.service_storage = service_controller.storage_controller

    def execute(self, project_id, cert_obj_json, status_change_to):
        # Nothing to update when the upstream task produced no certificate.
        if not cert_obj_json:
            return
        cert_obj = ssl_certificate.load_from_json(
            json.loads(cert_obj_json)
        )
        cert_details = cert_obj.cert_details
        # An empty status means "no change": only overwrite when set.
        if status_change_to:
            cert_details['Akamai']['extra_info']['status'] = (
                status_change_to)
        # Storage expects the provider block serialized as a JSON string.
        cert_details['Akamai'] = json.dumps(cert_details['Akamai'])
        self.storage_controller.update_certificate(
            cert_obj.domain_name,
            cert_obj.cert_type,
            cert_obj.flavor_id,
            cert_details
        )
        service_obj = (
            self.service_storage.
            get_service_details_by_domain_name(cert_obj.domain_name)
        )
        # Update provider details
        if service_obj:
            service_obj.provider_details['Akamai'].\
                domains_certificate_status.\
                set_domain_certificate_status(cert_obj.domain_name,
                                              status_change_to)
            self.service_storage.update_provider_details(
                project_id,
                service_obj.service_id,
                service_obj.provider_details
            )
        else:
            # No service owns this domain (e.g. it was deleted): the
            # certificate row was still updated above.
            pass
| StarcoderdataPython |
3237285 | <reponame>dealfonso/ipfloater
#! /usr/bin/env python
# coding: utf-8
#
# Floating IP Addresses manager (IPFloater)
# Copyright (C) 2015 - GRyCAP - Universitat Politecnica de Valencia
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import config
import sys
import version
from cpyutils.parameters import CmdLineParser, Flag, Parameter, Argument
import cpyutils.eventloop as eventloop
import cpyutils.db as db
import cpyutils.xmlrpcutils as xmlrpcutils
import cpyutils.log
import cpyutils.config
import endpoint
import iptables
import arptable
import os
from cpyutils.iputils import check_ip
import signal
'''
In the awful case that a uuid already exists in the iptables table, how should the endpoint proceed? The
OVERWRITE_RULES flag states whether to overwrite the rule or raise an error.
- perhaps a new id should be generated? (this could happen, although statistically it has a low probability)
- this flag is inherited from when sequential numbers were used for ids and all started at 0 at boot
'''
OVERWRITE_RULES=True
_LOGGER = cpyutils.log.Log("IPFLOATER")
def get_endpoint_manager():
    """Return the module-wide EndpointManager (None until main_loop runs)."""
    return _ENDPOINT_MANAGER
def get_arp_table():
    """Return the module-wide ARP table (None until main_loop runs)."""
    return _ARP_TABLE
_ENDPOINT_MANAGER = None
_ARP_TABLE = None
def create_public_redirection(ip_pub, port_pub, ip_priv, port_priv):
    '''
    Request a redirection from a public IP (and optionally a port) to a
    private IP (and optionally a port), then apply it.

    Empty strings and negative port numbers are treated as "unset"
    (None), i.e. a whole-IP / any-port redirection.

    Returns a (success, message) tuple; on success the message is the
    string form of the applied endpoint.
    '''
    # Normalize the "unset" markers used by the RPC layer.
    if ip_pub == "": ip_pub = None
    if port_pub < 0: port_pub = None
    if ip_priv == "": ip_priv = None
    if port_priv < 0: port_priv = None
    if _ENDPOINT_MANAGER is None:
        return False, "Endpoint Manager not found"
    ep_list, info = _ENDPOINT_MANAGER.request_endpoint(ip_pub, port_pub, ip_priv, port_priv)
    if len(ep_list) == 0:
        # %s instead of %d: port_priv may have been normalized to None
        # above, and "%d" % None raises TypeError while building the
        # error message.
        return False, "Could not obtain a redirection for %s:%s (%s)" % (ip_priv, port_priv, info)
    # At least one endpoint exists here (the empty case returned above);
    # the previous "Could not find any free IP" else-branch was
    # unreachable and has been removed.
    ep = ep_list[0]
    result, msg = _ENDPOINT_MANAGER.apply_endpoint(ep)
    if not result:
        return False, "Could not apply the redirection %s (%s)" % (ep, msg)
    return True, str(ep)
def unregister_redirection_to(dst_ip, dst_port):
    '''
    Remove the redirection whose destination is dst_ip:dst_port.

    Returns a (success, message) tuple.
    '''
    if _ENDPOINT_MANAGER is None:
        return False, "Endpoint Manager not found"
    result = _ENDPOINT_MANAGER.terminate_redirection_to(dst_ip, dst_port)
    if not result:
        return False, "Could not delete the redirection to %s:%d. Does it exist?" % (dst_ip, dst_port)
    # Bug fix: the success message referenced an undefined name "ep",
    # which raised NameError; report the destination instead.
    return True, "Redirection to %s:%d unregistered" % (dst_ip, dst_port)
def unregister_redirection_from(public_ip, public_port):
    '''
    Remove the redirection whose source is public_ip:public_port.

    Returns a (success, message) tuple.
    '''
    if _ENDPOINT_MANAGER is None:
        return False, "Endpoint Manager not found"
    result = _ENDPOINT_MANAGER.terminate_redirection_from(public_ip, public_port)
    if not result:
        return False, "Could not delete the redirection from %s:%d. Does it exist?" % (public_ip, public_port)
    # Bug fix: the success message referenced an undefined name "ep",
    # which raised NameError; report the source instead.
    return True, "Redirection from %s:%d unregistered" % (public_ip, public_port)
def unregister_redirection(public_ip, public_port, private_ip, private_port):
    '''
    Remove the exact redirection public_ip:public_port -> private_ip:private_port.

    Returns a (success, message) tuple.
    '''
    if _ENDPOINT_MANAGER is None:
        return False, "Endpoint Manager not found"
    # Bug fix: "Endpoint" was referenced unqualified and raised
    # NameError (only the endpoint module is imported here), so the
    # class is accessed through the module.
    # TODO(review): confirm the class is named Endpoint in endpoint.py.
    ep = endpoint.Endpoint(public_ip, public_port, private_ip, private_port)
    result = _ENDPOINT_MANAGER.terminate_endpoint(ep)
    if not result:
        return False, "Could not delete the redirection %s. Does it exist?" % (str(ep))
    return True, "Redirection %s unregistered" % ep
def clean_private_ip(private_ip):
    """Drop every redirection whose destination is *private_ip*.

    Returns a (success, message) tuple.
    """
    if _ENDPOINT_MANAGER is None:
        return False, "Endpoint Manager not found"
    if _ENDPOINT_MANAGER.clean_private_ip(private_ip):
        return True, "Redirections to %s unregistered" % private_ip
    return False, "Could not clean the redirections to %s. Do they exist?" % (private_ip)
def clean_public_ip(public_ip):
    """Drop every redirection whose source is *public_ip*.

    Returns a (success, message) tuple.
    """
    if _ENDPOINT_MANAGER is None:
        return False, "Endpoint Manager not found"
    if _ENDPOINT_MANAGER.clean_public_ip(public_ip):
        return True, "Redirections from %s unregistered" % public_ip
    return False, "Could not clean the redirections from %s. Do they exist?" % (public_ip)
def arp(mac):
    """Resolve *mac* to its IP address using the module ARP table.

    Returns (True, ip) on success or (False, error message).
    """
    table = _ARP_TABLE
    if table is None:
        return False, "ARP table not found"
    resolved = table.get_ip(mac)
    if resolved is not None:
        return True, resolved
    return False, "Could not get the IP address for %s" % (mac)
def get_public_ips():
    """Return (True, list of managed public IPs) or (False, error message)."""
    manager = _ENDPOINT_MANAGER
    if manager is None:
        return False, "Endpoint Manager not found"
    return True, manager.get_public_ips()
def get_version():
    # XML-RPC endpoint: report the running ipfloater version string.
    return version.get()
def get_redirections():
    # XML-RPC endpoint: human-readable dump of the redirection table.
    return str(_ENDPOINT_MANAGER)
def handler_sigint(signal, frame):
    """SIGINT/SIGTERM handler: tear down our iptables rules, then exit 0."""
    _LOGGER.info("removing iptables rules")
    iptables.cleanup_rules()
    sys.exit(0)
def main_loop():
    """Entry point: parse CLI options, build the endpoint manager and ARP
    table, start the XML-RPC (and optionally REST) servers, set up the
    base iptables rules, and run the event loop forever.
    """
    global _ENDPOINT_MANAGER, _ARP_TABLE
    eventloop.create_eventloop(True)
    ap = CmdLineParser("ipfloater", "This is a server that deals with iptables to enable floating IPs in private networks", [
        Flag("--block-public", "-b", value = True, default = config.config.BLOCK_PUBLIC_IPS),
        Parameter("--db", "-d", "The path for the persistence file", 1, False, [config.config.DB]),
        Parameter("--listen-ip", "-i", "The ip adress in which ipfloater will listen for xmlrpc requests", 1, False, [ config.config.LISTEN_IP ]),
        Parameter("--listen-port", "-p", "The ip port in which ipfloater will listen for xmlrpc requests", 1, False, [ config.config.LISTEN_PORT ]),
        Parameter("--rest-ip", "-s", "The ip adress in which ipfloater will listen for restful requests", 1, False, [ config.config.REST_IP ]),
        Parameter("--rest-port", "-t", "The ip port in which ipfloater will listen for restful requests", 1, False, [ config.config.REST_PORT ]),
        Parameter("--arp-table", "-a", "The file that contains a set of whitespace separated pairs MAC IP that will be used to resolve arp requests. The IPs will also be added to the IP pool.", 1, False, [ config.config.IP_POOL_FILE ]),
    ])
    # Will try to exit removing the iptables rules
    signal.signal(signal.SIGINT, handler_sigint)
    signal.signal(signal.SIGTERM, handler_sigint)
    parsed, result, info = ap.parse(sys.argv[1:])
    if not parsed:
        # result is None means a parse error; otherwise "info" carries
        # the help/usage text requested by the user.
        if (result is None):
            print "Error:", info
            sys.exit(-1)
        else:
            print info
            sys.exit(0)
    SERVER=result.values['--listen-ip'][0]
    PORT=result.values['--listen-port'][0]
    _ENDPOINT_MANAGER = endpoint.EndpointManager(result.values['--db'][0])
    _ARP_TABLE = arptable.ARPTable()
    # Seed the ARP table (and the public IP pool) from the optional file
    # of whitespace-separated MAC/IP pairs.
    arp_filename = result.values['--arp-table'][0]
    if arp_filename != "":
        arp_filename = os.path.expanduser(os.path.expandvars(arp_filename))
        if _ARP_TABLE.read_from_file(arp_filename) is not None:
            for ip in _ARP_TABLE.get_ips():
                _ENDPOINT_MANAGER.add_public_ip(ip)
            for ip in _ARP_TABLE.get_ips_without_mac():
                _ENDPOINT_MANAGER.add_public_ip(ip)
    # TODO: persist in database
    for ip in config.config.IP_POOL:
        _ENDPOINT_MANAGER.add_public_ip(ip)
    for ipmask in config.config.PRIVATE_IP_RANGES:
        _ENDPOINT_MANAGER.add_private_range(ipmask)
    # Expose the module-level functions over XML-RPC in a worker thread.
    if not xmlrpcutils.create_xmlrpc_server_in_thread(SERVER, PORT, [arp, create_public_redirection, unregister_redirection, unregister_redirection_from, unregister_redirection_to, clean_private_ip, clean_public_ip, get_version, get_redirections, get_public_ips]):
        _LOGGER.error("could not setup the service")
        raise Exception("could not setup the service")
    #if REMOVE_RULES_AT_BOOT:
    #    iptables.find_endpointchains_and_remove()
    iptables.cleanup_rules()
    iptables.setup_basic_rules()
    if result.values['--block-public']:
        for ip in _ENDPOINT_MANAGER.get_public_ips():
            iptables.block_ip(ip)
    _ENDPOINT_MANAGER.get_data_from_db()
    _LOGGER.info("server running in %s:%d" % (SERVER, PORT))
    RESTIP=result.values['--rest-ip'][0]
    RESTPORT=result.values['--rest-port'][0]
    try:
        RESTPORT = int(RESTPORT)
    except:
        # Bare except kept as-is: any non-numeric port disables REST.
        RESTPORT = 0
    # The REST interface is optional: only start it when both an IP and
    # a valid port were supplied.
    if (RESTIP is not None) and (RESTIP != "") and (RESTPORT > 0):
        import restserver
        import cpyutils.restutils
        cpyutils.restutils.run_in_thread(RESTIP, RESTPORT)
        _LOGGER.info("REST server running in %s:%d" % (RESTIP, RESTPORT))
    eventloop.get_eventloop().loop()

if __name__ == '__main__':
    main_loop()
| StarcoderdataPython |
def aumentar(preco=0, taxa=0, formato=False):
    """
    Increase *preco* by *taxa* percent.

    :param preco: base price
    :param taxa: percentage to add
    :param formato: when True, return the value formatted as currency
    :return: the increased price, formatted by moeda() if requested
    """
    acrescimo = preco * taxa / 100
    total = preco + acrescimo
    # Identity test kept from the original: only the literal False
    # skips formatting (e.g. formato=0 still formats).
    if formato is False:
        return total
    return moeda(total)
def diminuir(preco=0, taxa=0, formato=False):
    """Decrease *preco* by *taxa* percent; format as currency when asked."""
    desconto = preco * taxa / 100
    total = preco - desconto
    # Identity test kept from the original implementation.
    if formato is False:
        return total
    return moeda(total)
def dobro(preco=0, formato=False):
    """Double *preco*; format as currency when *formato* is truthy."""
    total = preco + preco
    return moeda(total) if formato else total
def metade(preco=0, formato=False):
    """Halve *preco*; format as currency when *formato* is truthy."""
    total = preco / 2
    return moeda(total) if formato else total
def moeda(preco=0, moeda='R$'):
    """
    Format *preco* as a Brazilian-style currency string.

    :param preco: numeric value to format
    :param moeda: currency symbol used as prefix
    :return: e.g. ``'R$ 10,00'`` -- the value right-aligned to width 6
             with two decimals, using a decimal comma
    """
    # Build "<symbol><value>" first, then swap every '.' for ',' on the
    # whole string (exactly what the original f-string version did).
    texto = '{}{:>6.2f}'.format(moeda, preco)
    return texto.replace('.', ',')
def resumo(preco=0, taxaa=1, taxar=1):
    """
    Print a formatted summary table for *preco*: the price itself, its
    double and half, and the price increased by *taxaa* percent and
    reduced by *taxar* percent.  The \t escapes align the value column.
    """
    print('-+-'* 15)
    print('RESUMO DO VALOR'.center(40))
    print('-+-'* 15)
    print(f'Preço analisado: \t{moeda(preco)}')
    print(f'Dobro do preço: \t{dobro(preco, True)}')
    print(f'Metade do preço: \t{metade(preco, True)}')
    print(f'{taxaa}% aumento: \t\t{aumentar(preco, taxaa, True)}')
    print(f'{taxar}% de redução: \t{diminuir(preco, taxar, True)}')
    print('-+-' * 15)
| StarcoderdataPython |
1797653 | # influenced by https://www.reddit.com/r/adventofcode/comments/a3kr4r/2018_day_6_solutions/eb7385m/
import itertools
from collections import defaultdict, Counter
def part1(points):
    """Solve AoC 2018 day 6 part 1: size of the largest finite region.

    Every cell of the bounding grid is assigned to its single closest
    input point by Manhattan distance (ties leave the cell unowned, -1);
    points whose region reaches the sampled border rows/columns extend
    to infinity and are excluded; the largest remaining region's cell
    count is returned.
    """
    max_x, max_y = max(x[0] for x in points), max(x[1] for x in points)
    # grid maps (x, y) -> owning point, or -1 when the closest is tied.
    grid = defaultdict(lambda: -1)
    for x, y in itertools.product(range(max_x + 1), range(max_y + 1)):
        closest_dist = min(abs(x - i) + abs(y - j) for i, j in points)
        closest_points = [
            (i, j) for i, j in points if abs(x - i) + abs(y - j) == closest_dist
        ]
        if len(closest_points) > 1:
            grid[x, y] = -1
        else:
            grid[x, y] = closest_points[0]
    # Exclude corners of grid
    # NOTE(review): the border sampling below reads row y == max_y - 1
    # and column x == max_x - 1 although the grid extends to max_y and
    # max_x (while the left/top edges use 0) -- confirm this off-by-one
    # is intentional before relying on the result.
    infinite_points = (
        set(grid[(x, max_y - 1)] for x in range(max_x))
        .union((grid[(max_x - 1, y)] for y in range(max_y)))
        .union((grid[(x, 0)] for x in range(max_x)))
        .union((grid[(0, y)] for y in range(max_y)))
    )
    grid_values = list(grid.values())
    return max(grid_values.count(p) for p in points if p not in infinite_points)
def part2(points, limit=10000):
    """Solve AoC 2018 day 6 part 2: count the cells of the safe region.

    A cell is "safe" when the sum of its Manhattan distances to every
    input point is strictly below *limit*.

    Parameters
    ----------
    points : list of (int, int)
        Input coordinate pairs.
    limit : int, optional
        Distance threshold.  Defaults to the puzzle's hard-coded 10000;
        exposed as a parameter (backward compatible) so the function
        also works for the worked example and other inputs.

    Returns
    -------
    int
        Number of cells (x, y) with 0 <= x < max_x and 0 <= y < max_y
        whose summed distance to all points is below *limit*.
    """
    max_x = max(p[0] for p in points)
    max_y = max(p[1] for p in points)
    return sum(
        1
        for x in range(max_x)
        for y in range(max_y)
        if sum(abs(x - i) + abs(y - j) for i, j in points) < limit
    )
if __name__ == "__main__":
with open("6.txt") as f:
points = f.readlines()
points = [tuple(int(i) for i in l.split(",")) for l in points]
print("Part 1: {}".format(part1(points)))
print("Part 2: {}".format(part2(points)))
| StarcoderdataPython |
3317605 | example = """ |
| +--+
A | C
F---|----E|--+
| | | D
+B-+ +--+
"""
import os.path
# Read the puzzle input from a file stored next to this script.
INPUT=os.path.join(os.path.dirname(__file__), "input.txt")
with open(INPUT) as f:
    data = f.read()

# Part 1
def func(data):
    """Trace the route through an ASCII path diagram (AoC 2017 day 19).

    Starting at the single non-blank cell of the top row and heading
    down, follow the path, turning perpendicular at junctions and
    collecting every letter passed.  Returns ``(letters, steps)`` where
    *letters* is the concatenation of letters seen and *steps* is the
    number of loop iterations performed (cells visited plus the final
    stuck check).
    """
    maze = data.splitlines()

    def char_at(y, x):
        # Treat out-of-bounds cells as blanks so edge handling is uniform.
        if 0 <= y < len(maze) and 0 <= x < len(maze[y]):
            return maze[y][x]
        return ' '

    # Entry point: the non-blank cell in the first row, heading down.
    pos = None
    heading = (1, 0)
    for col, ch in enumerate(maze[0]):
        if ch != ' ':
            pos = (0, col)
            break

    letters = []
    steps = 0
    while True:
        steps += 1
        prev = pos
        ahead = (pos[0] + heading[0], pos[1] + heading[1])
        ch = char_at(*ahead)
        if ch != ' ':
            pos = ahead
            if ch.isalpha():
                letters.append(ch)
        else:
            # Blocked straight ahead: try the two perpendicular turns.
            turns = [(-1, 0), (1, 0)] if heading[0] == 0 else [(0, -1), (0, 1)]
            for turn in turns:
                nxt = (pos[0] + turn[0], pos[1] + turn[1])
                ch = char_at(*nxt)
                if ch != ' ':
                    pos = nxt
                    heading = turn
                    if ch.isalpha():
                        letters.append(ch)
                    break
        # No move was possible in any direction: the path has ended.
        if prev == pos:
            break
    return "".join(letters), steps
print(func(example))  # sanity check against the worked example
print(func(data)) | StarcoderdataPython |
4841632 | #!usr/bin/env python
from tkinter import *
# Create the Tk root window and the StringVar bound to the first entry.
root = Tk()
v = StringVar()
def test(content, reason, name):
    """Validation callback for the Entry widget.

    Accepts the input only when its content is exactly "whu"; echoes
    the validation details to stdout either way.
    """
    ok = content == "whu"
    print("right" if ok else "fault")
    print(content, reason, name)
    return ok
# Register the Python callback so Tk can invoke it; the percent codes
# pass the pending value (%P), validation reason (%v) and widget path
# (%W) as the callback's arguments.
testCMD = root.register(test)
# e1 validates on focus-out via the registered command; e2 is a plain
# entry used to shift focus and so trigger the validation.
e1 = Entry(root, textvariable=v, validate="focusout", validatecommand=(testCMD, '%P', '%v', '%W'))
e2 = Entry(root)
e1.pack(padx=10, pady=10)
e2.pack(padx=10, pady=10)
mainloop() | StarcoderdataPython |
3287488 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy.io.fits.util import _is_int
__all__ = ['BoundingBox']
class BoundingBox:
"""
A rectangular bounding box in integer (not float) pixel indices.
Parameters
----------
ixmin, ixmax, iymin, iymax : int
The bounding box pixel indices. Note that the upper values
(``iymax`` and ``ixmax``) are exclusive as for normal slices in
Python. The lower values (``ixmin`` and ``iymin``) must not be
greater than the respective upper values (``ixmax`` and
``iymax``).
Examples
--------
>>> from regions import BoundingBox
>>> # constructing a BoundingBox like this is cryptic:
>>> bbox = BoundingBox(1, 10, 2, 20)
>>> # it's better to use keyword arguments for readability:
>>> bbox = BoundingBox(ixmin=1, ixmax=10, iymin=2, iymax=20)
>>> bbox # nice repr, useful for interactive work
BoundingBox(ixmin=1, ixmax=10, iymin=2, iymax=20)
>>> # sometimes it's useful to check if two bounding boxes are the same
>>> bbox == BoundingBox(ixmin=1, ixmax=10, iymin=2, iymax=20)
True
>>> bbox == BoundingBox(ixmin=7, ixmax=10, iymin=2, iymax=20)
False
>>> # "shape" and "slices" can be useful when working with numpy arrays
>>> bbox.shape # numpy order: (y, x)
(18, 9)
>>> bbox.slices # numpy order: (y, x)
(slice(2, 20, None), slice(1, 10, None))
>>> # "extent" is useful when plotting the BoundingBox with matplotlib
>>> bbox.extent # matplotlib order: (x, y)
(0.5, 9.5, 1.5, 19.5)
"""
def __init__(self, ixmin, ixmax, iymin, iymax):
if not _is_int(ixmin):
raise TypeError('ixmin must be an integer')
if not _is_int(ixmax):
raise TypeError('ixmax must be an integer')
if not _is_int(iymin):
raise TypeError('iymin must be an integer')
if not _is_int(iymax):
raise TypeError('iymax must be an integer')
if ixmin > ixmax:
raise ValueError('ixmin must be <= ixmax')
if iymin > iymax:
raise ValueError('iymin must be <= iymax')
self.ixmin = ixmin
self.ixmax = ixmax
self.iymin = iymin
self.iymax = iymax
@classmethod
def from_float(cls, xmin, xmax, ymin, ymax):
"""
Return the smallest bounding box that fully contains a given
rectangle defined by float coordinate values.
Following the pixel index convention, an integer index
corresponds to the center of a pixel and the pixel edges span
from (index - 0.5) to (index + 0.5). For example, the pixel
edge spans of the following pixels are:
- pixel 0: from -0.5 to 0.5
- pixel 1: from 0.5 to 1.5
- pixel 2: from 1.5 to 2.5
In addition, because `BoundingBox` upper limits are exclusive
(by definition), 1 is added to the upper pixel edges. See
examples below.
Parameters
----------
xmin, xmax, ymin, ymax : float
Float coordinates defining a rectangle. The lower values
(``xmin`` and ``ymin``) must not be greater than the
respective upper values (``xmax`` and ``ymax``).
Returns
-------
bbox : `BoundingBox` object
The minimal ``BoundingBox`` object fully containing the
input rectangle coordinates.
Examples
--------
>>> from regions import BoundingBox
>>> BoundingBox.from_float(xmin=1.0, xmax=10.0, ymin=2.0, ymax=20.0)
BoundingBox(ixmin=1, ixmax=11, iymin=2, iymax=21)
>>> BoundingBox.from_float(xmin=1.4, xmax=10.4, ymin=1.6, ymax=10.6)
BoundingBox(ixmin=1, ixmax=11, iymin=2, iymax=12)
"""
ixmin = int(np.floor(xmin + 0.5))
ixmax = int(np.ceil(xmax + 0.5))
iymin = int(np.floor(ymin + 0.5))
iymax = int(np.ceil(ymax + 0.5))
return cls(ixmin, ixmax, iymin, iymax)
def __eq__(self, other):
if not isinstance(other, BoundingBox):
raise TypeError('Can compare BoundingBox only to another '
'BoundingBox.')
return (
(self.ixmin == other.ixmin) and
(self.ixmax == other.ixmax) and
(self.iymin == other.iymin) and
(self.iymax == other.iymax)
)
    def __or__(self, other):
        # ``self | other`` -> smallest box containing both (see `union`).
        return self.union(other)
    def __and__(self, other):
        # ``self & other`` -> overlap box or None (see `intersection`).
        return self.intersection(other)
def __repr__(self):
data = self.__dict__
data['name'] = self.__class__.__name__
fmt = ('{name}(ixmin={ixmin}, ixmax={ixmax}, iymin={iymin}, '
'iymax={iymax})')
return fmt.format(**data)
@property
def shape(self):
"""
The ``(ny, nx)`` shape of the bounding box.
"""
return self.iymax - self.iymin, self.ixmax - self.ixmin
@property
def slices(self):
"""
The bounding box as a tuple of `slice` objects.
The slice tuple is in numpy axis order (i.e. ``(y, x)``) and
therefore can be used to slice numpy arrays.
"""
return slice(self.iymin, self.iymax), slice(self.ixmin, self.ixmax)
@property
def extent(self):
"""
The extent of the mask, defined as the ``(xmin, xmax, ymin,
ymax)`` bounding box from the bottom-left corner of the
lower-left pixel to the upper-right corner of the upper-right
pixel.
The upper edges here are the actual pixel positions of the
edges, i.e. they are not "exclusive" indices used for python
indexing. This is useful for plotting the bounding box using
Matplotlib.
"""
return (
self.ixmin - 0.5,
self.ixmax - 0.5,
self.iymin - 0.5,
self.iymax - 0.5,
)
def as_artist(self, **kwargs):
"""
Return a `matplotlib.patches.Rectangle` that represents the
bounding box.
Parameters
----------
kwargs : `dict`
Any keyword arguments accepted by
`matplotlib.patches.Patch`.
Returns
-------
result : `matplotlib.patches.Rectangle`
A matplotlib rectangular patch.
Examples
--------
.. plot::
:include-source:
import matplotlib.pyplot as plt
from regions import BoundingBox
bbox = BoundingBox(2, 7, 3, 8)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
np.random.seed(12345)
ax.imshow(np.random.random((10, 10)), interpolation='nearest', cmap='viridis')
ax.add_patch(bbox.as_artist(facecolor='none', edgecolor='white', lw=2.))
"""
from matplotlib.patches import Rectangle
return Rectangle(xy=(self.extent[0], self.extent[2]),
width=self.shape[1], height=self.shape[0], **kwargs)
def to_region(self):
"""
Return a `~regions.RectanglePixelRegion` that
represents the bounding box.
"""
from ..shapes import RectanglePixelRegion
from .pixcoord import PixCoord
xpos = (self.extent[1] + self.extent[0]) / 2.
ypos = (self.extent[3] + self.extent[2]) / 2.
xypos = PixCoord(xpos, ypos)
h, w = self.shape
return RectanglePixelRegion(center=xypos, width=w, height=h)
def plot(self, origin=(0, 0), ax=None, **kwargs):
    """
    Plot the `BoundingBox` on a matplotlib `~matplotlib.axes.Axes`
    instance.

    Parameters
    ----------
    origin : array_like, optional
        The ``(x, y)`` position of the origin of the displayed
        image.
    ax : `matplotlib.axes.Axes` instance, optional
        If `None`, then the current `~matplotlib.axes.Axes` instance
        is used.
    kwargs : `dict`
        Any keyword arguments accepted by `matplotlib.patches.Patch`.

    Returns
    -------
    ax : `~matplotlib.axes.Axes`
        Axes on which the patch is added.
    """
    # Delegate to the equivalent RectanglePixelRegion's plot method.
    reg = self.to_region()
    return reg.plot(origin=origin, ax=ax, **kwargs)
def union(self, other):
    """
    Return a `BoundingBox` representing the union of this
    `BoundingBox` with another `BoundingBox`.

    Parameters
    ----------
    other : `~photutils.BoundingBox`
        The `BoundingBox` to join with this one.

    Returns
    -------
    result : `~photutils.BoundingBox`
        A `BoundingBox` representing the union of the input
        `BoundingBox` with this one.
    """
    if not isinstance(other, BoundingBox):
        raise TypeError('BoundingBox can be joined only with another '
                        'BoundingBox.')

    # The union spans the outermost edges of both boxes.
    return BoundingBox(ixmin=min(self.ixmin, other.ixmin),
                       ixmax=max(self.ixmax, other.ixmax),
                       iymin=min(self.iymin, other.iymin),
                       iymax=max(self.iymax, other.iymax))
def intersection(self, other):
    """
    Return a `BoundingBox` representing the intersection of this
    `BoundingBox` with another `BoundingBox`.

    Parameters
    ----------
    other : `~photutils.BoundingBox`
        The `BoundingBox` to intersect with this one.

    Returns
    -------
    result : `~photutils.BoundingBox`
        A `BoundingBox` representing the intersection of the input
        `BoundingBox` with this one, or `None` if the boxes are
        disjoint.
    """
    if not isinstance(other, BoundingBox):
        raise TypeError('BoundingBox can be intersected only with '
                        'another BoundingBox.')

    # The intersection spans the innermost edges of both boxes.
    x_lo, x_hi = max(self.ixmin, other.ixmin), min(self.ixmax, other.ixmax)
    y_lo, y_hi = max(self.iymin, other.iymin), min(self.iymax, other.iymax)

    if x_hi < x_lo or y_hi < y_lo:
        # Disjoint boxes: there is no overlapping region.
        return None

    return BoundingBox(ixmin=x_lo, ixmax=x_hi, iymin=y_lo, iymax=y_hi)
| StarcoderdataPython |
1714998 | <gh_stars>0
"""Common classes used as base for the integration test
and the unittest.
"""
import io
import logging
import os
import shutil
import tempfile
import time
import unittest
import requests
import urllib3
from app import create_app
class BaseTest(unittest.TestCase):
    """Common base class for both unit and integration tests."""

    @classmethod
    def get_testdata_path(cls, filename):
        """Return the absolute location of the filename in the testdatadir"""
        # testdata/ lives next to the directory containing this module.
        here = os.path.dirname(os.path.abspath(__file__))
        testdata_dir = os.path.join(os.path.dirname(here), "testdata")
        return os.path.join(testdata_dir, filename)
class BaseApiTest(BaseTest):
    """Base class for API tests: wires a testing Flask app with
    temporary cache/output directories and a test client."""

    def setUp(self):
        """Before each test:
        * set the TESTING to true
        * set the upload directory to a temporary directory
        * ensure we don't run the test in debug
        """
        self.flask_app = create_app(testing=True)
        # Fresh temp dirs per test keep tests isolated from each other.
        self.wms_cache_dir = tempfile.mkdtemp()
        self.cm_outputs_dir = tempfile.mkdtemp()
        self.flask_app.config["WMS_CACHE_DIR"] = self.wms_cache_dir
        self.flask_app.config["CM_OUTPUTS_DIR"] = self.cm_outputs_dir
        self.flask_app.config["FILTER_DATASETS"] = True
        self.client = self.flask_app.test_client()
        self.client.follow_redirect = True
        self.assertEqual(self.flask_app.debug, False)

    def tearDown(self):
        """After each test, cleanup the upload directory"""
        shutil.rmtree(self.flask_app.config["WMS_CACHE_DIR"])
        shutil.rmtree(self.flask_app.config["CM_OUTPUTS_DIR"])

    def prepare_file_upload(self, filename, dest_filename=None):
        """Return the data corresponding to a file upload

        Reads *filename* from the testdata directory and returns a
        ``(test_data, content)`` pair, where ``test_data`` is the
        multipart form dict expected by the Flask test client.
        """
        with open(self.get_testdata_path(filename), "rb") as f:
            content = f.read()
        file_io = io.BytesIO(content)
        if not dest_filename:
            dest_filename = filename
        test_data = {"file": (file_io, dest_filename)}
        return test_data, content

    def assertStatusCodeEqual(self, response, status_code):
        """Assert that a flask client test status code is
        equal to the status_code
        """
        # Include the response body in the failure message for debugging.
        self.assertEqual(response.status_code, status_code, response.data)
def labeledTest(*labels):
    """Class decorator that tags a TestCase with one or more labels.

    Labels distinguish integration tests (which need extra services
    such as redis or postgres) from plain unit tests:

    * A unit test can run without additional services.
    * An integration test needs additional services.

    Usage:
        @labeledTest("integration")
        class FakeOutputTest(BaseApiTest):
            pass
    """
    def decorate(test_cls):
        # Stored as a set so LabelTestRunner can intersect it with
        # its whitelist.
        test_cls._label = set(labels)
        return test_cls

    return decorate
class LabelTestRunner(unittest.runner.TextTestRunner):
    """This testrunner accept a list of whitelist_labels,
    It will run all test without a label if no label is
    specified. If a label is specified, all testcase class
    decorated with labeledTest and having a label in the
    whitelist_labels will be ran.
    """

    def __init__(self, selection_labels=[], *args, **kwargs):
        # NOTE(review): ``set(*selection_labels)`` unpacks the argument,
        # so callers are expected to pass a list containing a single
        # iterable (or nothing) — confirm at call sites before changing.
        self.selection_labels = set(*selection_labels)
        super(LabelTestRunner, self).__init__(*args, **kwargs)

    @classmethod
    def flatten_tests(cls, suite):
        """Iterate through the test in a test suite. It will
        yield individual tests by flattening the suite into
        a list of tests.
        """
        for test in suite:
            if isinstance(test, unittest.TestSuite):
                # Recurse into nested suites.
                for t in cls.flatten_tests(test):
                    yield t
            else:
                yield test

    def run(self, testlist):
        """Change given testlist into a TestSuite.
        And then run all the tests of the TestSuite
        without (or with the right) label.
        """
        # Create TestSuite instance
        suite = unittest.TestSuite()

        # Add each test in testlist, apply skip mechanism if necessary
        for test in self.flatten_tests(testlist):
            if hasattr(test, "_label"):
                # Labeled test: run only when a label matches the whitelist.
                matched_label = test._label.intersection(self.selection_labels)
                if matched_label:
                    suite.addTest(test)
            elif not self.selection_labels:
                # Unlabeled test: run only when no whitelist was given.
                suite.addTest(test)

        # Resume normal TextTestRunner function with the created test suite
        return super().run(suite)
DEFAULT_API_URL = "http://127.0.0.1:7000"


@labeledTest("integration")
class BaseIntegrationTest(BaseTest):
    """Base class for integration tests hitting a running API instance.

    The API endpoint is taken from the ``API_URL`` environment variable
    (falling back to ``DEFAULT_API_URL``); each test waits for the
    service's ``/healthz`` endpoint to respond before running.
    """

    def wait_for_reachability(self, max_retry=20, wait_time=3):
        """Wait for the api to be reachable by poking its healthz endpoint

        Returns True as soon as the endpoint answers OK, False after
        ``max_retry`` attempts spaced ``wait_time`` seconds apart.
        """
        retry = 0
        logging.info("Waiting for the api to be reachable")
        while retry <= max_retry:
            try:
                resp = self.session.get(self.url + "/healthz")
            except (
                urllib3.exceptions.MaxRetryError,
                urllib3.exceptions.TimeoutError,
                requests.exceptions.ConnectionError,
                requests.exceptions.RequestException,
            ):
                # Connection-level failures just mean "not up yet".
                logging.info(".")
            else:
                if resp.ok:
                    return True
            retry += 1
            time.sleep(wait_time)
        return False

    def setUp(self, *args, **kwargs):
        self.url = os.environ.get("API_URL", DEFAULT_API_URL)
        self.api_url = self.url + "/api"
        self.session = requests.Session()
        # Fix: the original called ``super().__init__(*args, **kwargs)``
        # here, re-initializing the TestCase in the middle of a run;
        # setUp must chain to the parent's setUp instead.
        super().setUp()
        if not self.wait_for_reachability():
            logging.error("API is not reachable after max retries")
| StarcoderdataPython |
135892 | <reponame>stockdillon/AMPED_MSU_Capstone<filename>comprehend/cam-backend-master-e5bd2e9e571e99ee3c655986cb5895bfa12c9174/deserializer.py<gh_stars>0
import enum
import json
import collections
class ComprehendResponse(object):
    """Base class for all parsed AWS Comprehend results.

    Carries only a human-readable ``name`` used as the repr.
    """

    def __init__(self, name='Comprehend Response'):
        self.name = name

    def __repr__(self):
        return self.name
class ComprehendSentiment(ComprehendResponse):
    """Placeholder for sentiment results; not yet implemented."""

    def __init__(self):
        # NOTE(review): intentionally does not call the base __init__,
        # so ``repr`` would fail — confirm before using this class.
        pass
class ComprehendTextResponse(ComprehendResponse):
    """A Comprehend result that carries a piece of source text.

    Fixes two defects in the original: the ``score`` argument was
    accepted but silently dropped, and ``name`` was never forwarded to
    ``ComprehendResponse``, leaving ``self.name`` unset (so ``repr``
    raised AttributeError on instances without their own ``__repr__``).
    """

    def __init__(self, text, score, name='Comprehend Text Response'):
        super().__init__(name)
        self.text = text
        self.score = score
class ComprehendKeyPhrase(ComprehendTextResponse):
    """Aggregated occurrences of one key phrase.

    Tracks how many times the phrase was seen (``count``), plus the
    confidence score and (begin, end) character offsets of each
    occurrence.
    """

    def __init__(self, text, score, begin_offset, end_offset):
        # Fix: the original passed the display name positionally,
        # landing it in the parent's ``score`` parameter; pass it by
        # keyword so both score and name reach the right slots.
        super().__init__(text, score, name='Comprehend Key Phrase')
        self.count = 1
        self.offsets = []
        self.scores = []
        self.add_offset(begin_offset, end_offset)
        self.add_score(score)

    def inc(self):
        """Record one more occurrence of this phrase."""
        self.count += 1

    def add_offset(self, begin, end):
        """Append one (begin, end) character-offset pair."""
        self.offsets.append((begin, end))

    def add_score(self, score):
        """Append the confidence score of one occurrence."""
        self.scores.append(score)

    def __repr__(self):
        return ('Comprehend Key Phrase: Text: {} Score: {} Count: {} Offsets: {}'.format(
            self.text, self.scores, self.count, self.offsets))
class ComprehendEntity(ComprehendTextResponse):
    """A single detected entity with its type, score and offsets."""

    def __init__(self, text, score, begin_offset, end_offset, _type):
        # Fix: the original passed the display name positionally,
        # landing it in the parent's ``score`` parameter; pass it by
        # keyword so both score and name reach the right slots.
        super().__init__(text, score, name='Comprehend Entity')
        self.begin_offset = begin_offset
        self.end_offset = end_offset
        self.type = _type
        self.score = score

    def __repr__(self):
        return ('Comprehend Entity: Type: {} Text: {} Score: {} BeginOffset: {} EndOffset: {}'.format(
            self.type, self.text, self.score, self.begin_offset, self.end_offset))
class EntityType(enum.Enum):
    """
    Enumerations for parsing the amazon comprehend data

    NOTE(review): this duplicates the ENTITY_TYPES list defined below;
    keep both in sync (or derive one from the other).
    """
    COMMERCIAL_ITEM = 'COMMERCIAL_ITEM'
    DATE = 'DATE'
    EVENT = 'EVENT'
    LOCATION = 'LOCATION'
    ORGANIZATION = 'ORGANIZATION'
    OTHER = 'OTHER'
    PERSON = 'PERSON'
    QUANTITY = 'QUANTITY'
    TITLE = 'TITLE'
def entity_hook_handler(json):
    """Build a ComprehendEntity from one raw 'Entities' dict.

    NOTE(review): the parameter name shadows the ``json`` module.
    """
    return ComprehendEntity(text=json['Text'],
                            score=json['Score'],
                            begin_offset=json['BeginOffset'],
                            end_offset=json['EndOffset'],
                            _type=json['Type'])
def key_phrase_hook_handler(json):
    """Build a ComprehendKeyPhrase from one raw key-phrase dict.

    NOTE(review): the parameter name shadows the ``json`` module.
    """
    return ComprehendKeyPhrase(text=json['Text'],
                               score=json['Score'],
                               begin_offset=json['BeginOffset'],
                               end_offset=json['EndOffset'])
# All entity types Comprehend may return; used to pre-seed the result
# dict in Deserializer.deserialize_entities.
# NOTE(review): duplicates EntityType above — keep in sync.
ENTITY_TYPES = ['COMMERCIAL_ITEM',
                'DATE',
                'EVENT',
                'LOCATION',
                'ORGANIZATION',
                'OTHER',
                'PERSON',
                'QUANTITY',
                'TITLE']
class Deserializer(object):
    """
    Deserializes the data provided by Amazon's comprehend engine
    into a dictionary of entities, where key = type.
    Data is parsed as Json where key 'Entities' is a list of dictionaries.
    """

    def deserialize_entities(self, data):
        """
        deserializes the input data into the format {'entity_type' : [entities]}

        Each bucket is sorted by descending confidence score.
        """
        deserialized_entities = {key: [] for key in ENTITY_TYPES}
        for entity in data['Entities']:
            comp = entity_hook_handler(entity)
            # setdefault keeps an unknown/new entity type from raising
            # KeyError (the original crashed on types not in ENTITY_TYPES).
            deserialized_entities.setdefault(comp.type, []).append(comp)
        for bucket in deserialized_entities.values():
            # Fix: the original did ``v = v.sort(...)``, rebinding the
            # loop variable to None; sort in place instead.
            bucket.sort(key=lambda x: x.score, reverse=True)
        return deserialized_entities

    def deserialize_key_phrases(self, data):
        """Collapse duplicate key phrases, accumulating their counts,
        scores and offsets, and return them sorted by count (desc)."""
        parsed = {}
        for key_phrase in data:
            text = key_phrase['Text']
            if text in parsed:
                kp = parsed[text]
                kp.inc()
                kp.add_score(key_phrase['Score'])
                kp.add_offset(key_phrase['BeginOffset'],
                              key_phrase['EndOffset'])
            else:
                parsed[text] = key_phrase_hook_handler(key_phrase)
        result = list(parsed.values())
        result.sort(key=lambda x: x.count, reverse=True)
        return result
| StarcoderdataPython |
3207485 | # coding: utf-8
from memcached import Memcached # noqa
| StarcoderdataPython |
7021 | <reponame>richteer/pyfatafl<gh_stars>0
from module import XMPPModule
import halutils
import pyfatafl
class Game():
    """A single Hnefatafl match between two players, played over XMPP."""

    def __init__(self, mod, p1, p2):
        # Fix: the original assigned ``self.players = []`` etc. directly
        # in the class body (outside any method), which raises NameError
        # when the class statement executes.
        self.players = [p1, p2]
        self.mod = mod
        self.xmpp = mod.xmpp
        self.b = None
        self.turn = False  # index into self.players; False -> challenger
        self.xmpp.sendMsg(p2, "You have been challenged to play Hnefatafl by {}, reply with '!hnefatafl accept' to begin!".format(p1))

    def begin(self):
        # Fix: the original ``def begin():`` was missing ``self``.
        # Send initial board state
        self.b = pyfatafl.Board()  # fix: module imports pyfatafl, not hnefatafl
        self.turn = False  # For now, make the challenger be first
        self._sendBoard()

    def _sendBoard(self):
        # Fix: the original def was missing its trailing colon, and the
        # format string had two placeholders but only one argument.
        for i in self.players:
            self.xmpp.sendMsg(i, self.b.getPtBoard() + "\n\n" + "It is '{}''s ({}) turn".format(
                self.players[self.turn], "white" if self.turn else "black"))

    def msg(self, player, string):
        """Handle a move message from *player* (fix: ``self`` was missing)."""
        if player != self.players[self.turn]:
            self.xmpp.sendMsg(player, "Sorry, it is not your turn!")
            return  # fix: the original fell through and processed the move anyway
        m = pyfatafl.Move()
        string = "{} {}".format("w" if self.turn else "b", string)
        try:
            m.parse(string, self.b)
        except Exception:
            self.xmpp.sendMsg(player, "Invalid move format, see !help hnefatafl")
            return  # fix: do not attempt to apply an unparsed move
        try:
            self.b.move(m)
            # Fix: the turn never switched in the original, so one
            # player would move forever.
            self.turn = not self.turn
            self._sendBoard()
        except Exception as e:  # TODO: Have been errors
            self.xmpp.sendMsg(player, str(e))
        if self.b.over:  # fix: was ``self.over`` (attribute lives on the board)
            for i in self.players:
                self.xmpp.sendMsg(i, "Game over! {} wins!".format(self.b.over))
                del self.mod.sessions[i]
# Commented to avoid loading before its ready
class Hnefatafl(XMPPModule):
    """XMPP front-end: routes chat commands to Game sessions."""

    # Maps a player's bare JID to the Game they are in.
    sessions = {}

    def recvMsg(self, msg):
        cmd, args = halutils.splitArgList(msg)
        if cmd == "!hnefatafl":
            if args[0] == "challenge":
                if len(args) != 2:
                    self.xmpp.reply(msg, "Need to the JID of a target")
                    return
                elif args[1] == msg['from'].bare:
                    # Fix: was ``arg[1]`` (NameError) compared against
                    # ``msg['body'].bare``; self-challenge means the
                    # target equals the *sender's* JID.
                    self.xmpp.reply(msg, "You can't challenge yourself...")
                    return
                # TODO: Validate JID here
                g = Game(self, msg['from'].bare, args[1])
                # Fix: was ``self.sessions[msg['from']].bare = g``, which
                # set an attribute on a dict value instead of storing g.
                self.sessions[msg['from'].bare] = g
                self.sessions[args[1]] = g
                self.xmpp.reply(msg, "Challenge sent!")
            elif args[0] == "accept":
                if msg['from'].bare not in self.sessions:
                    self.xmpp.reply(msg, "You have not been challenged!")
                    return
                self.sessions[msg['from'].bare].begin()
            elif args[0] == "surrender":
                if msg['from'].bare not in self.sessions:
                    self.xmpp.reply(msg, "You aren't currently in a session")
                    return
                # Copy the player list: deleting sessions while iterating
                # the live structure would mutate it mid-loop.
                for p in list(self.sessions[msg['from'].bare].players):
                    del self.sessions[p]
        elif msg['from'].bare in self.sessions:  # fix: was bare ``sessions`` (NameError)
            self.sessions[msg['from'].bare].msg(msg['from'].bare, msg['body'])

    def help(self, string):
        if string in ["!hnefatafl", "hnefatafl"]:
            return '''
usage: !hnefatafl <command> [arg]

Commands:
   challenge <jid>  - Send a challenge to JID
   accept           - Accept a challenge from JID, and begin game
   surrender        - Surrender the game
'''
        return '''
Hnefatafl by XMPP! Play a game against someone through this bot.

Features:
  !hnefatafl - Command to challenge, accept, and surrender games

Note: This module will ignore any MUC messages, or other indirect messages
Another Note: This will likely be unplayable if not using a monospace font :)
'''
| StarcoderdataPython |
1703841 | <reponame>ATCtech/Automate-Tasks
from PIL import Image, ImageDraw, ImageFont
import pandas as pd
# Read the recipient names and certificate numbers from the CSV.
# NOTE(review): assumes the CSV has 'certificate_no' and
# 'receiver_names' columns — confirm against the actual file.
form = pd.read_csv("test_mail_new.csv")
#name_list = ["<NAME>", "<NAME>", "<NAME>"]
#c_no = ["ABC123", "ABC124", "ABC125"]
c_no = form['certificate_no'].to_list()
name_list = form['receiver_names'].to_list()
# For each (name, certificate number) pair, stamp the template image
# and save one PDF per recipient.
for i,j in zip(name_list, c_no):
    # Re-open the template each iteration so drawings don't accumulate.
    im = Image.open("Demo-Auto-Template.jpg")
    d = ImageDraw.Draw(im)
    # Pixel position and color for the recipient name on the template.
    location = (1370, 970)
    text_color = (0, 0, 0)
    font = ImageFont.truetype("georgia italic.ttf", 150, encoding="unic")
    d.text(location, i, fill=text_color,font=font)
    # Pixel position and color for the certificate number.
    location_new = (755, 2160)
    text_color_new = (0, 0, 0)
    font_new = ImageFont.truetype("arial.ttf", 55, encoding="unic")
    d.text(location_new, j, fill=text_color_new,font=font_new)
    im.save("certificate_"+i+".pdf")
| StarcoderdataPython |
3340886 | <filename>raylab/envs/wrappers/gaussian_random_walks.py<gh_stars>10-100
"""Wrapper for introducing irrelevant state variables."""
import gym
import numpy as np
from gym.spaces import Box
from .mixins import IrrelevantRedundantMixin, RNGMixin
from .utils import assert_flat_box_space
class GaussianRandomWalks(IrrelevantRedundantMixin, RNGMixin, gym.ObservationWrapper):
    """Add gaussian random walk variables to the observations.

    Arguments:
        env: a gym environment instance
        size: the number of random walks to append to the observation.
        loc: mean of the Gaussian distribution
        scale: stddev of the Gaussian distribution
    """

    def __init__(self, env: gym.Env, size: int, loc: float = 0.0, scale: float = 1.0):
        # Only flat Box observation spaces can be extended by concatenation.
        assert_flat_box_space(env.observation_space, self)
        super().__init__(env)
        self._size = size
        self._loc = loc
        self._scale = scale
        # Current walk state; (re)initialized on reset().
        self._random_walk = None

        # Extend the observation space with unbounded dims for the walks.
        original = self.env.observation_space
        low = np.concatenate([original.low, [-np.inf] * size])
        high = np.concatenate([original.high, [np.inf] * size])
        self.observation_space = Box(low=low, high=high, dtype=original.dtype)
        self._set_reward_if_possible()
        self._set_termination_if_possible()

    @property
    def added_size(self):
        # Number of appended (irrelevant) observation dimensions.
        return self._size

    def _added_vars(self, observation: np.ndarray) -> np.ndarray:
        # Advance each walk by one Gaussian step and return the new state.
        self._random_walk = self._random_walk + self.np_random.normal(
            loc=self._loc, scale=self._scale, size=self._size
        )
        return self._random_walk

    def reset(self, **kwargs):
        # Draw a fresh starting point for the walks at episode start.
        self._random_walk = self.np_random.normal(
            loc=self._loc, scale=self._scale, size=self._size
        )
        return super().reset(**kwargs)
1617882 | #!/usr/bin/env python3
from PyQt5.QtWidgets import QApplication, QSystemTrayIcon, QMenu, QAction
from PyQt5.QtGui import QIcon
import sys
import subprocess
import resources
class Inhibitation():
    """Wraps the org.freedesktop.PowerManagement Inhibit D-Bus API
    via the ``qdbus`` command-line tool.

    A cookie of -1 means no inhibition is active.
    """

    def __init__(self):
        # Fix: the original defined ``__init`` (missing trailing
        # underscores), so the constructor never ran and is_started()
        # raised AttributeError before the first start() call.
        self.__cookie = -1

    def start(self):
        """Request an inhibition and remember the returned cookie."""
        # NOTE(review): the cookie is captured as raw bytes from stdout.
        self.__cookie = subprocess.Popen(["/usr/bin/qdbus", "org.freedesktop.PowerManagement", "/org/freedesktop/PowerManagement/Inhibit", "org.freedesktop.PowerManagement.Inhibit.Inhibit", "/usr/bin/plasmashell", "None"], stdout=subprocess.PIPE).communicate()[0]
        print(self.__cookie)

    def stop(self):
        """Release the inhibition identified by the stored cookie."""
        print(subprocess.Popen(["/usr/bin/qdbus", "org.freedesktop.PowerManagement", "/org/freedesktop/PowerManagement/Inhibit", "org.freedesktop.PowerManagement.Inhibit.UnInhibit", self.__cookie], stdout=subprocess.PIPE).communicate()[0])
        self.__cookie = -1

    def is_started(self):
        """Return True while an inhibition cookie is held."""
        return self.__cookie != -1
class TrayIcon(QSystemTrayIcon):
    """System-tray icon that toggles a power-management inhibition
    on left-click and shows a context menu on right-click."""

    def __init__(self, parent=None):
        self.__is_inhibited = False
        # Icons loaded from the compiled Qt resource file (resources).
        self.__inhibited_icon = QIcon(":/checked.img")
        self.__non_inhibited_icon = QIcon(":/not_checked.img")
        # NOTE(review): the ``parent`` argument is ignored here
        # (hard-coded parent=None) — confirm whether that is intended.
        QSystemTrayIcon.__init__(self, parent=None)
        self.setIcon(self.__non_inhibited_icon)
        right_menu = RightClicked(self.quit)
        self.setContextMenu(right_menu)
        self.setToolTip("Inhibit me ...")
        self.activated.connect(self.onActivation)
        self.inhibit = Inhibitation()

    def onActivation(self, activation_reason):
        # Trigger == plain left-click: toggle the inhibition state
        # and swap the icon accordingly.
        if activation_reason == QSystemTrayIcon.Trigger:
            if self.__is_inhibited is True:
                self.__is_inhibited = False
                self.setIcon(self.__non_inhibited_icon)
                self.inhibit.stop()
            elif self.__is_inhibited is False:
                self.__is_inhibited = True
                self.setIcon(self.__inhibited_icon)
                self.inhibit.start()

    def quit(self):
        # Release any active inhibition before exiting the event loop.
        if self.inhibit.is_started() is True:
            self.inhibit.stop()
        QApplication.exit(0)
class RightClicked(QMenu):
    """Context menu shown on right-click: a single "Exit" action
    wired to the supplied quit callback."""

    def __init__(self, quit_fn, parent=None):
        QMenu.__init__(self, parent=None)
        # NOTE(review): ``quit`` shadows the builtin of the same name.
        quit = QAction("Exit", self)
        quit.triggered.connect(quit_fn)
        # self.addSeparator()
        self.addAction(quit)
if __name__ == '__main__':
    # Build the Qt application, show the tray icon and enter the
    # event loop until quit() calls QApplication.exit().
    app = QApplication(sys.argv)
    tray_icon = TrayIcon()
    tray_icon.show()
    app.exec_()
| StarcoderdataPython |
11238 | <reponame>dnootana/Python<filename>Interview/langTrans.py
#!/usr/bin/env python3.8
# Map the ASCII digits 0-9 onto the corresponding Devanagari digits
# (U+0966 DEVANAGARI DIGIT ZERO .. U+096F DEVANAGARI DIGIT NINE) and
# print the transliterated digit string.
table = {ord(str(digit)): chr(0x0966 + digit) for digit in range(10)}
print("0123456789".translate(table))
3398572 | <gh_stars>0
"""API views."""
# Django REST framework
from rest_framework import viewsets
# Models
from .models import Author, Book
# Serializers
from .serializers import AuthorSerializer, BookSerializer
class BookViewSet(viewsets.ModelViewSet):
    """Book Views class.

    DRF ModelViewSet exposing full CRUD for Book objects.
    """

    queryset = Book.objects.all()
    serializer_class = BookSerializer
class AuthorViewSet(viewsets.ModelViewSet):
    """Author Views class.

    DRF ModelViewSet exposing full CRUD for Author objects.
    """

    queryset = Author.objects.all()
    serializer_class = AuthorSerializer
| StarcoderdataPython |
42760 | from tensorflow.keras.layers import (Conv2D, Dense, Flatten, MaxPooling2D,
TimeDistributed)
def VGG16(inputs):
    """Build the VGG16 convolutional backbone on *inputs* and return
    the block-5 feature map (no pooling after block 5)."""
    # Block 1
    x = Conv2D(64,(3,3),activation = 'relu',padding = 'same',name = 'block1_conv1')(inputs)
    x = Conv2D(64,(3,3),activation = 'relu',padding = 'same', name = 'block1_conv2')(x)
    x = MaxPooling2D((2,2), strides = (2,2), name = 'block1_pool')(x)
    # Block 2
    x = Conv2D(128,(3,3),activation = 'relu',padding = 'same',name = 'block2_conv1')(x)
    x = Conv2D(128,(3,3),activation = 'relu',padding = 'same',name = 'block2_conv2')(x)
    x = MaxPooling2D((2,2),strides = (2,2), name = 'block2_pool')(x)
    # Block 3
    x = Conv2D(256,(3,3),activation = 'relu',padding = 'same',name = 'block3_conv1')(x)
    x = Conv2D(256,(3,3),activation = 'relu',padding = 'same',name = 'block3_conv2')(x)
    x = Conv2D(256,(3,3),activation = 'relu',padding = 'same',name = 'block3_conv3')(x)
    x = MaxPooling2D((2,2),strides = (2,2), name = 'block3_pool')(x)
    # Fourth convolution block
    # 14,14,512
    x = Conv2D(512,(3,3),activation = 'relu',padding = 'same', name = 'block4_conv1')(x)
    x = Conv2D(512,(3,3),activation = 'relu',padding = 'same', name = 'block4_conv2')(x)
    x = Conv2D(512,(3,3),activation = 'relu',padding = 'same', name = 'block4_conv3')(x)
    x = MaxPooling2D((2,2),strides = (2,2), name = 'block4_pool')(x)
    # Fifth convolution block
    # 7,7,512
    x = Conv2D(512,(3,3),activation = 'relu', padding = 'same', name = 'block5_conv1')(x)
    x = Conv2D(512,(3,3),activation = 'relu', padding = 'same', name = 'block5_conv2')(x)
    x = Conv2D(512,(3,3),activation = 'relu', padding = 'same', name = 'block5_conv3')(x)
    return x
def vgg_classifier_layers(x):
    """Apply VGG's fully-connected head per ROI via TimeDistributed:
    flatten each ROI's feature map, then two 4096-unit ReLU layers."""
    # num_rois, 14, 14, 1024 -> num_rois, 7, 7, 2048
    x = TimeDistributed(Flatten(name='flatten'))(x)
    x = TimeDistributed(Dense(4096, activation='relu'), name='fc1')(x)
    x = TimeDistributed(Dense(4096, activation='relu'), name='fc2')(x)
    return x
| StarcoderdataPython |
# Path configuration for the medical-mask face-ID pipeline.
# NOTE(review): the 'store_true' values below look like argparse
# ``action`` strings reused as plain truthy flags — confirm how the
# consumers read them before changing their type.
masks_folder = '/home/minglee/Documents/aiProjects/git_clone/face-id-with-medical-masks/imageNoneMask'
verbose = 'store_true'
skip_warnings = 'store_true'
database_file = '/home/minglee/Documents/aiProjects/git_clone/face-id-with-medical-masks/folderCreateJsonFIle/data.json'
masks_database_file = '/home/minglee/Documents/aiProjects/git_clone/face-id-with-medical-masks/folderCreateJsonFIle/data.json'
face_dataset_folder = '/home/minglee/Documents/aiProjects/git_clone/face-id-with-medical-masks/folderImagesWithMask'
use_cuda = "store_true"
path_json_file = '/home/minglee/Documents/aiProjects/git_clone/face-id-with-medical-masks/folderCreateJsonFIle/data.json'
img_path = '/home/minglee/Documents/aiProjects/git_clone/face-id-with-medical-masks/imageNoneMask/4.jpeg'
path = '/home/minglee/Documents/aiProjects/git_clone/face-id-with-medical-masks/imageNoneMask'
# /home/minglee/Documents/aiProjects/git_clone/face-id-with-medical-masks/data/masked_faces
3322056 | <gh_stars>10-100
#!/usr/bin/env python3
import json
import sys
# requires a full design space
try:
f = sys.argv[1]
except:
print("Expects report json file: ./pretty_print.py <report>.json")
exit(-1)
handle = open(f, "r")
x = json.load(handle)
handle.close()
for i in range(1, 9):
for j in range(1, 9):
for k in range(1, 9):
try:
data = x[str(i)][str(j)][str(k)]
unoptimized = float(data["unoptimized"])
optimized = float(data["optimized"])
eigen = float(data["baseline"])
speedup = unoptimized / optimized
espeedup = eigen / optimized
print("{}\t {}\t {}\t {}\t {} \t {} \t {} \t {} \t {}".format(i, j, k, data["Test Failure"], speedup, espeedup, eigen, unoptimized, optimized))
except:
print("{}\t {}\t {}\t EXCEPTION".format(i, j, k))
# Only uncomment this if the entire 8x8x8 run is completed and you want to see the heatmap
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# sphinx_gallery_thumbnail_number = 2
input_cols = ["1", "2", "3", "4", "5", "6", "7", "8"]
output_cols = ["1", "2", "3", "4", "5", "6", "7", "8"]
results = np.zeros((8, 8))
for i in range(1, 9):
for j in range(1, 9):
data = x["1"][str(i)][str(j)]
results[i-1, j-1] = float(data["unoptimized"]) / float(data["optimized"])
results = np.around(results, 2)
fig, axs = plt.subplots(8)
ax = axs[0]
im = ax.imshow(results)
# We want to show all ticks...
ax.set_xticks(np.arange(len(output_cols)))
ax.set_yticks(np.arange(len(input_cols)))
# ... and label them with the respective list entries
ax.set_xticklabels(output_cols)
ax.set_yticklabels(input_cols)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
for i in range(len(input_cols)):
for j in range(len(output_cols)):
text = ax.text(j, i, results[i, j],
ha="center", va="center", color="w")
ax.set_title("Speed up results")
fig.tight_layout()
plt.show()
"""
| StarcoderdataPython |
3378340 | # -*- coding: utf-8 -*-
import logging
import util
import os
import os.path as path
from os.path import basename
from util import bcolors
from os.path import join
from shutil import copyfile
def open_assignment(backend, config, station):
    """
    Open a single student's assignment identified by the name of the lab
    computer it was done on.

    The assignment is copied to config.CURRENT_ASSIGNMENT_PATH and the
    currently selected computer is switched to the given one. Afterwards
    backend.after_assignment_loaded() is called to allow additional
    actions once the assignment is opened.

    backend - backend used to locate and open project files
    config - global configuration of the review tool
    station - label of the computer the assignment was done on
    """
    print 'Otvaranje studentskog zadatka sa racunara {0}...'.format(station)
    logging.info('Otvaranje studentskog zadatka sa racunara: {0}'.format(station))
    start_dir = join(config.ASSIGNMENTS_PATH, station)
    matches = backend.find_project_recursive(start_dir)
    # Exactly one project directory must be found under the station dir.
    if len(matches) == 0:
        print bcolors.FAIL \
            + 'U direktorijumu "{0}" nije pronadjen fajl za identifikaciju projekta (pattern: "{1}")!'\
            .format(start_dir, backend.get_project_pattern()) + bcolors.ENDC
        return False
    if len(matches) > 1:
        print bcolors.FAIL \
            + 'U direktorijumu "{0}" pronadjeno je vise direktorijuma kandidata za projektni direktorijum: {1}!'\
            .format(start_dir, matches) + bcolors.ENDC
        return False
    # Clear both working directories, then copy the project files over.
    util.clear_directory(config.CURRENT_ASSIGNMENT_PATH)
    util.clear_directory(config.CURRENT_ALT_ASSIGNMENT_PATH)
    onlyfiles = [f for f in os.listdir(matches[0]) if path.isfile(join(matches[0], f))]
    for file in onlyfiles:
        src = join(matches[0], file)
        dst = join(config.CURRENT_ASSIGNMENT_PATH, os.path.basename(file))
        copyfile(src, dst)
    # If a previously altered (reviewer-modified) version exists, copy it too.
    alt = join(config.ALTERED_ASSIGNMENTS_PATH, station)
    if path.isdir(alt):
        print bcolors.BOLD + 'Postoji i izmenjeno resenje ovog zadatka, pa se ono kopira u: "{0}"'\
            .format(config.CURRENT_ALT_ASSIGNMENT_PATH) + bcolors.ENDC
        onlyfiles = [f for f in os.listdir(alt) if path.isfile(join(alt, f))]
        for file in onlyfiles:
            src = join(alt, file)
            dst = join(config.CURRENT_ALT_ASSIGNMENT_PATH, os.path.basename(file))
            copyfile(src, dst)
    write_current_station(config, station)
    proj = basename(util.identify_project_file(backend, config.CURRENT_ASSIGNMENT_PATH))
    print('Identifikovani projektni fajl: {0}'.format(proj))
    try:
        backend.after_assignment_loaded(config.CURRENT_ASSIGNMENT_PATH, proj)
    except RuntimeError as err:
        util.fatal_error(err.message)
def close_current_assignment(config, current_station):
    """
    Close the currently open student assignment.

    If a reviewer-altered version of the assignment exists, it is saved
    into the altered-assignments repository before switching away.

    config - global configuration of the review tool
    current_station - label of the currently selected computer
    """
    print 'Zatvaranje studentskog zadatka {0}, posto se prelazi na drugi...'.format(current_station)
    onlyfiles = [f for f in os.listdir(config.CURRENT_ALT_ASSIGNMENT_PATH)
                 if path.isfile(join(config.CURRENT_ALT_ASSIGNMENT_PATH, f))]
    # Any files in the alt working dir mean the reviewer made changes.
    if len(onlyfiles) > 0:
        print('Postoji alternativna verzija zadatka, pa se ona kopira u {0}'.format(config.ALTERED_ASSIGNMENTS_PATH))
        altered = join(config.ALTERED_ASSIGNMENTS_PATH, current_station)
        util.make_sure_path_exists(altered)
        logging.info('Detektovano je da postoji alternativna varijanta zadatka, koja je kopirana u: {0}'
                     .format(altered))
        print('Kopiranje fajlova iz "{0}" u "{1}"'.format(config.CURRENT_ALT_ASSIGNMENT_PATH, altered))
        for f in onlyfiles:
            copyfile(join(config.CURRENT_ALT_ASSIGNMENT_PATH, f), join(altered, f))
def copy_assignment_to_alt(config):
    """
    Copy the current assignment (in its current, possibly modified state)
    to config.CURRENT_ALT_ASSIGNMENT_PATH - the location from which it
    will be stored in the repository of altered assignment versions.

    Effectively declares the current changes official, recording them as
    the reviewer's modifications to the assignment.

    config - global configuration of the review tool
    """
    print 'Pravljenje kopije zadatka u direktorijum za prepravke: "{0}"'.format(config.CURRENT_ALT_ASSIGNMENT_PATH)
    logging.info('Inicirano je pravljenje kopije zadatka radi izrade alternativne varijante')
    util.clear_directory(config.CURRENT_ALT_ASSIGNMENT_PATH)
    onlyfiles = [f for f in os.listdir(config.CURRENT_ASSIGNMENT_PATH)
                 if path.isfile(join(config.CURRENT_ASSIGNMENT_PATH, f))]
    for file in onlyfiles:
        src = join(config.CURRENT_ASSIGNMENT_PATH, file)
        dst = join(config.CURRENT_ALT_ASSIGNMENT_PATH, file)
        copyfile(src, dst)
def read_current_station(config):
    """
    Return the label of the currently selected computer.

    Returns an empty string when no computer is currently selected
    (i.e. the station file does not exist).

    config - global configuration of the review tool
    """
    # Guard clause: no station file means nothing is selected yet.
    if not path.isfile(config.STATION_FILENAME):
        return ''
    with open(config.STATION_FILENAME, 'r') as rfile:
        return rfile.read()
def write_current_station(config, station):
    """
    Record the given computer label as the currently selected one, in the
    file that tracks this information.

    config - global configuration of the review tool
    station - label of the currently selected computer
    """
    # Overwrites any previous selection.
    with open (config.STATION_FILENAME, 'w') as wfile:
        wfile.write(station)
183493 | <reponame>NinjaDero/Directly<gh_stars>1-10
from Directly import Ext
@Ext.cls
class Buttons():
@staticmethod
@Ext.method
def ping(request):
return "Pong!"
@staticmethod
@Ext.method
def reverse(request, text):
return text[::-1]
@staticmethod
@Ext.method
def full_caps(request, text):
all_caps = Buttons.make_caps(text)
return all_caps
@staticmethod
@Ext.method
def full_lows(request, text):
all_lows = Buttons.make_lows(text)
return all_lows
# Not included, remains hidden to Ext.direct.Manager
# You don't have to separate your exposed and hidden methods, if you don't want to.
# They can also not be called if the Manager is edited manually
@staticmethod
def make_caps(_text):
if 'upper' in dir(_text):
_text = _text.upper()
return _text
@staticmethod
def make_lows(_text):
if 'lower' in dir(_text):
_text = _text.lower()
return _text | StarcoderdataPython |
1779670 | #!/usr/bin/python3
import json, argparse, time, logging
import requests, requests.packages
import os
import sys
sys.path.append(r"/usr/local/fworch/importer")
import fwcommon, common, getter
requests.packages.urllib3.disable_warnings() # suppress ssl warnings only
# Flat script: read a previously dumped Check Point R8x config (JSON), resolve
# inline layers and object uids that the bulk export missed via extra API
# calls, and write the enriched config back to the same file.
parser = argparse.ArgumentParser(description='Read configuration from Check Point R8x management via API calls')
parser.add_argument('-a', '--apihost', metavar='api_host', required=True, help='Check Point R8x management server')
parser.add_argument('-w', '--password', metavar='api_password_file', default='import_user_secret', help='name of the file to read the password for management server from')
parser.add_argument('-u', '--user', metavar='api_user', default='fworch', help='user for connecting to Check Point R8x management server, default=fworch')
parser.add_argument('-p', '--port', metavar='api_port', default='443', help='port for connecting to Check Point R8x management server, default=443')
parser.add_argument('-D', '--domain', metavar='api_domain', default='', help='name of Domain in a Multi-Domain Envireonment')
parser.add_argument('-l', '--layer', metavar='policy_layer_name(s)', required=True, help='name of policy layer(s) to read (comma separated)')
parser.add_argument('-x', '--proxy', metavar='proxy_string', default='', help='proxy server string to use, e.g. 192.168.3.11:8080; default=empty')
parser.add_argument('-s', '--ssl', metavar='ssl_verification_mode', default='', help='[ca]certfile, if value not set, ssl check is off"; default=empty/off')
parser.add_argument('-i', '--limit', metavar='api_limit', default='150', help='The maximal number of returned results per HTTPS Connection; default=150')
parser.add_argument('-d', '--debug', metavar='debug_level', default='0', help='Debug Level: 0(off) 4(DEBUG Console) 41(DEBUG File); default=0')
parser.add_argument('-t', '--testing', metavar='version_testing', default='off', help='Version test, [off|<version number>]; default=off')
parser.add_argument('-c', '--configfile', metavar='config_file', required=True, help='filename to read and write config in json format from/to')
parser.add_argument('-n', '--noapi', metavar='mode', default='false', help='if set to true (only in combination with mode=enrich), no api connections are made. Useful for testing only.')
args = parser.parse_args()
# With no arguments at all, print usage and bail out.
if len(sys.argv)==1:
    parser.print_help(sys.stderr)
    sys.exit(1)
api_host = args.apihost
api_port = args.port
config_filename = args.configfile
# The API password is read from a file rather than passed on the command line.
with open(args.password, "r") as password_file:
    api_password = password_file.read().rstrip()
api_domain = args.domain
test_version = args.testing
proxy_string = { "http" : args.proxy, "https" : args.proxy }
offset = 0
limit = args.limit
details_level = "full" # 'standard'
testmode = args.testing
base_url = 'https://' + api_host + ':' + api_port + '/web_api/'
json_indent=2
use_object_dictionary = 'false'
svc_objects = []
nw_objects = []
nw_objs_from_obj_tables = []
svc_objs_from_obj_tables = []
# logging config
debug_level = int(args.debug)
common.set_log_level(log_level=debug_level, debug_level=debug_level)
ssl_verification = getter.set_ssl_verification(args.ssl)
starttime = int(time.time())
# read json config data
with open(config_filename, "r") as json_data:
    config = json.load(json_data)
# do nothing for empty config
if len(config) == 0:
    sys.exit(0)
#################################################################################
# adding inline and domain layers
#################################################################################
# Repeatedly sweep the rulebases for inline-layer references until no new
# layers show up (inline layers may themselves contain inline layers).
found_new_inline_layers = True
old_inline_layers = []
while found_new_inline_layers is True:
    # sweep existing rules for inline layer links
    inline_layers = []
    for rulebase in config['rulebases']:
        getter.get_inline_layer_names_from_rulebase(rulebase, inline_layers)
    # Fixed point reached when a sweep discovers no additional layers.
    if len(inline_layers) == len(old_inline_layers):
        found_new_inline_layers = False
    else:
        old_inline_layers = inline_layers
        for layer in inline_layers:
            logging.debug ( "enrich_config - found inline layer " + layer )
        # enrich config --> get additional layers referenced in top level layers by name
        # also handle possible recursion (inline layer containing inline layer(s))
        # get layer rules from api
        # add layer rules to config
# next phase: how to logically link layer guard with rules in layer? --> AND of src, dst & svc between layer guard and each rule in layer?
# for rulebase in config['rulebases']:
#     for rule in rulebase:
#         if 'type' in rule and rule['type'] == 'place-holder':
#             logging.debug("enrich_config: found domain rule ref: " + rule["uid"])
#################################################################################
# get object data which is only contained as uid in config by making addtional api calls
#################################################################################
# get all object uids (together with type) from all rules in fields src, dst, svc
nw_uids_from_rulebase = []
svc_uids_from_rulebase = []
for rulebase in config['rulebases']:
    logging.debug ( "enrich_config - searching for all uids in rulebase: " + rulebase['layername'] )
    getter.collect_uids_from_rulebase(rulebase, nw_uids_from_rulebase, svc_uids_from_rulebase, "top_level")
# remove duplicates from uid lists
nw_uids_from_rulebase = list(set(nw_uids_from_rulebase))
svc_uids_from_rulebase = list(set(svc_uids_from_rulebase))
# get all uids in objects tables
for obj_table in config['object_tables']:
    nw_objs_from_obj_tables.extend(getter.get_all_uids_of_a_type(obj_table, fwcommon.nw_obj_table_names))
    svc_objs_from_obj_tables.extend(getter.get_all_uids_of_a_type(obj_table, getter.svc_obj_table_names))
# identify all objects (by type) that are missing in objects tables but present in rulebase
missing_nw_object_uids = getter.get_broken_object_uids(nw_objs_from_obj_tables, nw_uids_from_rulebase)
missing_svc_object_uids = getter.get_broken_object_uids(svc_objs_from_obj_tables, svc_uids_from_rulebase)
# adding the uid of the Original object for natting:
missing_nw_object_uids.append(fwcommon.original_obj_uid)
missing_svc_object_uids.append(fwcommon.original_obj_uid)
logging.debug ( "enrich_config - found missing nw objects: '" + ",".join(missing_nw_object_uids) + "'" )
logging.debug ( "enrich_config - found missing svc objects: '" + ",".join(missing_svc_object_uids) + "'" )
# Only log in to the management API when API access is enabled.
if args.noapi == 'false':
    sid = getter.login(args.user,api_password,api_host,args.port,api_domain,ssl_verification, proxy_string)
    v_url = getter.get_api_url (sid, api_host, args.port, args.user, base_url, limit, test_version,ssl_verification, proxy_string)
    logging.debug ( "enrich_config - logged into api" )
# if an object is not there:
# make api call: show object details-level full uid "<uid>" and add object to respective json
# Resolve each missing network object via 'show-object' and append a minimal
# host-style entry to the object tables, keyed by the object's type.
for missing_obj in missing_nw_object_uids:
    if args.noapi == 'false':
        show_params_host = {'details-level':details_level,'uid':missing_obj}
        logging.debug ( "checkpointR8x/enrich_config - fetching obj with uid: " + missing_obj)
        obj = getter.api_call(api_host, args.port, v_url, 'show-object', show_params_host, sid, ssl_verification, proxy_string)
        obj = obj['object']
        if (obj['type'] == 'CpmiAnyObject'):
            json_obj = {"object_type": "hosts", "object_chunks": [ {
                "objects": [ {
                    'uid': obj['uid'], 'name': obj['name'], 'color': obj['color'],
                    'comments': 'any nw object checkpoint (hard coded)',
                    'type': 'CpmiAnyObject', 'ipv4-address': '0.0.0.0/0',
                } ] } ] }
            config['object_tables'].append(json_obj)
        elif (obj['type'] == 'simple-gateway' or obj['type'] == 'CpmiGatewayPlain' or obj['type'] == 'interop'):
            json_obj = {"object_type": "hosts", "object_chunks": [ {
                "objects": [ {
                    'uid': obj['uid'], 'name': obj['name'], 'color': obj['color'],
                    'comments': obj['comments'], 'type': 'host', 'ipv4-address': fwcommon.get_ip_of_obj(obj),
                } ] } ] }
            config['object_tables'].append(json_obj)
        elif obj['type'] == 'multicast-address-range':
            logging.debug("enrich_config - found multicast-address-range: " + obj['name'] + " (uid:" + obj['uid']+ ")")
            json_obj = {"object_type": "hosts", "object_chunks": [ {
                "objects": [ {
                    'uid': obj['uid'], 'name': obj['name'], 'color': obj['color'],
                    'comments': obj['comments'], 'type': 'host', 'ipv4-address': fwcommon.get_ip_of_obj(obj),
                } ] } ] }
            config['object_tables'].append(json_obj)
        elif (obj['type'] == 'CpmiVsClusterMember' or obj['type'] == 'CpmiVsxClusterMember'):
            json_obj = {"object_type": "hosts", "object_chunks": [ {
                "objects": [ {
                    'uid': obj['uid'], 'name': obj['name'], 'color': obj['color'],
                    'comments': obj['comments'], 'type': 'host', 'ipv4-address': fwcommon.get_ip_of_obj(obj),
                } ] } ] }
            config['object_tables'].append(json_obj)
            logging.debug ('missing obj: ' + obj['name'] + obj['type'])
        elif (obj['type'] == 'Global'):
            json_obj = {"object_type": "hosts", "object_chunks": [ {
                "objects": [ {
                    'uid': obj['uid'], 'name': obj['name'], 'color': obj['color'],
                    'comments': obj['comments'], 'type': 'host', 'ipv4-address': '0.0.0.0/0',
                } ] } ] }
            config['object_tables'].append(json_obj)
            logging.debug ('missing obj: ' + obj['name'] + obj['type'])
        else:
            logging.warning ( "checkpointR8x/enrich_config - missing nw obj of unexpected type '" + obj['type'] + "': " + missing_obj )
            print ("WARNING - enrich_config - missing nw obj of unexpected type: '" + obj['type'] + "': " + missing_obj)
    logging.debug ( "enrich_config - missing nw obj: " + missing_obj )
    print ("INFO: adding nw obj missing from standard api call results: " + missing_obj)
# Same resolution pass for missing service objects.
for missing_obj in missing_svc_object_uids:
    if args.noapi == 'false':
        show_params_host = {'details-level':details_level,'uid':missing_obj}
        obj = getter.api_call(api_host, args.port, v_url, 'show-object', show_params_host, sid, ssl_verification, proxy_string)
        obj = obj['object']
        # print(json.dumps(obj))
        # currently no svc objects are found missing, not even the any obj?
        if (obj['type'] == 'CpmiAnyObject'):
            json_obj = {"object_type": "services-other", "object_chunks": [ {
                "objects": [ {
                    'uid': obj['uid'], 'name': obj['name'], 'color': obj['color'],
                    'comments': 'any svc object checkpoint (hard coded)',
                    'type': 'service-other', 'ip-protocol': '0'
                } ] } ] }
            config['object_tables'].append(json_obj)
        elif (obj['type'] == 'Global'):
            json_obj = {"object_type": "services-other", "object_chunks": [ {
                "objects": [ {
                    'uid': obj['uid'], 'name': obj['name'], 'color': obj['color'],
                    'comments': 'Original svc object checkpoint (hard coded)',
                    'type': 'service-other', 'ip-protocol': '0'
                } ] } ] }
            config['object_tables'].append(json_obj)
        else:
            logging.warning ( "checkpointR8x/enrich_config - missing svc obj of unexpected type: " + missing_obj )
            print ("WARNING - enrich_config - missing svc obj of unexpected type: '" + obj['type'] + "': " + missing_obj)
    logging.debug ( "enrich_config - missing svc obj: " + missing_obj )
    print ("INFO: adding svc obj missing from standard api call results: " + missing_obj)
# dump new json file
if args.noapi == 'false':
    if os.path.exists(config_filename): # delete json file (to enabiling re-write)
        os.remove(config_filename)
    with open(config_filename, "w") as json_data:
        json_data.write(json.dumps(config))
        # json_data.write(json.dumps(config,indent=json_indent))
if args.noapi == 'false':
    logout_result = getter.api_call(api_host, args.port, v_url, 'logout', '', sid, ssl_verification, proxy_string)
#logout_result = api_call(api_host, args.port, base_url, 'logout', {}, sid)
duration = int(time.time()) - starttime
logging.debug ( "checkpointR8x/enrich_config - duration: " + str(duration) + "s" )
sys.exit(0)
| StarcoderdataPython |
154725 | <reponame>tshu-w/deep-learning-project-template
#!/usr/bin/env python
import json
import logging
from collections import ChainMap
from datetime import datetime
from pathlib import Path
from typing import Any
import shtab
from pytorch_lightning.loggers import LightningLoggerBase, LoggerCollection
from pytorch_lightning.trainer.states import TrainerFn
from pytorch_lightning.utilities.cli import LightningArgumentParser, LightningCLI
from rich import print
class LitCLI(LightningCLI):
    """LightningCLI subclass that links data/model args, names experiment
    log directories from model/datamodule/seed/timestamp, and re-evaluates
    the best checkpoint after fitting, dumping metrics to metrics.json."""
    def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None:
        # Propagate shared hyper-parameters from the datamodule to the model
        # so they only have to be specified once on the command line.
        for arg in ["batch_size", "num_labels", "task_name"]:
            parser.link_arguments(
                f"data.init_args.{arg}",
                f"model.init_args.{arg}",
                apply_on="instantiate",
            )
    def modify_logger(self, logger: LightningLoggerBase, exp_name: str, version: str):
        # Override the logger's private name/version fields when supported, so
        # every logger writes under the same experiment/version directory.
        if exp_name and hasattr(logger, "_name"):
            logger._name = exp_name
        if version and hasattr(logger, "_version"):
            logger._version = version
    def before_run(self):
        """Compose experiment name and version, print them, and point all
        attached loggers at the resulting log directory."""
        model_name = type(self.model).__name__
        datamodule_name = type(self.datamodule).__name__ if self.datamodule else ""
        exp_name = "_".join(filter(None, [model_name, datamodule_name]))
        # get_version() is an optional hook on model/datamodule; empty parts
        # are filtered out of the joined version string below.
        model_version = (
            self.model.get_version() if hasattr(self.model, "get_version") else ""
        )
        datamodule_version = (
            self.datamodule.get_version()
            if hasattr(self.datamodule, "get_version")
            else ""
        )
        seed = str(self._get(self.config, "seed_everything"))
        timestramp = datetime.now().strftime("%m%d-%H%M%S")
        version = "_".join(
            filter(None, [model_version, datamodule_version, seed, timestramp])
        )
        log_dir = (
            f"{self.trainer.default_root_dir}/{exp_name.lower()}/{version.lower()}"
        )
        print(f"Experiment: [bold]{exp_name}[/bold]")
        print(f"Version: [bold]{version}[/bold]")
        print(f"Log Dir: [bold]{log_dir}[/bold]")
        if isinstance(self.trainer.logger, LoggerCollection):
            for logger in self.trainer.logger:
                self.modify_logger(logger, exp_name.lower(), version.lower())
        else:
            self.modify_logger(self.trainer.logger, exp_name.lower(), version.lower())
        # Keep validate/test output quiet; results are printed by after_run.
        if self.subcommand in ["validate", "test"]:
            self.config_init[self.subcommand]["verbose"] = False
    # One shared pre-hook for all subcommands.
    before_fit = before_validate = before_test = before_run
    def after_run(self):
        """After fitting, re-run validate/test on the best checkpoint; then
        print the collected metrics and persist them to metrics.json."""
        results = {}
        if self.trainer.state.fn == TrainerFn.FITTING:
            if (
                self.trainer.checkpoint_callback
                and self.trainer.checkpoint_callback.best_model_path
            ):
                ckpt_path = self.trainer.checkpoint_callback.best_model_path
                # Disable useless logging
                logging.getLogger("pytorch_lightning.utilities.distributed").setLevel(
                    logging.WARNING
                )
                logging.getLogger("pytorch_lightning.accelerators.gpu").setLevel(
                    logging.WARNING
                )
                # Drop callbacks so the evaluation passes run bare.
                self.trainer.callbacks = []
                fn_kwargs = {
                    "model": self.model,
                    "datamodule": self.datamodule,
                    "ckpt_path": ckpt_path,
                    "verbose": False,
                }
                has_val_loader = (
                    self.trainer._data_connector._val_dataloader_source.is_defined()
                )
                has_test_loader = (
                    self.trainer._data_connector._test_dataloader_source.is_defined()
                )
                val_results = (
                    self.trainer.validate(**fn_kwargs) if has_val_loader else []
                )
                test_results = self.trainer.test(**fn_kwargs) if has_test_loader else []
                # Merge per-dataloader dicts into one flat results mapping.
                results = dict(ChainMap(*val_results, *test_results))
        else:
            results = self.trainer.logged_metrics
        if results:
            results_str = json.dumps(results, ensure_ascii=False, indent=2)
            print(results_str)
            metrics_file = Path(self.trainer.log_dir) / "metrics.json"
            with metrics_file.open("w") as f:
                f.write(results_str)
    # One shared post-hook for all subcommands.
    after_fit = after_validate = after_test = after_run
    def setup_parser(
        self,
        add_subcommands: bool,
        main_kwargs: dict[str, Any],
        subparser_kwargs: dict[str, Any],
    ) -> None:
        """Initialize and setup the parser, subcommands, and arguments."""
        self.parser = self.init_parser(**main_kwargs)
        # Register shell-completion generation (-s/--print-completion).
        shtab.add_argument_to(self.parser, ["-s", "--print-completion"])
        if add_subcommands:
            self._subcommand_method_arguments: dict[str, list[str]] = {}
            self._add_subcommands(self.parser, **subparser_kwargs)
        else:
            self._add_arguments(self.parser)
| StarcoderdataPython |
3382520 | from ideas.examples import switch
from ideas.import_hook import remove_hook
def test_transform():
    # Verifies that switch.transform_source rewrites a switch/case block into
    # an equivalent if/elif chain, using a predictable temporary name (_1)
    # that is deleted after the chain.
    source = """
switch EXPR:
    case EXPR_1:
        SUITE
    case EXPR_2:
        SUITE
    case in (EXPR_3, EXPR_4, ...):
        SUITE
    else:
        SUITE
other_code"""
    expected = """
_1 = EXPR
if _1 == EXPR_1:
    SUITE
elif _1 == EXPR_2:
    SUITE
elif _1 in (EXPR_3, EXPR_4, ...):
    SUITE
else:
    SUITE
del _1
other_code"""
    # predictable_names makes the generated temporary deterministic (_1).
    result = switch.transform_source(source, callback_params={"predictable_names": True})
    assert result == expected, "Switch conversion test"
def test_switch1():
    # With the import hook installed, a module written in the custom switch
    # syntax must import successfully; the hook is removed afterwards.
    hook = switch.add_hook()
    from . import switch1  # noqa
    remove_hook(hook)
| StarcoderdataPython |
3312684 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import base64
import hashlib
import HTMLParser
import json
import random
import re
import string
import sys
import time
import urllib2
import requests
import xbmcgui
from bs4 import BeautifulSoup
from xbmcswift2 import Plugin
import danmuku
import os
def random_sentence(size):
    """Return a random token of *size* lowercase-letter/digit characters."""
    alphabet = string.ascii_lowercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(size))
def unescape(string):
    # Percent-decode, HTML-unescape, then expand JavaScript-style %uXXXX
    # escapes into their unicode characters.
    # NOTE(review): Python 2 only -- urllib2, HTMLParser and unichr do not
    # exist on Python 3. Also note the parameter shadows the stdlib `string`
    # module imported at file level.
    string = urllib2.unquote(string).decode('utf8')
    quoted = HTMLParser.HTMLParser().unescape(string).encode('utf-8')
    # expand %uXXXX / %uXX sequences into unicode (e.g. Chinese) characters
    return re.sub(r'%u([a-fA-F0-9]{4}|[a-fA-F0-9]{2})', lambda m: unichr(int(m.group(1), 16)), quoted)
plugin = Plugin()
# Humanize counts above 10000 the way bilibili does.
def zh(num):
    """Format *num* with a Chinese magnitude suffix.

    >= 1e8 -> one decimal place + '亿'; >= 1e4 -> one decimal place + '万';
    otherwise the value is returned as str(num) unchanged.
    """
    value = int(num)
    if value >= 100000000:
        return str(round(float(num) / float(100000000), 1)) + '亿'
    if value >= 10000:
        return str(round(float(num) / float(10000), 1)) + '万'
    return str(num)
# "Freeloading" tally: sum of each value's excess over the minimum, plus the
# minimum itself (equivalent to sum(li) - (len(li) - 1) * min(li)).
def bp(li):
    """Return the aggregate value used by the freeloader calculation."""
    smallest = min(li)
    return sum(item - smallest for item in li) + smallest
# Persistent key-value stores provided by xbmcswift2 (survive addon restarts).
his = plugin.get_storage('his')  # history store
cache = plugin.get_storage('cache')  # settings/session store (SESSDATA, switches, ...)
# Desktop and mobile User-Agent headers used for bilibili web API requests.
headers = {'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36'}
mheaders = {'user-agent' : 'Mozilla/5.0 (Linux; Android 10; Z832 Build/MMB29M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.116 Mobile Safari/537.36'}
def chushihua(key, default):
    """Initialize cache[key] with *default* when absent, then return the
    switch's display text: '开' (on) when the stored value equals 1,
    otherwise '关' (off)."""
    if key not in cache:
        cache[key] = default
    switch = cache[key]
    return '开' if switch == 1 else '关'
# Mimic bilibili's random "the streamer is currently ..." placeholder line.
def rd_live_gz():
    """Return a playful live-status string: '主播正在<random activity>...'."""
    gz = ['思考人生','抠脚丫','俯卧撑热身','卖萌','打滚','换女装','觅食','海扁小电视','成为魔法少女','修电锯','跳广播体操','吸猫','渡劫中','学习','梳妆打扮','撩妹','逛漫展','拔腿毛','逛b站']
    # random.choice is the idiomatic (and off-by-one-proof) replacement for
    # gz[random.randint(0, len(gz) - 1)].
    return '主播正在' + random.choice(gz) + '...'
# Reverse-map a bilibili sub-category tid to its top-level zone name; there is
# no API for this, so the table is hand-rolled.
def two_one(tid):
    """Return the top-level zone name for sub-category *tid* ('未知' if unknown).

    The table is scanned in the original declaration order and a later match
    overwrites an earlier one, preserving the legacy if-chain semantics.
    """
    zones = [
        ('动画', [1, 24, 25, 47, 86, 27]),
        ('番剧', [51, 152]),
        ('音乐', [3, 28, 31, 30, 194, 59, 193, 29, 130]),
        ('国创', [153, 168, 169, 195, 170]),
        ('舞蹈', [129, 20, 198, 199, 200, 154, 156]),
        ('游戏', [4, 17, 171, 172, 65, 173, 121, 136, 19]),
        ('科技', [36, 124, 122, 39, 96, 98, 176]),
        ('数码', [188, 95, 189, 190, 191]),
        ('生活', [160, 138, 21, 76, 75, 161, 162, 163, 174]),
        ('鬼畜', [119, 22, 26, 126, 127]),
        ('时尚', [155, 157, 158, 164, 159, 192]),
        ('广告', [165, 166]),
        ('娱乐', [5, 71, 137, 131]),
        ('影视', [181, 182, 183, 85, 184]),
    ]
    result = ''
    for name, tids in zones:
        if int(tid) in tids:
            result = name
    if result == '':
        result = '未知'
    return result
# Render a user level as a Kodi color-tagged 'Lv.N' label.
def level_color(level):
    """Return ' [COLOR <c>]Lv.<level>[/COLOR]' for the given user level.

    Fix: the original if-chain left `lev` unbound for any level outside 0-6
    (e.g. a future Lv.7), raising UnboundLocalError; unknown levels now fall
    back to grey.
    """
    colors = {0: 'grey', 1: 'grey', 2: 'green', 3: 'blue',
              4: 'yellow', 5: 'orange', 6: 'red'}
    color = colors.get(int(level), 'grey')
    return ' [COLOR ' + color + ']Lv.' + str(level) + '[/COLOR]'
def sessdata():
    """Return the 'SESSDATA=<value>' cookie fragment from the cache store,
    or an empty string when no session cookie has been saved."""
    return ('SESSDATA=' + cache['sessdata']) if 'sessdata' in cache else ''
@plugin.cached(TTL=1)
def get_up_roomold(uid):
    # Fetch the live-room summary for user *uid* (works even when the room is
    # offline); returns the raw JSON response text.
    r = requests.get('https://api.live.bilibili.com/room/v1/Room/getRoomInfoOld?mid='+str(uid), headers=headers)
    r.encoding = 'UTF-8'
    return r.text
@plugin.cached(TTL=60)
def get_up_baseinfo(uid):
    # Fetch the account/profile info for user *uid*; returns raw JSON text.
    r = requests.get('https://api.bilibili.com/x/space/acc/info?mid='+str(uid)+'&jsonp=jsonp', headers=headers)
    r.encoding = 'UTF-8'
    return r.text
@plugin.cached(TTL=60)
def get_upinfo(uid):
    """Build a multi-section, human-readable profile report for uploader *uid*
    (identity, fun facts, latest stats, and an ASCII bar chart of upload
    zones). Returns a single formatted unicode string."""
    j = json.loads(get_up_baseinfo(uid))
    u = j['data']
    # uploader stats
    di = up_allnum(uid)
    # newest uploads
    new = up_sort_vid(uid,'')
    # most-played uploads
    click = up_sort_vid(uid,'click')
    # most-favourited uploads
    stow = up_sort_vid(uid,'stow')
    text = ''
    # VIP (big-member) badge
    vip = u''
    if u['vip']['status'] == 1:
        if u['vip']['type'] == 2:
            vip += u'[COLOR pink][年度大会员][/COLOR]'
        else:
            vip += u'[COLOR pink][大会员][/COLOR]'
    # gender badge
    if u['sex'] == u'男':
        sex = u'[COLOR blue][♂][/COLOR]'
    else:
        if u['sex'] == u'女':
            sex = u'[COLOR pink][♀][/COLOR]'
        else:
            sex = u'?'
    text += u'UP主:' + u['name'] + u' 性别:' + sex + u' '+ level_color(u['level']) + vip +'\n'
    if u['official']['role'] != 0:
        text += u['official']['title'] +'\n'
    if u['sign'] == '':
        sign = u'这个人很懒,什么都没有写'
    else:
        sign = u['sign']
    text += u'个性签名:' + sign + '\n'
    text += u'生日:' + u['birthday'] + '\n'
    text += u'----------'*5 + u'up主大数据' + u'----------'*5 + '\n\n'
    if u['official']['role'] != 0:
        text += u'「'+u['name']+ u'」是经过认证的「'+u['official']['title']+ u'」。' +'\n'
    else:
        text += u'「'+u['name']+ u'」是一个不太出名的up主。' +'\n'
    # tlist maps zone tid -> {'name', 'count'}; sort zones by upload count.
    tlist = new['list']['tlist']
    try:
        tt = list(tlist.keys())
        ttt = []
        for index in range(len(tt)):
            ttt.append(tlist[tt[index]])
        hotp = sorted(ttt,key=lambda x:x['count'],reverse=True)
        text += u'TA是主要活跃在「'+ hotp[0]['name'] + u'区」的UP主,在该分区共投稿「'+ str(hotp[0]['count']) + u' 个稿件」'+'\n'
        if two_one(click['list']['vlist'][0]['typeid']) != hotp[0]['name'].encode('utf-8'):
            text += u'然而TA在「' + two_one(click['list']['vlist'][0]['typeid']).decode('utf-8') + u'」拥有着表现最好的作品'+'\n'
        text += u'代表作是《 ' + click['list']['vlist'][0]['title'] + u'》。'+'\n'
        # jinqi: zones of the recent uploads; jinqi2: recent uploads outside
        # the dominant zone (used to guess a possible "zone switch" trend).
        jinqi = []
        for index in range(len(new['list']['vlist'])):
            jinqi.append(two_one(new['list']['vlist'][index]['typeid']))
        jinqi2 =[]
        for index in range(len(jinqi)):
            if jinqi[index] != max(jinqi, key=jinqi.count):
                jinqi2.append(jinqi[index])
        if jinqi2 != []:
            if jinqi2.count(max(jinqi2, key=jinqi2.count)) < jinqi.count(max(jinqi, key=jinqi.count)):
                text += u'近期,TA的投稿仍然大多在「'+max(jinqi, key=jinqi.count).decode('utf-8') + u'区」。' + '\n\n'
            else:
                text += u'虽然TA的投稿仍然大多在「'+max(jinqi, key=jinqi.count).decode('utf-8')+u'」,不过TA有向「'+max(jinqi2, key=jinqi2.count).decode('utf-8')+u'」转型的可能性。' + '\n\n'
        else:
            text += u'近期,TA的投稿1000%在「'+max(jinqi, key=jinqi.count).decode('utf-8') + u'区」。' + '\n\n'
    except AttributeError:
        # tlist can be an empty list instead of a dict when there are no
        # uploads; .keys() then raises AttributeError.
        text += u'没有更多关于TA的情报信息了\n\n'
    text += u'----------'*5 + u'up主最新数据' + u'----------'*5 + '\n\n'
    text += u'粉丝总数:' + zh(di['follower']).decode('utf-8') + u' 播放总数:' + zh(di['archive']).decode('utf-8') + u' 获赞总数:' + zh(di['likes']).decode('utf-8') + u' 专栏阅读:' + zh(di['article']).decode('utf-8') + '\n\n'
    try:
        # ASCII bar chart: one '|' per percent of uploads in each zone.
        text += u'----------'*5 + u'up主投稿分区' + u'----------'*5 + '\n\n'
        for index in range(len(hotp)):
            co = u'|'*int((float(hotp[index]['count'])/float(new['page']['count']))*100)
            if len(hotp[index]['name']) == 2:
                name = hotp[index]['name'] +u'区'
            else:
                name = hotp[index]['name']
            text += name + u':' + str(co) + u' ' + str(round((float(hotp[index]['count'])/float(new['page']['count']))*100,2)) + u'% - ' +str(hotp[index]['count']) + u'个投稿\n'
    except UnboundLocalError:
        # hotp was never assigned in the earlier try-block.
        text += u'没有更多关于TA的情报信息了'
    return text
# Sort modes: '' = newest uploads, 'click' = most played, 'stow' = most favourited.
@plugin.cached(TTL=10)
def up_sort_vid(uid,sort):
    # Fetch one page (25 items) of an uploader's video list in the requested
    # sort order; returns the decoded 'data' payload.
    if sort == '':
        so = ''
    else:
        so = '&order=' +sort
    r = requests.get('https://api.bilibili.com/x/space/arc/search?mid='+uid+'&pn=1&ps=25'+so+'&jsonp=jsonp', headers=headers)
    r.encoding = 'UTF-8'
    j = json.loads(r.text)
    return j['data']
# Aggregate uploader statistics from two API endpoints.
@plugin.cached(TTL=10)
def up_allnum(uid):
    """Return a dict with the uploader's following/follower counts and total
    video views, article views and likes."""
    di = {}
    r = requests.get('https://api.bilibili.com/x/relation/stat?vmid='+uid+'&jsonp=jsonp', headers=headers)
    r.encoding = 'UTF-8'
    j = json.loads(r.text)
    # accounts this user follows
    di['following'] = j['data']['following']
    # followers
    di['follower'] = j['data']['follower']
    r = requests.get('https://api.bilibili.com/x/space/upstat?mid='+uid+'&jsonp=jsonp', headers=headers)
    r.encoding = 'UTF-8'
    j = json.loads(r.text)
    # total video views
    di['archive'] = j['data']['archive']['view']
    # total article (column) views
    di['article'] = j['data']['article']['view']
    # total likes received
    di['likes'] = j['data']['likes']
    return di
@plugin.cached(TTL=10)
def get_search(keyword,page):
    """Combined search: query bilibili's all-in-one search endpoint and return
    a list of item dicts (name/href/thumb) mixing bangumi, movies and plain
    videos; also pops a notification with the page/result counts."""
    serachUrl = 'https://api.bilibili.com/x/web-interface/search/all/v2?keyword=' + keyword + '&page=' + str(page)
    r = requests.get(serachUrl, headers=headers)
    r.encoding = 'UTF-8'
    j = json.loads(r.text)
    # result[8]: plain videos
    k = j['data']['result'][8]['data']
    # result[3]: bangumi (anime series)
    bgm = j['data']['result'][3]['data']
    # result[4]: movies / TV
    mov = j['data']['result'][4]['data']
    videos = []
    for index in range(len(bgm)):
        surl = 'https://www.bilibili.com/bangumi/play/ss' + str(bgm[index]['season_id'])
        title = '[COLOR pink][' + bgm[index]['season_type_name'] + '] ' + bgm[index]['title'] + '[/COLOR]'
        pic = bgm[index]['cover']
        # strip the API's search-highlight markup from the title
        title = title.replace('<em class="keyword">', '')
        title = title.replace('</em>', '')
        videoitem = {}
        videoitem['name'] = title
        videoitem['href'] = surl
        # covers are scheme-relative ('//i*.hdslb.com/...'), so prefix 'http:'
        videoitem['thumb'] = 'http:'+pic
        videos.append(videoitem)
    for index in range(len(mov)):
        surl = 'https://www.bilibili.com/bangumi/play/ss' + str(mov[index]['season_id'])
        title = '[COLOR pink][' + mov[index]['season_type_name'] + '] ' + mov[index]['title'] + '[/COLOR]'
        pic = mov[index]['cover']
        # strip the API's search-highlight markup from the title
        title = title.replace('<em class="keyword">', '')
        title = title.replace('</em>', '')
        videoitem = {}
        videoitem['name'] = title
        videoitem['href'] = surl
        videoitem['thumb'] = 'http:'+pic
        videos.append(videoitem)
    for index in range(len(k)):
        arcurl = 'http://www.bilibili.com/video/' + k[index]['bvid']
        title = '[' + k[index]['typename'] + ']' + k[index]['title']
        pic = k[index]['pic']
        #duration = k[index]['duration']
        # strip the API's search-highlight markup from the title
        title = title.replace('<em class="keyword">', '')
        title = title.replace('</em>', '')
        videoitem = {}
        videoitem['name'] = title
        videoitem['href'] = arcurl
        # FIX: was 'http://'+pic, which produced 'http:////host/...' for the
        # scheme-relative urls this API returns; use 'http:' like the
        # bangumi/movie branches and the sibling search functions.
        videoitem['thumb'] = 'http:'+pic
        videoitem['genre'] = '喜剧片'  # NOTE(review): hard-coded genre -- looks like leftover test data
        videos.append(videoitem)
    # The API caps numResults at 1000; show '1000+' in that case.
    if int(j['data']['numResults']) == 1000:
        numResults = str(j['data']['numResults']) + '+'
    else:
        numResults = str(j['data']['numResults'])
    dialog = xbmcgui.Dialog()
    dialog.notification('当前'+ str(page) + '/' + str(j['data']['numPages']) + '页', '总共'+ numResults + '个视频', xbmcgui.NOTIFICATION_INFO, 5000,False)
    return videos
@plugin.cached(TTL=10)
def get_vidsearch(keyword,page):
    """Video-only search: returns a list of {name, href, thumb} dicts and pops
    a notification with the page/result counts (or 'empty result')."""
    serachUrl = 'https://api.bilibili.com/x/web-interface/search/type?context=&search_type=video&order=&keyword=' + keyword + '&page=' + str(page) +'&duration=&category_id=&tids_1=&tids_2=&__refresh__=true&_extra=&highlight=1&single_column=0&jsonp=jsonp'
    # The search endpoint requires a matching Referer header.
    apiheaders = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
            'Referer': 'https://search.bilibili.com/all?keyword=' + keyword
        }
    r = requests.get(serachUrl, headers=apiheaders)
    r.encoding = 'UTF-8'
    j = json.loads(r.text)
    #
    videos = []
    try:
        bgm = j['data']['result']
        for index in range(len(bgm)):
            vurl = 'https://www.bilibili.com/video/' + str(bgm[index]['bvid'])
            title = bgm[index]['title']
            pic = bgm[index]['pic']
            # strip the API's search-highlight markup from the title
            title = title.replace('<em class="keyword">', '')
            title = title.replace('</em>', '')
            videoitem = {}
            videoitem['name'] = title
            videoitem['href'] = vurl
            videoitem['thumb'] = 'http:'+pic
            videos.append(videoitem)
        # The API caps numResults at 1000; show '1000+' in that case.
        if int(j['data']['numResults']) == 1000:
            numResults = str(j['data']['numResults']) + '+'
        else:
            numResults = str(j['data']['numResults'])
        dialog = xbmcgui.Dialog()
        dialog.notification('当前'+ str(page) + '/' + str(j['data']['numPages']) + '页', '总共'+ numResults + '个视频', xbmcgui.NOTIFICATION_INFO, 5000,False)
    except KeyError:
        # 'result' missing from the payload -> empty search result.
        dialog = xbmcgui.Dialog()
        dialog.notification('提示', '搜索结果为空', xbmcgui.NOTIFICATION_INFO, 5000,False)
    return videos
@plugin.cached(TTL=10)
def get_bgsearch(keyword,page):
    """Bangumi (anime series) search: returns a list of {name, href, thumb}
    dicts; the title is annotated with the episode count or '未开播'."""
    serachUrl = 'https://api.bilibili.com/x/web-interface/search/type?context=&search_type=media_bangumi&order=&keyword=' + keyword + '&page=' + str(page) +'&category_id=&__refresh__=true&_extra=&highlight=1&single_column=1&jsonp=jsonp'
    # The search endpoint requires a matching Referer header.
    apiheaders = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
            'Referer': 'https://search.bilibili.com/all?keyword=' + keyword
        }
    r = requests.get(serachUrl, headers=apiheaders)
    r.encoding = 'UTF-8'
    j = json.loads(r.text)
    #
    videos = []
    try:
        bgm = j['data']['result']
        for index in range(len(bgm)):
            # ep_size == 0 means the series has not aired yet.
            if bgm[index]['ep_size'] != 0:
                title = bgm[index]['title'] + ' [更新到第'.decode('utf-8') + str(bgm[index]['ep_size']) + '集]'.decode('utf-8')
            else:
                title = bgm[index]['title'] + ' [未开播]'.decode('utf-8')
            surl = 'https://www.bilibili.com/bangumi/play/ss' + str(bgm[index]['season_id'])
            pic = bgm[index]['cover']
            # strip the API's search-highlight markup from the title
            title = title.replace('<em class="keyword">', '')
            title = title.replace('</em>', '')
            videoitem = {}
            videoitem['name'] = title
            videoitem['href'] = surl
            videoitem['thumb'] = 'http:'+pic
            videos.append(videoitem)
        # The API caps numResults at 1000; show '1000+' in that case.
        if int(j['data']['numResults']) == 1000:
            numResults = str(j['data']['numResults']) + '+'
        else:
            numResults = str(j['data']['numResults'])
        dialog = xbmcgui.Dialog()
        dialog.notification('当前'+ str(page) + '/' + str(j['data']['numPages']) + '页', '总共'+ numResults + '个视频', xbmcgui.NOTIFICATION_INFO, 5000,False)
    except KeyError:
        # 'result' missing from the payload -> empty search result.
        dialog = xbmcgui.Dialog()
        dialog.notification('提示', '搜索结果为空', xbmcgui.NOTIFICATION_INFO, 5000,False)
    return videos
@plugin.cached(TTL=10)
def get_movsearch(keyword,page):
    """Movie/TV (media_ft) search: same shape as get_bgsearch but against the
    film/TV media type."""
    serachUrl = 'https://api.bilibili.com/x/web-interface/search/type?context=&search_type=media_ft&order=&keyword=' + keyword + '&page=' + str(page) +'&category_id=&__refresh__=true&_extra=&highlight=1&single_column=0&jsonp=jsonp'
    # The search endpoint requires a matching Referer header.
    apiheaders = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
            'Referer': 'https://search.bilibili.com/all?keyword=' + keyword
        }
    r = requests.get(serachUrl, headers=apiheaders)
    r.encoding = 'UTF-8'
    j = json.loads(r.text)
    #
    videos = []
    try:
        bgm = j['data']['result']
        for index in range(len(bgm)):
            # ep_size == 0 means the title has not aired yet.
            if bgm[index]['ep_size'] != 0:
                title = bgm[index]['title'] + ' [更新到第'.decode('utf-8') + str(bgm[index]['ep_size']) + '集]'.decode('utf-8')
            else:
                title = bgm[index]['title'] + ' [未开播]'.decode('utf-8')
            surl = 'https://www.bilibili.com/bangumi/play/ss' + str(bgm[index]['season_id'])
            pic = bgm[index]['cover']
            # strip the API's search-highlight markup from the title
            title = title.replace('<em class="keyword">', '')
            title = title.replace('</em>', '')
            videoitem = {}
            videoitem['name'] = title
            videoitem['href'] = surl
            videoitem['thumb'] = 'http:'+pic
            videos.append(videoitem)
        # The API caps numResults at 1000; show '1000+' in that case.
        if int(j['data']['numResults']) == 1000:
            numResults = str(j['data']['numResults']) + '+'
        else:
            numResults = str(j['data']['numResults'])
        dialog = xbmcgui.Dialog()
        dialog.notification('当前'+ str(page) + '/' + str(j['data']['numPages']) + '页', '总共'+ numResults + '个视频', xbmcgui.NOTIFICATION_INFO, 5000,False)
    except KeyError:
        # 'result' missing from the payload -> empty search result.
        dialog = xbmcgui.Dialog()
        dialog.notification('提示', '搜索结果为空', xbmcgui.NOTIFICATION_INFO, 5000,False)
    return videos
@plugin.cached(TTL=10)
def get_livesearch(keyword,page):
    """Live-room search: returns {name, href, thumb} dicts where href is the
    numeric room id; titles are annotated with [LIVE] and the online count."""
    serachUrl = 'https://api.bilibili.com/x/web-interface/search/type?context=&keyword=' + keyword + '&page=' + str(page) +'&order=&category_id=&duration=&user_type=&order_sort=&tids_1=&tids_2=&search_type=live&changing=id&cover_type=user_cover&__refresh__=true&__reload__=false&_extra=&highlight=1&single_column=0&jsonp=jsonp'
    # The search endpoint requires a matching Referer header.
    apiheaders = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
            'Referer': 'https://search.bilibili.com/all?keyword=' + keyword
        }
    r = requests.get(serachUrl, headers=apiheaders)
    r.encoding = 'UTF-8'
    j = json.loads(r.text)
    #
    videos = []
    try:
        bgm = j['data']['result']['live_room']
        for index in range(len(bgm)):
            title = bgm[index]['title']
            # live_status == 1 -> currently streaming
            if int(bgm[index]['live_status']) == 1:
                title += u' [COLOR pink][LIVE][/COLOR]'
            title += u' [在线' + zh(bgm[index]['online']).decode('utf-8') + u']'
            pic = bgm[index]['cover']
            # strip the API's search-highlight markup from the title
            title = title.replace('<em class="keyword">', '')
            title = title.replace('</em>', '')
            videoitem = {}
            videoitem['name'] = title
            # For live rooms, href carries the room id instead of a URL.
            videoitem['href'] = bgm[index]['roomid']
            videoitem['thumb'] = 'http:'+pic
            videos.append(videoitem)
        # The API caps numResults at 1000; show '1000+' in that case.
        if int(j['data']['numResults']) == 1000:
            numResults = str(j['data']['numResults']) + '+'
        else:
            numResults = str(j['data']['numResults'])
        dialog = xbmcgui.Dialog()
        dialog.notification('当前'+ str(page) + '/' + str(j['data']['numPages']) + '页', '总共'+ numResults + '个视频', xbmcgui.NOTIFICATION_INFO, 5000,False)
    except KeyError:
        # 'result'/'live_room' missing from the payload -> empty search result.
        dialog = xbmcgui.Dialog()
        dialog.notification('提示', '搜索结果为空', xbmcgui.NOTIFICATION_INFO, 5000,False)
    return videos
@plugin.cached(TTL=10)
def get_upsearch(keyword,page):
    """Search Bilibili users (UP主) for *keyword*, one result page at a time.

    Returns a list of dicts: 'name' (username decorated with gender symbol,
    level tag, optional [LIVE] marker, upload and fan counts), 'href' (the
    numeric mid) and 'thumb' (avatar URL).  Pops a Kodi notification with
    paging info, or an "empty result" notice when the payload has no
    'result' key.
    """
    serachUrl = 'https://api.bilibili.com/x/web-interface/search/type?context=&search_type=bili_user&order=&keyword=' + keyword + '&page=' + str(page) +'&category_id=&user_type=&order_sort=&changing=mid&__refresh__=true&_extra=&highlight=1&single_column=0&jsonp=jsonp'
    apiheaders = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
        'Referer': 'https://search.bilibili.com/all?keyword=' + keyword
    }
    r = requests.get(serachUrl, headers=apiheaders)
    r.encoding = 'UTF-8'
    j = json.loads(r.text)
    #
    videos = []
    try:
        bgm = j['data']['result']
        #k = k.encode('utf-8')
        #dialog = xbmcgui.Dialog()
        #ok = dialog.ok('错误提示', arcurl)
        for index in range(len(bgm)):
            title = bgm[index]['uname'].encode('utf-8')
            pic = bgm[index]['upic']
            # strip the <em> highlight markup the search API injects into names
            title = title.replace('<em class="keyword">', '')
            title = title.replace('</em>', '')
            # gender field: 1 = male, 2 = female (anything else gets no symbol)
            if int(bgm[index]['gender']) == 1:
                title += ' [COLOR blue][♂][/COLOR]'
            if int(bgm[index]['gender']) == 2:
                title += ' [COLOR pink][♀][/COLOR]'
            title += ' ' + level_color(bgm[index]['level'])
            if int(bgm[index]['is_live']) == 1:
                title += ' [COLOR pink][LIVE][/COLOR]'
            videoitem = {}
            videoitem['name'] = title + ' - ' + zh(bgm[index]['videos']) + '投稿 · ' + zh(bgm[index]['fans']) + '粉丝'
            videoitem['href'] = bgm[index]['mid']
            videoitem['thumb'] = 'http:'+pic
            videos.append(videoitem)
        # the API caps numResults at 1000; show "1000+" in that case
        if int(j['data']['numResults']) == 1000:
            numResults = str(j['data']['numResults']) + '+'
        else:
            numResults = str(j['data']['numResults'])
        dialog = xbmcgui.Dialog()
        dialog.notification('当前'+ str(page) + '/' + str(j['data']['numPages']) + '页', '总共'+ numResults + '个视频', xbmcgui.NOTIFICATION_INFO, 5000,False)
    except KeyError:
        dialog = xbmcgui.Dialog()
        dialog.notification('提示', '搜索结果为空', xbmcgui.NOTIFICATION_INFO, 5000,False)
    return videos
@plugin.cached(TTL=10)
def get_up(uid,page):
    """Fetch one page (30 items) of an uploader's videos, newest first.

    Returns a list of dicts with 'name', 'href' (video page URL) and
    'thumb'; also pops a Kodi notification with the page/total counts.
    """
    resp = requests.get('https://api.bilibili.com/x/space/arc/search?mid='+uid+'&ps=30&tid=0&pn='+page+'&keyword=&order=pubdate&jsonp=jsonp', headers=headers)
    resp.encoding = 'UTF-8'
    payload = json.loads(resp.text)
    page_info = payload['data']['page']
    videos = [
        {
            'name': entry['title'],
            'href': 'https://www.bilibili.com/video/' + entry['bvid'],
            'thumb': 'http:' + entry['pic'],
        }
        for entry in payload['data']['list']['vlist']
    ]
    dialog = xbmcgui.Dialog()
    dialog.notification('当前'+ str(page) + '/' + str(int(int(page_info['count']) / 30) + 1) + '页', '总共'+ str(page_info['count']) + '个视频', xbmcgui.NOTIFICATION_INFO, 5000,False)
    return videos
@plugin.cached(TTL=60)
def get_bangumiinfo(url):
    """Collect Kodi listitem metadata for a bangumi season URL.

    Reads the season page's embedded __INITIAL_STATE__ JSON plus the
    matching media (md...) page, and returns a dict of plot, title, cover,
    rating, air date, areas, staff (writer/director), cast and genre tags.
    """
    r = requests.get(url,headers=headers)
    rtext = r.text
    # __INITIAL_STATE__ JSON is embedded between these two markers
    str1 = rtext.find('window.__INITIAL_STATE__=')
    str2 = rtext.find(';(function(){var s')
    vjson = rtext[str1+25:str2]
    j = json.loads(vjson)
    # second fetch: the media page carries publish/staff/actor details
    r1 = requests.get('https://www.bilibili.com/bangumi/media/md' + str(j['mediaInfo']['id']),headers=headers)
    rt = r1.text
    str1 = rt.find('window.__INITIAL_STATE__=')
    str2 = rt.find(';(function(){var s')
    mjson = rt[str1+25:str2]
    j2 = json.loads(mjson)
    stat = j['mediaInfo']['stat']
    mp4info = {}
    jianjie = zh(stat['views']) + '播放 · ' + zh(stat['danmakus']) + '弹幕 · ' + zh(stat['reply']) +'评论\n'
    jianjie += str(j['mediaInfo']['rating']['score']) + '分('+ str(j['mediaInfo']['rating']['count']) + '人评) · ' + zh(stat['coins']) + '投币 · ' + zh(stat['favorites']) + '追番\n'
    #bpnum = j2['mediaInfo']['stat']['danmakus'] + j2['mediaInfo']['stat']['series_follow'] + stat['reply'] +j['mediaInfo']['rating']['count'] + stat['coins']
    #jianjie += '白嫖率:' + str(100-round((float(bpnum)/float(j2['mediaInfo']['stat']['views']))*100,2)) +'% \n'
    jianjie += '--------------------------\n'
    jianjie += j2['mediaInfo']['publish']['release_date_show'].encode('utf-8') + '\n'
    jianjie += j2['mediaInfo']['publish']['time_length_show'].encode('utf-8') +'\n'
    jianjie += '--------------------------\n'
    # evaluate (synopsis) may be missing/None -> AttributeError on .encode
    try:
        mp4info['plot'] = jianjie + j['mediaInfo']['evaluate'].encode('utf-8')
    except AttributeError:
        mp4info['plot'] = jianjie
    mp4info['title'] = j['mediaInfo']['title']
    mp4info['img'] = 'http:' + j['mediaInfo']['cover']
    mp4info['rating'] = j['mediaInfo']['rating']['score']
    mp4info['userrating'] = j['mediaInfo']['rating']['score']
    mp4info['aired'] = j2['mediaInfo']['publish']['pub_date']
    areas = []
    for index in range(len(j2['mediaInfo']['areas'])):
        areas.append(j2['mediaInfo']['areas'][index]['name'])
    mp4info['country'] = areas
    # staff is one newline-separated blob, lines shaped like "职位:名,名,..."
    staff = j2['mediaInfo']['staff']
    staff1 = staff.split('\n')
    #print(staff1)
    st12 = []
    st13 = []
    for index in range(len(staff1)):
        # NOTE(review): str.find() returns -1 (truthy) when absent, so the
        # expression "find(a) and find(b) != -1" almost never filters as
        # intended; additionally the inner loops below rebind `index`, so
        # the second condition can read the wrong staff1 line afterwards.
        # Verify this parser against real media-page payloads before changing.
        if staff1[index].find('编剧'.decode('utf-8')) and staff1[index].find('设计'.decode('utf-8')) != -1:
            st10 = staff1[index].split(':'.decode('utf-8'))
            st11 = st10[1].split(',')
            #print(st11)
            for index in range(len(st11)):
                st12.append(st11[index])
            #dialog = xbmcgui.Dialog()
            #ok = dialog.ok('错误提示', str(st12))
            mp4info['writer'] = st12
        if staff1[index].find('导演'.decode('utf-8')) and staff1[index].find('监督'.decode('utf-8')) != -1:
            st10 = staff1[index].split(':'.decode('utf-8'))
            st11 = st10[1].split(',')
            for index in range(len(st11)):
                st13.append(st11[index])
            mp4info['director'] = st13
    cast = []
    # actors is also a newline blob, lines shaped like "角色:声优"
    cast1 = j2['mediaInfo']['actors']
    cast1 = cast1.split('\n')
    for index in range(len(cast1)):
        if cast1[index].find(':'.decode('utf-8')) != -1:
            cast2 = cast1[index].split(':'.decode('utf-8'))
            cast.append((cast2[0],cast2[1]))
        else:
            cast.append(cast1[index])
    mp4info['cast'] = cast
    tag = []
    for index in range(len(j2['mediaInfo']['styles'])):
        tag.append(j2['mediaInfo']['styles'][index]['name'])
    mp4info['genre'] = tag
    mp4info['mediatype'] = 'video'
    mp4info['reply'] = zh(stat['reply'])
    return mp4info
@plugin.cached(TTL=60)
def get_mp4info(url):
    """Collect Kodi listitem metadata for a normal video URL.

    Parses the page's embedded __INITIAL_STATE__ JSON plus the archive/stat
    API, and returns a dict of plot, title, cover, tags, cast (uploader or
    collaboration staff), dates, duration, and the uploader's name/uid/face
    for follow-up routes.
    """
    r = requests.get(url,headers=headers)
    r.encoding = 'utf-8'
    rtext = r.text
    str1 = rtext.find('window.__INITIAL_STATE__=')
    str2 = rtext.find(';(function(){var s')
    vjson = rtext[str1+25:str2]
    j = json.loads(vjson)
    uptime = j['videoData']['ctime']
    # convert the epoch timestamp to local time
    time_local = time.localtime(uptime)
    # reformat as "YYYY-mm-dd HH:MM:SS" (e.g. 2016-05-05 20:28:54)
    uptime = time.strftime("%Y-%m-%d %H:%M:%S",time_local)
    data = time.strftime("%Y-%m-%d",time_local)
    sr = requests.get('https://api.bilibili.com/x/web-interface/archive/stat?aid='+str(j['aid']),headers=headers)
    sj = json.loads(sr.text)
    stat = sj['data']
    mp4info = {}
    jianjie = zh(stat['view']) + '播放 · ' + zh(stat['danmaku']) + '弹幕 · ' + zh(stat['reply']) +'评论\n'
    jianjie += zh(stat['like']) + '赞 · ' + zh(stat['coin']) + '投币 · ' + zh(stat['favorite']) + '收藏\n'
    mp4info['reply'] = zh(stat['reply'])
    if stat['now_rank'] != 0:
        jianjie += '今日全站日排行第' + str(stat['now_rank']) + '名\n'
    if stat['his_rank'] != 0:
        jianjie += '最高全站日排行第' + str(stat['his_rank']) + '名\n'
    if stat['copyright'] == 1:
        jianjie += '[COLOR red]未经作者许可,禁止转载[/COLOR]\n'
    # optional "freeloader rate" line, toggled by the 'bq' cache flag
    if 'bq' in cache:
        if cache['bq'] == 1:
            bpnum = bp([stat['like'],stat['coin'],stat['favorite']])
            jianjie += '白嫖率:' + str(100-round(((float(bpnum) + float(stat['danmaku']) + float(stat['reply']) + float(stat['share']))/float(stat['view']))*100,2)) +'% \n'
    jianjie += '--------------------------\n'
    jianjie += 'av' + str(j['aid']) +' · ' + j['bvid'].encode('utf-8') +'\n'
    jianjie += '发布时间:' + uptime +'\n'
    jianjie += '--------------------------\n'
    # desc may be missing/None on some videos -> AttributeError on .encode
    try:
        mp4info['plot'] = jianjie + j['videoData']['desc'].encode('utf-8')
    except AttributeError:
        mp4info['plot'] = jianjie
    mp4info['title'] = j['videoData']['title']
    mp4info['img'] = j['videoData']['pic']
    tag = []
    for index in range(len(j['tags'])):
        tag.append(j['tags'][index]['tag_name'])
    mp4info['genre'] = tag
    mp4info['tag'] = tag
    cast = []
    # collaboration videos list all staff; otherwise just the uploader
    if j['staffData'] != []:
        for index in range(len(j['staffData'])):
            up = j['staffData'][index]['name'] + '[' +j['staffData'][index]['title'] +']'
            fan = zh(j['staffData'][index]['follower']) + '粉丝'
            cast.append((up,fan))
    else:
        up = j['upData']['name']
        fan = zh(j['upData']['fans']) + '粉丝'
        cast.append((up,fan))
    mp4info['cast'] = cast
    mp4info['dateadded'] = uptime
    mp4info['aired'] = data
    mp4info['duration'] = j['videoData']['duration']
    mp4info['mediatype'] = 'video'
    # pass the uploader's identity along for follow-up routes
    mp4info['upname'] = j['videoData']['owner']['name']
    mp4info['uid'] = j['videoData']['owner']['mid']
    mp4info['face'] = j['videoData']['owner']['face']
    return mp4info
# Comment section
@plugin.cached(TTL=60)
def get_comm(url,sort):
    """Render a video's comment section as one formatted text blob.

    *url* is a bangumi episode ('/bangumi/play/ep...') or a normal video
    URL; *sort* is the reply-API sort mode.  Top-level replies and their
    preview sub-replies are rendered with poster name (pink for big
    members), level tag, an [UP主] marker when the poster is the uploader
    (normal videos only), post time, like count and reply count.
    """
    if re.match('https://',url) == None:
        if re.match('http://',url) != None:
            url = 'https://'+url[7:]
        else:
            dialog = xbmcgui.Dialog()
            ok = dialog.ok('错误提示', '非法url')
    ifbangumiurl = re.match('https://www.bilibili.com/bangumi/play/ep',url)
    ifvideourl = re.match('https://www.bilibili.com/video/',url)
    if ifbangumiurl or ifvideourl != None:
        if ifbangumiurl != None:
            # bangumi: map the ep id to the episode's aid via the page state
            epid = re.search(r'ep[0-9]+', url)
            epid = epid.group()
            epid = epid[2:]
            r = requests.get(url,headers=headers)
            rtext = r.text
            str1 = rtext.find('window.__INITIAL_STATE__=')
            str2 = rtext.find(';(function(){var s')
            vjson = rtext[str1+25:str2]
            j = json.loads(vjson)
            elist = j['epList']
            aid = ''
            for index in range(len(elist)):
                if int(elist[index]['id']) == int(epid):
                    aid = elist[index]['aid']
            if aid == '':
                # not in the main list: scan the extra sections (PVs, specials)
                slist = j['sections']
                if slist != []:
                    for index in range(len(slist)):
                        sslist = slist[index]['epList']
                        for i in range(len(sslist)):
                            if int(sslist[i]['id']) == int(epid):
                                # BUGFIX: use the matched inner index i, not the
                                # outer sections index, to pick the episode's aid
                                aid = sslist[i]['aid']
                if aid == '':
                    dialog = xbmcgui.Dialog()
        if ifvideourl != None:
            # normal video: resolve bvid -> aid and remember the uploader's mid
            bvid = re.search(r'BV[a-zA-Z0-9]+', url)
            bvurl = 'https://api.bilibili.com/x/web-interface/view?bvid='+bvid.group()
            r = requests.get(bvurl,headers=headers)
            j = json.loads(r.text)
            aid = j['data']['aid']
            mid = j['data']['owner']['mid']
        apiurl = 'https://api.bilibili.com/x/v2/reply?jsonp=jsonp&pn=1&type=1&oid='+str(aid)+'&sort=' + sort
        apiheaders = {'user-agent' : 'Mozilla/5.0 (Linux; Android 10; Z832 Build/MMB29M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.116 Mobile Safari/537.36','referer':'https://www.bilibili.com/video/BV1Ze411W7EL'}
        r = requests.get(apiurl,headers=apiheaders)
        j = json.loads(r.text)
        rep = j['data']['replies']
        text = ''
        for index in range(len(rep)):
            text += '-----'*12 +'\n'
            # convert the epoch timestamp to "YYYY-mm-dd HH:MM:SS" local time
            ctime = int(rep[index]['ctime'])
            time_local = time.localtime(ctime)
            ctime = time.strftime("%Y-%m-%d %H:%M:%S",time_local)
            # big members (vipType 2) get a pink name
            if rep[index]['member']['vip']['vipType'] == 2:
                text += '[COLOR pink]' + rep[index]['member']['uname'].encode('utf-8') + '[/COLOR]'
            else:
                text += rep[index]['member']['uname'].encode('utf-8')
            # append the user-level tag
            text += level_color(rep[index]['member']['level_info']['current_level'])
            # mark the uploader's own comments (only known for normal videos)
            if ifvideourl != None:
                if int(mid) == int(rep[index]['member']['mid']):
                    text += ' [COLOR pink][UP主][/COLOR]'
            text += '\n'
            text += rep[index]['content']['message'].encode('utf-8') +'\n'
            text += str(ctime) + ' · ' + str(rep[index]['like']) + '赞 · 共' + str(rep[index]['count']) +'条回复\n'
            rrep = rep[index]['replies']
            text += '-----'*12 +'\n\n'
            if rrep:
                # indented preview of the first sub-replies
                for i in range(len(rrep)):
                    ctime = int(rrep[i]['ctime'])
                    time_local = time.localtime(ctime)
                    ctime = time.strftime("%Y-%m-%d %H:%M:%S",time_local)
                    if rrep[i]['member']['vip']['vipType'] == 2:
                        text += ' '*5 + '[COLOR pink]' + rrep[i]['member']['uname'].encode('utf-8') + '[/COLOR]'
                    else:
                        text += ' '*5 + rrep[i]['member']['uname'].encode('utf-8')
                    text += level_color(rrep[i]['member']['level_info']['current_level'])
                    if ifvideourl != None:
                        # BUGFIX: compare against the SUB-reply's author, not the
                        # parent reply's author, when tagging [UP主]
                        if int(mid) == int(rrep[i]['member']['mid']):
                            text += ' [COLOR pink][UP主][/COLOR]'
                    text += '\n'
                    text += ' '*5 + rrep[i]['content']['message'].encode('utf-8') +'\n'
                    text += ' '*5 + str(ctime) + ' · ' + str(rrep[i]['like']) + '赞 · 共' + str(rrep[i]['count']) +'条回复\n'
                    if len(rrep)-1 != i:
                        text += ' '*5 + '-----'*10 +'\n'
    return text
@plugin.cached(TTL=60)
def get_bangumijson(url):
    """Resolve a bangumi episode URL to the html5 playurl API payload.

    Extracts the numeric ep id from *url* (the text after 'y/ep', digits
    only) and returns the decoded JSON response of the pgc playurl endpoint.
    """
    cutep = url.find('y/ep')
    epnum = url[cutep+4:]
    # keep digits only (drops any trailing path/query characters)
    epnum = re.sub(r'\D','',epnum)
    apiurl = 'https://api.bilibili.com/pgc/player/web/playurl/html5?ep_id='
    rec = requests.get(apiurl+epnum,headers=mheaders)
    # (dead re-encoding of the body removed; JSON is parsed straight from rec.text)
    j = json.loads(rec.text)
    return j
@plugin.cached(TTL=10)
def get_api1(url,quality):
    """Resolve a normal video URL via the legacy signed interface API.

    Accepts BV- or av-style URLs (optionally with '?p=' for one part of a
    multi-part video), derives the appkey/secret from an obfuscated
    constant, signs the query with MD5, and returns the list of durl
    stream URLs at the requested *quality* (qn).
    """
    if re.search(r'[Bb]{1}[Vv]{1}[a-zA-Z0-9]+', url):
        bvid = re.search(r'[Bb]{1}[Vv]{1}[a-zA-Z0-9]+', url)
        vurl = 'https://api.bilibili.com/x/web-interface/view?bvid='+bvid.group()
    if re.search('[aA]{1}[vV]{1}[0-9]+', url):
        aid = re.search(r'[aA]{1}[vV]{1}[0-9]+', url)
        aid = aid.group()
        aid = aid[2:]
        vurl = 'https://api.bilibili.com/x/web-interface/view?aid='+aid
    if '?p=' in url:
        # a single part of a multi-part video was requested (1-based -> 0-based)
        p = int(re.search(r'\?p=(\d+)',url).group(1)) -1
    else:
        p = 0
    r = requests.get(vurl,headers=headers)
    j = json.loads(r.text)
    cid = j['data']['pages'][int(p)]['cid']
    # side effect: fetch/convert the danmaku (comment overlay) for this cid
    danmuku.Danmuku(cid)
    print(cid)
    # the appkey:secret pair is stored reversed and shifted by 2 code points
    entropy = 'rbMCKn@KuamXWlPMoJGsKcbiJKUfkPF_8dABscJntvqhRSETg'
    appkey, sec = ''.join([chr(ord(i) + 2) for i in entropy[::-1]]).split(':')
    params = 'appkey=%s&cid=%s&otype=json&qn=%s&quality=%s&type=' % (appkey, cid, quality, quality)
    # sign = md5(query-string + secret)
    tmp = params + sec
    tmp = tmp.encode('utf-8')
    chksum = hashlib.md5(bytes(tmp)).hexdigest()
    url_api = 'https://interface.bilibili.com/v2/playurl?%s&sign=%s' % (params, chksum)
    apiheaders = {
        'Referer': url,
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36'
    }
    # print(url_api)
    html = requests.get(url_api, headers=apiheaders).json()
    # print(json.dumps(html))
    video_list = []
    for i in html['durl']:
        video_list.append(i['url'])
    # print(video_list)
    return video_list
@plugin.cached(TTL=10)
def get_api2(url):
    """Resolve a normal video URL via the third-party xbeibeix.com API.

    Returns a single mp4 URL string, or '' when the URL is unsupported or
    the video does not exist (an error dialog is shown in those cases).
    """
    mp4 = ''
    if re.match('https://',url) == None:
        if re.match('http://',url) != None:
            url = 'https://'+url[7:]
        else:
            dialog = xbmcgui.Dialog()
            ok = dialog.ok('错误提示', '非法url')
    ifvideourl = re.match('https://www.bilibili.com/video/',url)
    if ifvideourl != None:
        bvid = ''
        aid = ''
        if re.search(r'[Bb]{1}[Vv]{1}[a-zA-Z0-9]+', url):
            bvid = re.search(r'[Bb]{1}[Vv]{1}[a-zA-Z0-9]+', url)
            bvid = bvid.group()
            vurl = 'https://api.bilibili.com/x/web-interface/view?bvid='+bvid
        if re.search('[aA]{1}[vV]{1}[0-9]+', url):
            aid = re.search(r'[aA]{1}[vV]{1}[0-9]+', url)
            aid = aid.group()
            aid = aid[2:]
            vurl = 'https://api.bilibili.com/x/web-interface/view?aid='+aid
        r = requests.get(vurl,headers=headers)
        j = json.loads(r.text)
        aid = j['data']['aid']
        if '?p=' in url:
            # a single part of a multi-part video was requested
            p = int(re.search(r'\?p=(\d+)',url).group(1)) -1
        else:
            p = 0
        cid = j['data']['pages'][p]['cid']
        # side effect: fetch/convert the danmaku (comment overlay) for this cid
        danmuku.Danmuku(cid)
        apiurl = 'https://www.xbeibeix.com/api/bilibiliapi.php?url=https://www.bilibili.com/&aid='+str(aid)+'&cid=' + str(cid)
        r = requests.get(apiurl,headers=headers)
        j = json.loads(r.text)
        if str(j['url']) != 'null':
            mp4 = j['url']
            # NOTE(review): this textviewer fires on SUCCESS and shows the
            # resolved URL under an "error" title — looks like leftover
            # debugging; confirm whether it should be removed.
            dialog = xbmcgui.Dialog()
            dialog.textviewer('错误提示', str(mp4))
        else:
            dialog = xbmcgui.Dialog()
            ok = dialog.ok('错误提示', '视频不存在')
    else:
        dialog = xbmcgui.Dialog()
        ok = dialog.ok('错误提示', '不支持的url格式')
    return mp4
@plugin.cached(TTL=10)
def get_api3(url, quality):
    """Resolve a bangumi-episode or normal video URL to direct stream URLs.

    Uses the official api.bilibili.com playurl endpoint with the user's
    SESSDATA cookie, at the requested *quality* (qn).  Returns the list of
    durl URLs; shows an error dialog (and returns an empty list) when the
    API refuses to resolve the video.
    """
    if re.match('https://',url) == None:
        if re.match('http://',url) != None:
            url = 'https://'+url[7:]
        else:
            dialog = xbmcgui.Dialog()
            ok = dialog.ok('错误提示', '非法url')
    ifbangumiurl = re.match('https://www.bilibili.com/bangumi/play/ep',url)
    ifvideourl = re.match('https://www.bilibili.com/video/',url)
    if ifbangumiurl or ifvideourl != None:
        if ifbangumiurl != None:
            # bangumi: map the ep id to bvid/cid via the page state
            epid = re.search(r'ep[0-9]+', url)
            epid = epid.group()
            epid = epid[2:]
            r = requests.get(url,headers=headers)
            rtext = r.text
            str1 = rtext.find('window.__INITIAL_STATE__=')
            str2 = rtext.find(';(function(){var s')
            vjson = rtext[str1+25:str2]
            j = json.loads(vjson)
            elist = j['epList']
            bvid = ''
            cid = ''
            for index in range(len(elist)):
                if int(elist[index]['id']) == int(epid):
                    bvid = elist[index]['bvid']
                    cid = elist[index]['cid']
            if bvid == '' or cid == '':
                # not in the main list: scan extra sections (PVs, specials)
                slist = j['sections']
                if slist != []:
                    for index in range(len(slist)):
                        sslist = slist[index]['epList']
                        for i in range(len(sslist)):
                            if int(sslist[i]['id']) == int(epid):
                                # BUGFIX: index the matched episode with i, not
                                # the outer sections index
                                bvid = sslist[i]['bvid']
                                cid = sslist[i]['cid']
                if bvid == '' or cid == '':
                    dialog = xbmcgui.Dialog()
        if ifvideourl != None:
            bvid = ''
            aid = ''
            if re.search(r'[Bb]{1}[Vv]{1}[a-zA-Z0-9]+', url):
                bvid = re.search(r'[Bb]{1}[Vv]{1}[a-zA-Z0-9]+', url)
                bvid = bvid.group()
                vurl = 'https://api.bilibili.com/x/web-interface/view?bvid='+bvid
            if re.search('[aA]{1}[vV]{1}[0-9]+', url):
                aid = re.search(r'[aA]{1}[vV]{1}[0-9]+', url)
                aid = aid.group()
                aid = aid[2:]
                vurl = 'https://api.bilibili.com/x/web-interface/view?aid='+aid
            r = requests.get(vurl,headers=headers)
            j = json.loads(r.text)
            if '?p=' in url:
                # a single part of a multi-part video was requested
                p = int(re.search(r'\?p=(\d+)',url).group(1)) -1
            else:
                p = 0
            cid = j['data']['pages'][p]['cid']
            # side effect: fetch/convert the danmaku overlay for this cid
            danmuku.Danmuku(cid)
        if bvid != '':
            url_api = 'https://api.bilibili.com/x/player/playurl?cid={}&bvid={}&qn={}'.format(cid, bvid, quality)
        else:
            url_api = 'https://api.bilibili.com/x/player/playurl?cid={}&aid={}&qn={}'.format(cid, aid, quality)
        apiheaders = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
            'Cookie': sessdata(), # SESSDATA cookie copied after logging in; valid ~1 month
            'Host': 'api.bilibili.com'
        }
        html = requests.get(url_api, headers=apiheaders).json()
        video_list = []
        if 'data' in html:
            for i in html['data']['durl']:
                video_list.append(i['url'])
        else:
            dialog = xbmcgui.Dialog()
            dialog.ok('提示','无法解析视频')
        return video_list
@plugin.cached(TTL=10)
def get_api4(url,quality):
    """Resolve a bangumi episode via the third-party biliplus.com proxy.

    Intended for region-locked episodes.  Returns the list of durl stream
    URLs; shows an error dialog (and returns an empty list) when the proxy
    cannot resolve the episode.
    """
    # map the ep id in *url* to the episode's bvid/cid via the page state
    epid = re.search(r'ep[0-9]+', url)
    epid = epid.group()
    epid = epid[2:]
    r = requests.get(url,headers=headers)
    rtext = r.text
    str1 = rtext.find('window.__INITIAL_STATE__=')
    str2 = rtext.find(';(function(){var s')
    vjson = rtext[str1+25:str2]
    j = json.loads(vjson)
    elist = j['epList']
    bvid = ''
    cid = ''
    for index in range(len(elist)):
        if int(elist[index]['id']) == int(epid):
            bvid = elist[index]['bvid']
            cid = elist[index]['cid']
    if bvid == '' or cid == '':
        # not in the main list: scan the extra sections (PVs, specials)
        slist = j['sections']
        if slist != []:
            for index in range(len(slist)):
                sslist = slist[index]['epList']
                for i in range(len(sslist)):
                    if int(sslist[i]['id']) == int(epid):
                        # BUGFIX: use the matched inner index i, not the outer
                        # sections index, to pick the episode's ids
                        bvid = sslist[i]['bvid']
                        cid = sslist[i]['cid']
        if bvid == '' or cid == '':
            dialog = xbmcgui.Dialog()
    url_api = 'https://www.biliplus.com/BPplayurl.php?cid={}&qn={}&module=bangumi&otype=json&bvid={}'.format(cid,quality,bvid)
    # side effect: fetch/convert the danmaku overlay for this cid
    danmuku.Danmuku(cid)
    r = requests.get(url_api, headers=headers)
    html = json.loads(r.text)
    video_list = []
    if 'durl' in html:
        for i in range(len(html['durl'])):
            video_list.append(html['durl'][i]['url'])
    else:
        dialog = xbmcgui.Dialog()
        dialog.ok('提示','无法解析视频')
    return video_list
@plugin.cached(TTL=10)
def get_api5(url,quality,api):
    """Resolve a bangumi episode via the kghost.info overseas proxies.

    *api* selects the proxy region: 1 = Taiwan, 2 = Hong Kong.  Returns the
    FIRST durl stream URL as a string; on failure shows an error dialog and
    returns the empty list.
    """
    # map the ep id in *url* to the episode's bvid/cid via the page state
    epid = re.search(r'ep[0-9]+', url)
    epid = epid.group()
    epid = epid[2:]
    r = requests.get(url,headers=headers)
    rtext = r.text
    str1 = rtext.find('window.__INITIAL_STATE__=')
    str2 = rtext.find(';(function(){var s')
    vjson = rtext[str1+25:str2]
    j = json.loads(vjson)
    elist = j['epList']
    bvid = ''
    cid = ''
    for index in range(len(elist)):
        if int(elist[index]['id']) == int(epid):
            bvid = elist[index]['bvid']
            cid = elist[index]['cid']
    if bvid == '' or cid == '':
        # not in the main list: scan the extra sections (PVs, specials)
        slist = j['sections']
        if slist != []:
            for index in range(len(slist)):
                sslist = slist[index]['epList']
                for i in range(len(sslist)):
                    if int(sslist[i]['id']) == int(epid):
                        # BUGFIX: use the matched inner index i, not the outer
                        # sections index, to pick the episode's ids
                        bvid = sslist[i]['bvid']
                        cid = sslist[i]['cid']
        if bvid == '' or cid == '':
            dialog = xbmcgui.Dialog()
    if int(api) == 1:
        apihead = 'https://bilibili-tw-api.kghost.info/'
    if int(api) == 2:
        apihead = 'https://bilibili-hk-api.kghost.info/'
    url_api = apihead + 'x/player/playurl?cid={}&bvid={}&qn={}'.format(cid, bvid, quality)
    # side effect: fetch/convert the danmaku overlay for this cid
    danmuku.Danmuku(cid)
    apiheaders = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
    }
    html = requests.get(url_api, headers=apiheaders).json()
    video_list = []
    if html['data']:
        for i in html['data']['durl']:
            video_list.append(i['url'])
        # callers expect a single URL string from this resolver
        video_list = video_list[0]
    else:
        dialog = xbmcgui.Dialog()
        dialog.ok('提示','无法解析视频')
    return video_list
@plugin.cached(TTL=10)
def get_live(page):
    """Fetch one page of recommended live rooms.

    Returns a list of dicts with 'name' (room title), 'href' (room id) and
    'thumb' (user cover URL).
    """
    resp = requests.get('https://api.live.bilibili.com/room/v1/room/get_user_recommend?page=' +str(page), headers=headers)
    resp.encoding = 'UTF-8'
    rooms = json.loads(resp.text)['data']
    return [
        {'name': room['title'], 'href': room['roomid'], 'thumb': room['user_cover']}
        for room in rooms
    ]
@plugin.cached(TTL=10)
def get_livemore(url,page):
    """Fetch one page of live rooms from an area-listing endpoint *url*.

    Returns a list of dicts with 'name' ("[area]title"), 'href' (room id)
    and 'thumb' (cover URL).
    """
    resp = requests.get(url + '&page=' +str(page), headers=headers)
    resp.encoding = 'UTF-8'
    rooms = json.loads(resp.text)['data']['list']
    return [
        {
            'name': '[' + room['area_name'] + ']' + room['title'],
            'href': room['roomid'],
            'thumb': room['cover'],
        }
        for room in rooms
    ]
@plugin.cached(TTL=1)
def get_roominfo(id):
    """Collect Kodi listitem metadata for live room *id*.

    Returns a dict with title, cover, a plot assembled from room ids /
    online count / live status / description, air date, genre/tag lists,
    the streamer as cast, and a live-status string.
    """
    flvdict = {}
    r = requests.get('https://api.live.bilibili.com/room/v1/Room/get_info?id='+str(id), headers=headers)
    r.encoding = 'UTF-8'
    j = json.loads(r.text)
    ro = j['data']
    # the room description is HTML; BeautifulSoup strips the markup for the plot
    soup = BeautifulSoup(ro['description'], "html5lib")
    flvdict['title'] = ro['title']
    flvdict['img'] = ro['user_cover']
    jianjie = '房间号:' + str(ro['room_id']) + '\n'
    if ro['short_id'] != 0:
        jianjie += '短房间号:' + str(ro['short_id'])+ '\n'
    jianjie += '在线:' + zh(ro['online']) + '\n'
    if ro['live_status'] == 1:
        flvdict['status'] = '开播'
        jianjie += '开播时间:' + ro['live_time'].encode('utf-8') + '\n'
    else:
        flvdict['status'] = '未开播'
    jianjie += '--------------------------\n'
    jianjie += (soup.text).encode('utf-8')
    flvdict['plot'] = jianjie
    # extract the YYYY-mm-dd part of live_time as the 'aired' date
    # NOTE(review): this local name shadows the imported time module within
    # the rest of this function body
    time = re.search('[\d]{4}-[\d]{2}-[\d]{2}',ro['live_time']).group()
    flvdict['aired'] = time
    genre = [ro['parent_area_name'],ro['area_name']]
    tag = ro['tags'].split(',')
    flvdict['genre'] = genre + tag
    flvdict['tag'] = genre + tag
    j = json.loads(get_up_baseinfo(ro['uid']))
    # the streamer becomes the "cast": a (name, fan-count) tuple
    fan = zh(ro['attention']) + '粉丝'
    #fan = fan.decode('utf-8')
    flvdict['cast'] = [(j['data']['name'],fan)]
    flvdict['mediatype'] = 'video'
    return flvdict
@plugin.cached(TTL=1)
def get_roommp4(id):
    """Return the FLV stream URLs for live room *id* (empty list on failure)."""
    resp = requests.get('https://api.live.bilibili.com/xlive/web-room/v1/index/getRoomPlayInfo?room_id='+str(id)+'&play_url=1&mask=0&qn=0&platform=web', headers=headers)
    resp.encoding = 'UTF-8'
    payload = json.loads(resp.text)
    urls = []
    try:
        # 'play_url' is null when the room is missing/offline -> TypeError below
        for entry in payload['data']['play_url']['durl']:
            urls.append(entry['url'])
    except TypeError:
        dialog = xbmcgui.Dialog()
        dialog.notification('获取直播源地址失败', '可能房间号不存在未开播', xbmcgui.NOTIFICATION_INFO, 5000,False)
    return urls
def get_categories():
    """Return the static top-level ranking categories (name + ranking URL)."""
    rankings = (
        ('首页', 'https://www.bilibili.com/ranking/all/0/0/1'),
        ('新番', 'https://www.bilibili.com/ranking/bangumi/13/0/3'),
        ('动画', 'https://www.bilibili.com/ranking/all/1/0/1'),
        ('国创', 'https://www.bilibili.com/ranking/all/168/0/1'),
        ('音乐', 'https://www.bilibili.com/ranking/all/3/0/1'),
        ('舞蹈', 'https://www.bilibili.com/ranking/all/129/0/1'),
        ('游戏', 'https://www.bilibili.com/ranking/all/4/0/1'),
        ('科技', 'https://www.bilibili.com/ranking/all/36/0/1'),
        ('数码', 'https://www.bilibili.com/ranking/all/188/0/1'),
        ('生活', 'https://www.bilibili.com/ranking/all/160/0/1'),
        ('鬼畜', 'https://www.bilibili.com/ranking/all/119/0/1'),
        ('时尚', 'https://www.bilibili.com/ranking/all/155/0/1'),
        ('娱乐', 'https://www.bilibili.com/ranking/all/5/0/1'),
        ('新人', 'https://www.bilibili.com/ranking/rookie/0/0/3'),
    )
    return [{'name': label, 'link': link} for label, link in rankings]
@plugin.cached(TTL=10)
def get_videos(category):
    """Scrape a ranking page URL and return its video list.

    Titles/links come from the parsed HTML list items; thumbnails come from
    the embedded __INITIAL_STATE__ JSON because the <img> src attributes
    are lazy-loaded placeholders.
    """
    # if int(page) == 1:
    #     pageurl = category
    # else:
    #     pageurl = category + 'index_'+page+'.html'
    pageurl = category
    r = requests.get(pageurl, headers=headers)
    r.encoding = 'UTF-8'
    soup = BeautifulSoup(r.text, "html.parser")
    videos = []
    #videoelements = soup.find('ul', id='list1').find_all('li')
    #videoelements = contenter.find_all("a", attrs={"data-original": True})
    videoelements = soup.find_all('li',class_='rank-item')
    rectext = r.text
    cutjson =str(rectext.encode('utf-8'))
    str1 = cutjson.find('window.__INITIAL_STATE__=')
    str2 = cutjson.find(';(function(){var s;')
    rankinfojson = cutjson[str1+25:str2]
    j = json.loads(rankinfojson)
    if videoelements is None:
        dialog = xbmcgui.Dialog()
        ok = dialog.ok('错误提示', '没有播放源')
    else:
        #dialog = xbmcgui.Dialog()
        #sss = str(len(videoelements))
        #ok = dialog.ok('video数量', sss)
        # num walks rankList in lockstep with the parsed <li> elements
        num = 0
        for videoelement in videoelements:
            #img = videoelement.find('img')['alt']
            #imgcut = img.find('.png@')
            #img = img[:imgcut] + '.png'
            #dialog = xbmcgui.Dialog()
            #ok = dialog.ok('错误提示', img)
            img = j['rankList'][num]['pic']
            img = img.encode('utf-8')
            videoitem = {}
            videoitem['name'] = videoelement.find('img')['alt']
            videoitem['href'] = videoelement.find('a')['href']
            videoitem['thumb'] = img
            # NOTE(review): '豆瓣电影' ("Douban movies") looks copied from
            # another addon; confirm this genre label is intended here
            videoitem['genre'] = '豆瓣电影'
            videos.append(videoitem)
            num = num+1
    return videos
@plugin.cached(TTL=10)
def get_sources(url):
    """List playable parts/episodes for a video or bangumi season URL.

    For a season URL ('/bangumi/play/ss...'), each episode of the main list
    and every extra section (PVs, specials) becomes one entry; for a normal
    video, each part (P) becomes one entry.  Each entry carries a display
    'name' and an 'href' routed through plugin.url_for('play', ...).
    """
    sources = []
    if re.match('https://',url) == None:
        if re.match('http://',url) != None:
            url = 'https://'+url[7:]
        else:
            dialog = xbmcgui.Dialog()
            ok = dialog.ok('错误提示', '非法url')
    ifbangumiurl = re.match('https://www.bilibili.com/bangumi/play/ss',url)
    ifvideourl = re.match('https://www.bilibili.com/video/',url)
    if ifbangumiurl or ifvideourl != None:
        if ifbangumiurl != None:
            r = requests.get(url,headers=headers)
            rtext = r.text
            str1 = rtext.find('window.__INITIAL_STATE__=')
            str2 = rtext.find(';(function(){var s')
            vjson = rtext[str1+25:str2]
            j = json.loads(vjson)
            # cache the season title so play() can detect region-locked shows
            cache['bgtitle'] = j['h1Title']
            elist = j['epList']
            for index in range(len(elist)):
                videosource = {}
                if elist[index]['badge'] != '':
                    # BUGFIX: append the long title when it is NON-empty (the
                    # branches were inverted, producing "title -  [badge]")
                    if elist[index]['longTitle'] != '':
                        ename = '正片 : ' + str(elist[index]['title'].encode('utf-8')) + ' - ' + elist[index]['longTitle'].encode('utf-8') + ' [COLOR pink][' + elist[index]['badge'].encode('utf-8') + '][/COLOR]'
                    else:
                        ename = '正片 : ' + str(elist[index]['title'].encode('utf-8')) + ' [COLOR pink][' + elist[index]['badge'].encode('utf-8') + '][/COLOR]'
                else:
                    if elist[index]['longTitle'] != '':
                        ename = '正片 : ' + str(elist[index]['title'].encode('utf-8')) + ' - ' + elist[index]['longTitle'].encode('utf-8')
                    else:
                        ename = '正片 : ' + str(elist[index]['title'].encode('utf-8'))
                href = 'https://www.bilibili.com/bangumi/play/ep' + str(elist[index]['id']) + '?https://www.bilibili.com/video/' + elist[index]['bvid']
                videosource['name'] = ename
                videosource['href'] = plugin.url_for('play',name=ename,url=href)
                sources.append(videosource)
            # extra sections: PVs, specials, etc.
            slist = j['sections']
            if slist != []:
                for index in range(len(slist)):
                    title = slist[index]['title'].encode('utf-8')
                    sslist = slist[index]['epList']
                    for index in range(len(sslist)):
                        videosource = {}
                        # epStatus 13 marks member-only episodes
                        if sslist[index]['epStatus'] == 13:
                            if sslist[index]['longTitle'].encode('utf-8') != '':
                                ssname = title + ' : ' + str(sslist[index]['title'].encode('utf-8')) + ' - ' + sslist[index]['longTitle'].encode('utf-8') + ' [COLOR pink][会员][/COLOR]'
                            else:
                                ssname = title + ' : ' + str(sslist[index]['title'].encode('utf-8')) + ' [COLOR pink][会员][/COLOR]'
                        else:
                            if sslist[index]['longTitle'].encode('utf-8') != '':
                                ssname = title + ' : ' + str(sslist[index]['title'].encode('utf-8')) + ' - ' + sslist[index]['longTitle'].encode('utf-8')
                            else:
                                ssname = title + ' : ' + str(sslist[index]['title'].encode('utf-8'))
                        href = 'https://www.bilibili.com/bangumi/play/ep' + str(sslist[index]['id'])
                        videosource['name'] = ssname
                        videosource['href'] = plugin.url_for('play',name=ssname,url=href)
                        sources.append(videosource)
            return sources
        else:
            # normal video: one entry per part (P)
            r = requests.get(url,headers=headers)
            rtext = r.text
            str1 = rtext.find('window.__INITIAL_STATE__=')
            str2 = rtext.find(';(function(){var s')
            vjson = rtext[str1+25:str2]
            j = json.loads(vjson)
            vlist = j['videoData']['pages']
            for index in range(len(vlist)):
                # single-part videos link the bare URL; multi-part append ?p=
                if len(vlist) == 1:
                    href = url
                else:
                    href = url + '?p=' + str(vlist[index]['page'])
                videosource = {}
                videosource['name'] = str(vlist[index]['page']) + ' - ' + vlist[index]['part']
                videosource['href'] = plugin.url_for('play',name=(str(vlist[index]['page']) + ' - ' + vlist[index]['part']).encode('utf-8'),url=href)
                sources.append(videosource)
            return sources
# Favourite folders created by a user
@plugin.cached(TTL=10)
def get_collectlist(uid):
    """List the favourite folders created by user *uid* (requires SESSDATA).

    Returns a list of dicts with 'name' (folder title) and 'href' (the
    folder media id); shows a notification when the login cookie is
    missing/expired.
    """
    clists = []
    apiheaders = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
        'Cookie': sessdata(), # SESSDATA cookie copied after logging in; valid ~1 month
        'Host': 'api.bilibili.com'
    }
    r = requests.get('https://api.bilibili.com/x/v3/fav/folder/created/list-all?up_mid='+str(uid)+'&jsonp=jsonp',headers=apiheaders)
    j = json.loads(r.text)
    try:
        c = j['data']['list']
        for index in range(len(c)):
            source = {}
            source['name'] = c[index]['title']
            source['href'] = c[index]['id']
            clists.append(source)
    except TypeError:
        # 'data'/'list' is null when not logged in or SESSDATA has expired
        dialog = xbmcgui.Dialog()
        dialog.notification('获取不到收藏信息','可能是sessdata已经过期', xbmcgui.NOTIFICATION_INFO, 5000)
    return clists
# Videos inside one favourite folder
@plugin.cached(TTL=10)
def get_collect(id,page):
    """List one page (20 items) of favourite folder *id* (requires SESSDATA).

    Returns a list of dicts with 'name', 'thumb' and 'href' (video page
    URL); pops a Kodi notification with the page/total counts.
    """
    clists = []
    apiheaders = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
        'Cookie': sessdata(), # SESSDATA cookie copied after logging in; valid ~1 month
        'Host': 'api.bilibili.com'
    }
    r = requests.get('https://api.bilibili.com/x/v3/fav/resource/list?media_id='+str(id)+'&pn='+str(page)+'&ps=20&keyword=&order=mtime&type=0&tid=0&jsonp=jsonp',headers=apiheaders)
    j = json.loads(r.text)
    c = j['data']['medias']
    for index in range(len(c)):
        source = {}
        source['name'] = c[index]['title']
        source['thumb'] = c[index]['cover']
        source['href'] = 'https://www.bilibili.com/video/' + c[index]['bvid']
        clists.append(source)
    dialog = xbmcgui.Dialog()
    allnum = j['data']['info']['media_count']
    # 20 items per page -> total page count
    allpage = int(int(allnum)/20) + 1
    dialog.notification('第'+str(page)+'/'+str(allpage)+'页','共' + str(allnum) + '个视频', xbmcgui.NOTIFICATION_INFO, 5000,False)
    return clists
# Followed shows: mode=1 is the anime (bangumi) follow list, mode=2 is dramas
@plugin.cached(TTL=10)
def get_zhui(uid,page,mode):
    """List user *uid*'s followed bangumi (mode 1) or dramas (mode 2).

    Returns one page (30 items) of dicts with 'name' (title, with a pink
    badge when present), 'thumb' and 'href' (season page URL); requires
    SESSDATA and notifies when it is missing/expired.
    """
    clists = []
    apiheaders = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
        'Cookie': sessdata(), # SESSDATA cookie copied after logging in; valid ~1 month
        'Host': 'api.bilibili.com'
    }
    r = requests.get('https://api.bilibili.com/x/space/bangumi/follow/list?type='+str(mode)+'&follow_status=0&pn='+str(page)+'&ps=30&vmid='+str(uid),headers=apiheaders)
    j = json.loads(r.text)
    try:
        c = j['data']['list']
        for index in range(len(c)):
            source = {}
            if c[index]['badge'] != '':
                source['name'] = c[index]['title'] + ' [COLOR pink][' +c[index]['badge'] +'][/COLOR]'
            else:
                source['name'] = c[index]['title']
            source['thumb'] = c[index]['cover']
            source['href'] = 'https://www.bilibili.com/bangumi/play/ss' + str(c[index]['season_id'])
            clists.append(source)
        dialog = xbmcgui.Dialog()
        allnum = j['data']['total']
        # 30 items per page -> total page count
        allpage = int(int(allnum)/30) + 1
        dialog.notification('第'+str(page)+'/'+str(allpage)+'页','共' + str(allnum) + '个节目', xbmcgui.NOTIFICATION_INFO, 5000,False)
    except KeyError:
        # 'data' lacks 'list'/'total' when not logged in or SESSDATA expired
        dialog = xbmcgui.Dialog()
        dialog.notification('获取不到追番/追剧信息','可能是sessdata已经过期', xbmcgui.NOTIFICATION_INFO, 5000)
    return clists
@plugin.route('/play/<name>/<url>/')
def play(name,url):
    """Build the quality / resolver selection menu for one playable URL.

    For bangumi episode URLs the offered resolvers depend on the region
    restriction found in the cached series title (HK/Macau/Taiwan-only
    content goes through overseas proxies); plain video URLs get the full
    ladder of official-API qualities.
    """
    ifbangumiurl = re.match('https://www.bilibili.com/bangumi/play/ep',url)
    ifvideourl = re.match('https://www.bilibili.com/video/',url)
    # NOTE(review): this parses as `ifbangumiurl or (ifvideourl != None)`;
    # it only behaves as intended because a match object is truthy.
    if ifbangumiurl or ifvideourl != None:
        if ifbangumiurl != None:
            # bangumi episode
            items = []
            if 'bgtitle' in cache:
                ti = cache['bgtitle']
                ti = ti.encode('utf-8')
            if re.search('僅限.*地區',ti):
                # region-locked (HK/Macau/Taiwan) content: overseas proxies only
                item = {'label': '[1080p]b站国外代理解析 [多谢 biliplus.com API]','path': plugin.url_for('api4', name=name,url=url,quality='80')}
                items.append(item)
                item = {'label': '[720p]b站国外代理解析 [多谢 biliplus.com API]','path': plugin.url_for('api4', name=name,url=url,quality='64')}
                items.append(item)
                item = {'label': '[480p]b站国外代理解析 [多谢 biliplus.com API]','path': plugin.url_for('api4', name=name,url=url,quality='32')}
                items.append(item)
                if re.search('僅限.*台.*地區',ti):
                    item = {'label': '[480p]b站台湾代理解析 [多谢 kghost.info API]','path': plugin.url_for('api5', name=name,url=url,quality='32',api=1)}
                    items.append(item)
                if re.search('僅限.*港.*地區',ti):
                    item = {'label': '[480p]b站香港代理解析 [多谢 kghost.info API]','path': plugin.url_for('api5', name=name,url=url,quality='32',api=2)}
                    items.append(item)
                item = {'label': '[320p]b站国外代理解析 [多谢 biliplus.com API]','path': plugin.url_for('api4', name=name,url=url,quality='16')}
                items.append(item)
                if re.search('僅限.*台.*地區',ti):
                    item = {'label': '[320p]b站台湾代理解析 [多谢 kghost.info API]','path': plugin.url_for('api5', name=name,url=url,quality='16',api=1)}
                    items.append(item)
                if re.search('僅限.*港.*地區',ti):
                    item = {'label': '[320p]b站香港代理解析 [多谢 kghost.info API]','path': plugin.url_for('api5', name=name,url=url,quality='16',api=2)}
                    items.append(item)
            else:
                # mainland content: official API, HD only with a session cookie
                if sessdata() != '':
                    #dialog = xbmcgui.Dialog()
                    #ok = dialog.ok('错误提示', sessdata())
                    item = {'label': '[1080p]使用 b站官方api2 解析 [万分感谢 Henryhaohao]','path': plugin.url_for('api3', name=name,url=url,quality=80)}
                    items.append(item)
                    item = {'label': '[720p]使用 b站官方api2 解析 [万分感谢 Henryhaohao]','path': plugin.url_for('api3', name=name,url=url,quality=64)}
                    items.append(item)
                else:
                    dialog = xbmcgui.Dialog()
                    dialog.notification('未设置sessdata','使用api2 720p以上解析,请在 高级功能 内填写sessdata', xbmcgui.NOTIFICATION_INFO, 5000)
                item = {'label': '[480p]使用 b站官方api2 解析 [万分感谢 Henryhaohao]','path': plugin.url_for('api3', name=name,url=url,quality=32)}
                items.append(item)
                item = {'label': '[320p]使用 b站官方api2 解析 [万分感谢 Henryhaohao]','path': plugin.url_for('api3', name=name,url=url,quality=16)}
                items.append(item)
                #item = {'label': '[480p][6分钟试看]使用 b站官方api3 解析','path': plugin.url_for('bangumiapi', name=name,url=url)}
                #items.append(item)
            return items
        else:
            # plain video: offer every quality through both official APIs
            items = []
            item = {'label': '[1080p]使用 b站官方api1 解析 [万分感谢 Henryhaohao]','path': plugin.url_for('api1', name=name,url=url,quality=80)}
            items.append(item)
            item = {'label': '[1080p]使用 b站官方api2 解析 [万分感谢 Henryhaohao]','path': plugin.url_for('api3', name=name,url=url,quality=80)}
            items.append(item)
            item = {'label': '[原画]使用 xbeibeix.com api 解析','path': plugin.url_for('api2', name=name,url=url)}
            items.append(item)
            item = {'label': '[720p]使用 b站官方api1 解析 [万分感谢 Henryhaohao]','path': plugin.url_for('api1', name=name,url=url,quality=64)}
            items.append(item)
            item = {'label': '[720p]使用 b站官方api2 解析 [万分感谢 Henryhaohao]','path': plugin.url_for('api3', name=name,url=url,quality=64)}
            items.append(item)
            item = {'label': '[480p]使用 b站官方api1 解析 [万分感谢 Henryhaohao]','path': plugin.url_for('api1', name=name,url=url,quality=32)}
            items.append(item)
            item = {'label': '[480p]使用 b站官方api2 解析 [万分感谢 Henryhaohao]','path': plugin.url_for('api3', name=name,url=url,quality=32)}
            items.append(item)
            item = {'label': '[320p]使用 b站官方api1 解析 [万分感谢 Henryhaohao]','path': plugin.url_for('api1', name=name,url=url,quality=16)}
            items.append(item)
            item = {'label': '[320p]使用 b站官方api2 解析 [万分感谢 Henryhaohao]','path': plugin.url_for('api3', name=name,url=url,quality=16)}
            items.append(item)
            return items
@plugin.route('/bangumiapi/<name>/<url>/')
# resolve a bangumi episode URL (preview API)
def bangumiapi(name,url):
    """Resolve a bangumi episode through the preview API (single 540p item).

    Error code -10403 means VIP-only content; any other non-zero code is
    reported as an unknown/changed API.
    """
    j = get_bangumijson(url)
    items = []
    if j['code'] == 0:
        k = j['result']['durl']
        item = {'label': '[540P]'+name,'path': k[0]['url'],'is_playable': True}
        items.append(item)
    else:
        if j['code'] == -10403:
            # error code reserved for VIP ("大会员") content
            dialog = xbmcgui.Dialog()
            ok = dialog.ok('错误提示', '此为大会员专享视频,无法解析')
        else:
            # any other code: the upstream API probably changed
            dialog = xbmcgui.Dialog()
            ok = dialog.ok('错误提示', '未知的api错误代码,可能是b站官方更改了接口')
    return items
@plugin.route('/api1/<name>/<url>/<quality>/')
# resolve through official API #1
def api1(name,url,quality):
    """Resolve a plain video via official API #1 at the given quality.

    Each returned stream URL is suffixed with a `|header=value&...` block
    (Kodi's per-URL HTTP header syntax) carrying the Referer/UA Bilibili
    requires.  Also appends links to the uploader page and comments.
    """
    mp4url = get_api1(url,quality)
    mp4info = get_mp4info(url)
    img = mp4info['img']
    items = []
    head = '|User-Agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36&Referer=https://www.bilibili.com&Range=bytes=0-&Connection=keep-alive&Origin=https://www.bilibili.com&Accept-Encoding=gzip, deflate, br'
    for index in range(len(mp4url)):
        item = {'label': name+' - '+mp4info['title'].encode('utf-8'),'path': mp4url[index]+head,'is_playable': True,'info':mp4info,'info_type':'video','thumbnail': img,'icon': img}
        items.append(item)
    face = mp4info['face']
    item = {'label': '查看 [COLOR yellow]'+mp4info['upname'].encode('utf-8') +'[/COLOR] 的主页','path': plugin.url_for('up',uid=mp4info['uid'],page=1),'thumbnail': face,'icon': face}
    items.append(item)
    item = {'label': '评论区 [COLOR yellow]' + mp4info['reply'] + '[/COLOR]','path': plugin.url_for('conn',url=url)}
    items.append(item)
    return items
@plugin.route('/api2/<name>/<url>/')
# resolve through the third-party xbeibeix.com API (original quality)
def api2(name,url):
    """Resolve a video via the xbeibeix.com API (original quality only).

    Builds one playable item (with Kodi `|header` suffix for the Referer
    Bilibili requires) plus links to the uploader's page and the comment
    section.
    """
    mp4url = get_api2(url)
    # BUG FIX: removed a leftover debug textviewer that popped a blocking
    # "错误提示" dialog showing the raw URL before every playback.
    mp4info = get_mp4info(url)
    img = mp4info['img']
    items = []
    head = '|User-Agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36&Referer=https://www.bilibili.com'
    item = {'label': name+' - '+mp4info['title'].encode('utf-8'),'path': str(mp4url)+head,'is_playable': True,'info':mp4info,'info_type':'video','thumbnail': img,'icon': img}
    items.append(item)
    face = mp4info['face']
    item = {'label': '查看 [COLOR yellow]'+mp4info['upname'].encode('utf-8') +'[/COLOR] 的主页','path': plugin.url_for('up',uid=mp4info['uid'],page=1),'thumbnail': face,'icon': face}
    items.append(item)
    item = {'label': '评论区 [COLOR yellow]' + mp4info['reply'] + '[/COLOR]','path': plugin.url_for('conn',url=url)}
    items.append(item)
    return items
@plugin.route('/api3/<name>/<url>/<quality>/')
# resolve through official API #2 (works for both bangumi and plain video)
def api3(name,url,quality):
    """Resolve a bangumi episode or plain video via official API #2.

    Bangumi and video URLs use different info getters; the
    uploader/comments entries are only added for plain videos.
    """
    ifbangumiurl = re.match('https://www.bilibili.com/bangumi/play/ep',url)
    ifvideourl = re.match('https://www.bilibili.com/video/',url)
    # NOTE(review): parses as `ifbangumiurl or (ifvideourl != None)` — works
    # only because a match object is truthy.  If the URL matches neither
    # pattern, `items` is never assigned and the final return raises
    # NameError — TODO confirm callers never pass other URL shapes.
    if ifbangumiurl or ifvideourl != None:
        if ifbangumiurl != None:
            mp4url = get_api3(url,quality)
            mp4info = get_bangumiinfo(url)
        if ifvideourl != None:
            mp4url = get_api3(url,quality)
            mp4info = get_mp4info(url)
        img = mp4info['img']
        items = []
        head = '|User-Agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36&Referer=https://www.bilibili.com'
        for index in range(len(mp4url)):
            item = {'label': name+' - '+mp4info['title'].encode('utf-8'),'path': mp4url[index]+head,'is_playable': True,'info':mp4info,'info_type':'video','thumbnail': img,'icon': img}
            items.append(item)
        if ifvideourl != None:
            face = mp4info['face']
            item = {'label': '查看 [COLOR yellow]'+mp4info['upname'].encode('utf-8') +'[/COLOR] 的主页','path': plugin.url_for('up',uid=mp4info['uid'],page=1),'thumbnail': face,'icon': face}
            items.append(item)
            item = {'label': '评论区 [COLOR yellow]'+ mp4info['reply'] + '[/COLOR]','path': plugin.url_for('conn',url=url)}
            items.append(item)
    return items
#代理解析1
@plugin.route('/api4/<name>/<url>/<quality>/')
def api4(name,url,quality):
    """Resolve a region-locked bangumi episode via the biliplus.com overseas proxy."""
    mp4url = get_api4(url,quality)
    mp4info = get_bangumiinfo(url)
    # the proxy getter returns a list; only the first stream is used
    mp4url =mp4url[0]
    #if mp4url.find('upos-hz-mirrorakam.akamaized.net') != -1:
    #mp4url = mp4url.replace('upos-hz-mirrorakam.akamaized.net','calm-meadow-79f1.zhengfan2014.workers.dev')
    img = mp4info['img']
    items = []
    item = {'label': name+' - '+mp4info['title'].encode('utf-8'),'path': mp4url,'is_playable': True,'info':mp4info,'info_type':'video','thumbnail': img,'icon': img}
    items.append(item)
    item = {'label': '评论区 [COLOR yellow]'+ mp4info['reply'] + '[/COLOR]','path': plugin.url_for('conn',url=url)}
    items.append(item)
    return items
#代理解析2
@plugin.route('/api5/<name>/<url>/<quality>/<api>/')
def api5(name,url,quality,api):
    """Resolve a region-locked episode via the kghost.info proxy.

    api: 1 = Taiwan proxy, 2 = Hong Kong proxy (see play()).
    """
    mp4url = get_api5(url,quality,api)
    #dialog = xbmcgui.Dialog()
    #dialog.textviewer('评论区',str(mp4url))
    mp4info = get_bangumiinfo(url)
    img = mp4info['img']
    items = []
    item = {'label': name+' - '+mp4info['title'].encode('utf-8'),'path': mp4url,'is_playable': True,'info':mp4info,'info_type':'video','thumbnail': img,'icon': img}
    items.append(item)
    item = {'label': '评论区 [COLOR yellow]'+ mp4info['reply'] + '[/COLOR]','path': plugin.url_for('conn',url=url)}
    items.append(item)
    return items
@plugin.route('/sources/<url>/')
def sources(url):
    """List the parts (pages) of one video, sorted by label."""
    entries = []
    for src in get_sources(url):
        entries.append({
            'label': src['name'],
            'path': src['href']
        })
    entries.sort(key=lambda entry: entry['label'])
    return entries
@plugin.route('/category/<url>/')
def category(url):
    """List the videos of one front-page category, in API order."""
    entries = []
    for video in get_videos(url):
        entries.append({
            'label': video['name'],
            'path': plugin.url_for('sources', url=video['href']),
            'thumbnail': video['thumb'],
            'icon': video['thumb'],
        })
    return entries
@plugin.route('/collectlist')
def collectlist():
    """List the user's favourite folders (empty unless a uid is configured)."""
    if 'uid' not in cache:
        return []
    entries = []
    for fav in get_collectlist(cache['uid']):
        entries.append({
            'label': fav['name'],
            'path': plugin.url_for('collect', id=fav['href'], page=1),
        })
    return entries
@plugin.route('/collect/<id>/<page>/')
def collect(id, page):
    """List one page (20 items) of a favourite folder, plus a next-page link."""
    videos = get_collect(id, page)
    entries = []
    for video in videos:
        entries.append({
            'label': video['name'],
            'path': plugin.url_for('sources', url=video['href']),
            'thumbnail': video['thumb'],
            'icon': video['thumb'],
        })
    # A full page implies there may be more.
    if len(videos) == 20:
        entries.append({
            'label': u'[COLOR yellow]下一页[/COLOR]',
            'path': plugin.url_for('collect', page=int(page) + 1, id=id),
        })
    return entries
@plugin.route('/zhui/<page>/<mode>/')
def zhui(page, mode):
    """List one page (30 items) of followed bangumi (mode=1) or dramas (mode=2)."""
    if 'uid' not in cache:
        return []
    shows = get_zhui(cache['uid'], page, mode)
    entries = []
    for show in shows:
        entries.append({
            'label': show['name'],
            'path': plugin.url_for('sources', url=show['href']),
            'thumbnail': show['thumb'],
            'icon': show['thumb'],
        })
    # A full page implies there may be more.
    if len(shows) == 30:
        entries.append({
            'label': u'[COLOR yellow]下一页[/COLOR]',
            'path': plugin.url_for('zhui', page=int(page) + 1, mode=mode),
        })
    return entries
@plugin.route('/')
def index():
    """Root menu: the front-page categories followed by the static feature entries."""
    entries = [{
        'label': cat['name'],
        'path': plugin.url_for('category', url=cat['link']),
    } for cat in get_categories()]
    static = (
        (u'直播', 'live'),
        (u'收藏/追番', 'my'),
        (u'搜索/av/bv号', 'sea'),
        (u'设置', 'vip'),
    )
    for label, route in static:
        entries.append({
            'label': u'[COLOR yellow]' + label + u'[/COLOR]',
            'path': plugin.url_for(route),
        })
    return entries
@plugin.route('/my/')
def my():
    """Personal menu (favourites / follows); requires uid and sessdata to be set."""
    items = []
    if 'uid' in cache and 'sessdata' in cache and cache['uid'] != '' and cache['sessdata'] != '':
        items.append({
            'label': u'收藏列表',
            'path': plugin.url_for('collectlist'),
        })
        items.append({
            'label': u'追番列表',
            'path': plugin.url_for('zhui',page=1,mode=1),
        })
        items.append({
            'label': u'追剧列表',
            'path': plugin.url_for('zhui',page=1,mode=2),
        })
        items.append({
            'label': u'投稿的视频',
            'path': plugin.url_for('up',page=1,uid=cache['uid']),
        })
    else:
        # placeholder entry asking the user to configure uid/sessdata first
        items.append({
            'label': u'请先设置uid和sessdata的值后,方可使用',
            'path': 'error',
        })
    return items
@plugin.route('/sea/')
def sea():
    """Search menu: every entry goes through history() so past queries are offered."""
    items = []
    items.append({
        'label': u'综合搜索',
        'path': plugin.url_for('history',name='输入关键词搜索',url='search'),
    })
    items.append({
        'label': u'搜索视频',
        'path': plugin.url_for('history',name='输入关键词搜索视频',url='vidsearch'),
    })
    items.append({
        'label': u'输入av或者bv号或者链接',
        'path': plugin.url_for('history',name='输入av或者bv号或者链接打开视频',url='vid'),
    })
    items.append({
        'label': u'搜索番剧',
        'path': plugin.url_for('history',name='输入关键词搜索番剧',url='bgsearch'),
    })
    items.append({
        'label': u'搜索影视',
        'path': plugin.url_for('history',name='输入关键词搜索电影电视剧纪录片',url='movsearch'),
    })
    items.append({
        'label': u'搜索up主',
        'path': plugin.url_for('history',name='输入关键词搜索up主',url='upsearch'),
    })
    items.append({
        'label': u'搜索正在直播的直播间',
        'path': plugin.url_for('history',name='输入关键词搜索直播间(搜索主播名字进直播间请用搜索up主)',url='livesearch'),
    })
    items.append({
        'label': u'输入房间号进入直播间',
        'path': plugin.url_for('history',name='输入房间号进入直播间',url='roomid'),
    })
    return items
@plugin.route('/vip/')
def vip():
    """Settings menu: shows/edits uid and SESSDATA, and toggles the fun 'bq' stat."""
    items = []
    if 'uid' in cache and cache['uid'] != '':
        items.append({
            'label': u'设置uid (uid:'+cache['uid'] +')',
            'path': plugin.url_for('input',key='uid',value='请输入uid:'),
        })
    else:
        items.append({
            'label': u'设置uid (uid为空)',
            'path': plugin.url_for('input',key='uid',value='请输入uid:'),
        })
    if 'sessdata' in cache and cache['sessdata'] != '':
        items.append({
            'label': u'设置SESSDATA (SESSDATA:'+cache['sessdata'] +')',
            'path': plugin.url_for('input',key='sessdata',value='请输入sessdata:(sessdata有效期30天,过期后只能解析480p的视频)'),
        })
    else:
        items.append({
            'label': u'设置SESSDATA (sessdata为空)',
            'path': plugin.url_for('input',key='sessdata',value='请输入sessdata:(sessdata有效期30天,过期后只能解析480p的视频)'),
        })
    # chushihua() presumably initialises the flag and returns its display
    # state — TODO confirm against its definition elsewhere in this file.
    items.append({
        'label': 'up主视频白嫖率计算 - 仅供娱乐 (状态:'+chushihua('bq',0) +')',
        'path': plugin.url_for('switch',key='bq'),
    })
    return items
@plugin.route('/search/<value>/<page>/')
def search(value, page):
    """Combined search: prompt for a keyword (or reuse one) and list the results.

    value: previous keyword to reuse/pre-fill, or 'null' to force the keyboard.
    page:  1-based result page.
    """
    items = []
    keyword = None
    if value != 'null' and int(page) != 1:
        # Direct paging: keep the keyword from the previous page.
        keyword = value
    else:
        keyboard = xbmc.Keyboard('', '请输入搜索内容')
        xbmc.sleep(1500)
        hi = his['search']
        if value != 'null':
            keyboard.setDefault(value)
        keyboard.doModal()
        if keyboard.isConfirmed():
            keyword = keyboard.getText()
            if keyword != '':
                # Remember the query with a timestamp for the history menu.
                hi[keyword] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    if keyword is None:
        # Keyboard cancelled (replaces the fragile `except UnboundLocalError`).
        dialog = xbmcgui.Dialog()
        dialog.notification('提示', '您取消了搜索', xbmcgui.NOTIFICATION_INFO, 5000, False)
        return items
    videos = get_search(keyword, page)
    items = [{
        'label': video['name'],
        'path': plugin.url_for('sources', url=video['href']),
        'thumbnail': video['thumb'],
        'icon': video['thumb']
    } for video in videos]
    if len(videos) >= 20:
        items.append({
            'label': u'[COLOR yellow]下一页[/COLOR]',
            # BUG FIX: the next-page link routed to 'vidsearch' (copy/paste
            # from the video search) and passed the raw `value` (often
            # 'null') instead of the typed keyword, which re-prompted the
            # keyboard on page 2.
            'path': plugin.url_for('search', page=int(page) + 1, value=keyword),
        })
    return items
@plugin.route('/bgsearch/<value>/<page>/')
def bgsearch(value, page):
    """Bangumi search: prompt for a keyword (or reuse one) and list the results.

    value: previous keyword to reuse/pre-fill, or 'null' to force the keyboard.
    page:  1-based result page.
    """
    items = []
    keyword = None
    if value != 'null' and int(page) != 1:
        # Direct paging: keep the keyword from the previous page.
        keyword = value
    else:
        keyboard = xbmc.Keyboard('', '请输入搜索内容')
        xbmc.sleep(1500)
        hi = his['bgsearch']
        if value != 'null':
            keyboard.setDefault(value)
        keyboard.doModal()
        if keyboard.isConfirmed():
            keyword = keyboard.getText()
            if keyword != '':
                # Remember the query with a timestamp for the history menu.
                hi[keyword] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    if keyword is None:
        # Keyboard cancelled (replaces the fragile `except UnboundLocalError`).
        dialog = xbmcgui.Dialog()
        dialog.notification('提示', '您取消了搜索', xbmcgui.NOTIFICATION_INFO, 5000, False)
        return items
    videos = get_bgsearch(keyword, page)
    items = [{
        'label': video['name'],
        'path': plugin.url_for('sources', url=video['href']),
        'thumbnail': video['thumb'],
        'icon': video['thumb']
    } for video in videos]
    if len(videos) >= 20:
        items.append({
            'label': u'[COLOR yellow]下一页[/COLOR]',
            # BUG FIX: pass the actual keyword (not the raw `value`, which is
            # 'null' after a keyboard entry) so page 2 searches the same term
            # instead of re-prompting.
            'path': plugin.url_for('bgsearch', page=int(page) + 1, value=keyword),
        })
    return items
@plugin.route('/movsearch/<value>/<page>/')
def movsearch(value, page):
    """Film/TV/documentary search: prompt for a keyword (or reuse one).

    value: previous keyword to reuse/pre-fill, or 'null' to force the keyboard.
    page:  1-based result page.
    """
    items = []
    keyword = None
    if value != 'null' and int(page) != 1:
        # Direct paging: keep the keyword from the previous page.
        keyword = value
    else:
        keyboard = xbmc.Keyboard('', '请输入搜索内容')
        xbmc.sleep(1500)
        hi = his['movsearch']
        if value != 'null':
            keyboard.setDefault(value)
        keyboard.doModal()
        if keyboard.isConfirmed():
            keyword = keyboard.getText()
            if keyword != '':
                # Remember the query with a timestamp for the history menu.
                hi[keyword] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    if keyword is None:
        # Keyboard cancelled (replaces the fragile `except UnboundLocalError`).
        dialog = xbmcgui.Dialog()
        dialog.notification('提示', '您取消了搜索', xbmcgui.NOTIFICATION_INFO, 5000, False)
        return items
    videos = get_movsearch(keyword, page)
    items = [{
        'label': video['name'],
        'path': plugin.url_for('sources', url=video['href']),
        'thumbnail': video['thumb'],
        'icon': video['thumb']
    } for video in videos]
    if len(videos) >= 20:
        items.append({
            'label': u'[COLOR yellow]下一页[/COLOR]',
            # BUG FIX: pass the actual keyword (not the raw `value`, which is
            # 'null' after a keyboard entry) so page 2 searches the same term.
            'path': plugin.url_for('movsearch', page=int(page) + 1, value=keyword),
        })
    return items
@plugin.route('/vidsearch/<value>/<page>/')
def vidsearch(value, page):
    """Video search: prompt for a keyword (or reuse one) and list the results.

    value: previous keyword to reuse/pre-fill, or 'null' to force the keyboard.
    page:  1-based result page.
    """
    items = []
    keyword = None
    if value != 'null' and int(page) != 1:
        # Direct paging: keep the keyword from the previous page.
        keyword = value
    else:
        keyboard = xbmc.Keyboard('', '请输入搜索内容')
        xbmc.sleep(1500)
        hi = his['vidsearch']
        if value != 'null':
            keyboard.setDefault(value)
        keyboard.doModal()
        if keyboard.isConfirmed():
            keyword = keyboard.getText()
            if keyword != '':
                # Remember the query with a timestamp for the history menu.
                hi[keyword] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    if keyword is None:
        # Keyboard cancelled (replaces the fragile `except UnboundLocalError`).
        dialog = xbmcgui.Dialog()
        dialog.notification('提示', '您取消了搜索', xbmcgui.NOTIFICATION_INFO, 5000, False)
        return items
    videos = get_vidsearch(keyword, page)
    items = [{
        'label': video['name'],
        'path': plugin.url_for('sources', url=video['href']),
        'thumbnail': video['thumb'],
        'icon': video['thumb']
    } for video in videos]
    if len(videos) >= 20:
        items.append({
            'label': u'[COLOR yellow]下一页[/COLOR]',
            # BUG FIX: pass the actual keyword (not the raw `value`, which is
            # 'null' after a keyboard entry) so page 2 searches the same term.
            'path': plugin.url_for('vidsearch', page=int(page) + 1, value=keyword),
        })
    return items
@plugin.route('/livesearch/<value>/<page>/')
def livesearch(value, page):
    """Live-room search: prompt for a keyword (or reuse one) and list rooms.

    value: previous keyword to reuse/pre-fill, or 'null' to force the keyboard.
    page:  1-based result page (40 rooms per page).
    """
    items = []
    keyword = None
    if value != 'null' and int(page) != 1:
        # Direct paging: keep the keyword from the previous page.
        keyword = value
    else:
        keyboard = xbmc.Keyboard('', '请输入搜索内容')
        xbmc.sleep(1500)
        hi = his['livesearch']
        if value != 'null':
            keyboard.setDefault(value)
        keyboard.doModal()
        if keyboard.isConfirmed():
            keyword = keyboard.getText()
            if keyword != '':
                # Remember the query with a timestamp for the history menu.
                hi[keyword] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    if keyword is None:
        # Keyboard cancelled (replaces the fragile `except UnboundLocalError`).
        dialog = xbmcgui.Dialog()
        dialog.notification('提示', '您取消了搜索', xbmcgui.NOTIFICATION_INFO, 5000, False)
        return items
    videos = get_livesearch(keyword, page)
    items = [{
        'label': video['name'],
        'path': plugin.url_for('room', id=video['href']),
        'thumbnail': video['thumb'],
        'icon': video['thumb']
    } for video in videos]
    if len(videos) >= 40:
        items.append({
            'label': u'[COLOR yellow]下一页[/COLOR]',
            # BUG FIX: pass the actual keyword (not the raw `value`, which is
            # 'null' after a keyboard entry) so page 2 searches the same term.
            'path': plugin.url_for('livesearch', page=int(page) + 1, value=keyword),
        })
    return items
@plugin.route('/upsearch/<value>/<page>/')
def upsearch(value, page):
    """Uploader search: prompt for a keyword (or reuse one) and list uploaders.

    value: previous keyword to reuse/pre-fill, or 'null' to force the keyboard.
    page:  1-based result page.
    """
    items = []
    keyword = None
    if value != 'null' and int(page) != 1:
        # Direct paging: keep the keyword from the previous page.
        keyword = value
    else:
        keyboard = xbmc.Keyboard('', '请输入搜索内容')
        xbmc.sleep(1500)
        hi = his['upsearch']
        if value != 'null':
            keyboard.setDefault(value)
        keyboard.doModal()
        if keyboard.isConfirmed():
            keyword = keyboard.getText()
            if keyword != '':
                # Remember the query with a timestamp for the history menu.
                hi[keyword] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    if keyword is None:
        # Keyboard cancelled (replaces the fragile `except UnboundLocalError`).
        dialog = xbmcgui.Dialog()
        dialog.notification('提示', '您取消了搜索', xbmcgui.NOTIFICATION_INFO, 5000, False)
        return items
    videos = get_upsearch(keyword, page)
    items = [{
        'label': video['name'],
        'path': plugin.url_for('up', uid=video['href'], page=1),
        'thumbnail': video['thumb'],
        'icon': video['thumb']
    } for video in videos]
    if len(videos) >= 20:
        items.append({
            'label': u'[COLOR yellow]下一页[/COLOR]',
            # BUG FIX: pass the actual keyword (not the raw `value`, which is
            # 'null' after a keyboard entry) so page 2 searches the same term.
            'path': plugin.url_for('upsearch', page=int(page) + 1, value=keyword),
        })
    return items
@plugin.route('/vid/<value>/')
def vid(value):
    """Open a video directly from an av/BV id or any URL containing one.

    Extracts the first BV id or av id from the typed text; if both forms
    are present the av id wins (the second match overwrites `keyword`).
    """
    keyboard = xbmc.Keyboard('', '请输入av号或者bv号或者url:')
    xbmc.sleep(1500)
    if value != 'null':
        keyboard.setDefault(value)
    keyboard.doModal()
    hi = his['vid']
    if (keyboard.isConfirmed()):
        if re.search('[Bb]{1}[Vv]{1}[a-zA-Z0-9]+',keyboard.getText()) or re.search('[aA]{1}[vV]{1}[0-9]+',keyboard.getText()):
            if re.search('[Bb]{1}[Vv]{1}[a-zA-Z0-9]+',keyboard.getText()):
                keyword = re.search('[Bb]{1}[Vv]{1}[a-zA-Z0-9]+',keyboard.getText()).group()
                dialog = xbmcgui.Dialog()
                dialog.notification('BV号提取成功',keyword, xbmcgui.NOTIFICATION_INFO, 5000,False)
                hi[keyword] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            if re.search('[aA]{1}[vV]{1}[0-9]+',keyboard.getText()):
                keyword = re.search('[aA]{1}[vV]{1}[0-9]+',keyboard.getText()).group()
                dialog = xbmcgui.Dialog()
                dialog.notification('AV号提取成功',keyword, xbmcgui.NOTIFICATION_INFO, 5000,False)
                hi[keyword] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            sources = get_sources('https://www.bilibili.com/video/'+str(keyword))
            items = [{
                'label': source['name'],
                'path': source['href'],
                #'thumbnail': source['thumb'],
                #'icon': source['thumb'],
            } for source in sources]
            #sorted_items = sorted(items, key=lambda item: item['label'])
            return items
        else:
            # NOTE(review): this branch means "no av/bv id found in the
            # input", but the message text says "you cancelled the input".
            dialog = xbmcgui.Dialog()
            dialog.notification('提示', '您取消了输入', xbmcgui.NOTIFICATION_INFO, 5000,False)
@plugin.route('/roomid/<value>/')
def roomid(value):
    """Enter a live room by its numeric id (prompting when value is 'null').

    Non-numeric input will make int() raise — assumed the user types
    digits only, as the prompt requests.
    """
    if value == 'null':
        keyboard = xbmc.Keyboard('', '请输入房间号(纯数字):')
        xbmc.sleep(1500)
        keyboard.doModal()
        hi = his['roomid']
        if (keyboard.isConfirmed()):
            keyword = keyboard.getText()
            hi[keyword] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    else:
        keyword = value
    items = []
    id = int(keyword)
    mp4list = get_roommp4(id)
    mp4info = get_roominfo(id)
    img = mp4info['img']
    for index in range(len(mp4list)):
        title = ''
        if index == 0:
            title += '[主线]'
        else:
            title += '[备线'+str(index)+']'
        title += '[原画]' + mp4info['title'].encode('utf-8')
        item = {'label': title,'path':mp4list[index],'is_playable': True,'info':mp4info,'info_type':'video','thumbnail': img,'icon': img}
        items.append(item)
    if mp4info['status'] == '未开播':
        # rd_live_gz() presumably returns a random "offline" blurb — TODO
        # confirm against its definition elsewhere in this file.
        item = {'label': '[未开播]' +rd_live_gz(),'path':'0','is_playable': True,'info':mp4info,'info_type':'video','thumbnail': img,'icon': img}
        items.append(item)
    return items
@plugin.route('/live/')
def live():
    """Live menu: "all rooms" plus one entry per live area (parent_area_id)."""
    items = []
    items.append({'label': '全部','path': plugin.url_for('livelist', page=1)})
    # Each area uses the getRoomList endpoint with its own parent_area_id
    # and default sort; livelistmore() appends the page number.
    videos = [{'name':'网游','url':plugin.url_for('livelistmore', url='https://api.live.bilibili.com/room/v3/area/getRoomList?platform=web&parent_area_id=2&cate_id=0&area_id=0&sort_type=sort_type_124&page_size=30&tag_version=1',page=1)},
    {'name':'手游','url':plugin.url_for('livelistmore', url='https://api.live.bilibili.com/room/v3/area/getRoomList?platform=web&parent_area_id=3&cate_id=0&area_id=0&sort_type=sort_type_121&page_size=30&tag_version=1',page=1)},
    {'name':'单机','url':plugin.url_for('livelistmore', url='https://api.live.bilibili.com/room/v3/area/getRoomList?platform=web&parent_area_id=6&cate_id=0&area_id=0&sort_type=sort_type_150&page_size=30&tag_version=1',page=1)},
    {'name':'娱乐','url':plugin.url_for('livelistmore', url='https://api.live.bilibili.com/room/v3/area/getRoomList?platform=web&parent_area_id=1&cate_id=0&area_id=0&sort_type=sort_type_152&page_size=30&tag_version=1',page=1)},
    {'name':'电台','url':plugin.url_for('livelistmore', url='https://api.live.bilibili.com/room/v3/area/getRoomList?platform=web&parent_area_id=5&cate_id=0&area_id=0&sort_type=income&page_size=30&tag_version=1',page=1)},
    {'name':'绘画','url':plugin.url_for('livelistmore', url='https://api.live.bilibili.com/room/v3/area/getRoomList?platform=web&parent_area_id=4&cate_id=0&area_id=0&sort_type=sort_type_56&page_size=30&tag_version=1',page=1)}]
    for video in videos:
        items.append({'label': video['name'],'path': video['url']})
    return items
@plugin.route('/livelist/<page>/')
def livelist(page):
    """List one page (30 rooms) across all live areas, plus a next-page link."""
    rooms = get_live(page)
    entries = []
    for room_item in rooms:
        entries.append({
            'label': room_item['name'],
            'path': plugin.url_for('room', id=room_item['href']),
            'thumbnail': room_item['thumb'],
            'icon': room_item['thumb'],
        })
    # A full page implies there may be more.
    if len(rooms) == 30:
        entries.append({
            'label': u'[COLOR yellow]下一页[/COLOR]',
            'path': plugin.url_for('livelist', page=int(page) + 1),
        })
    return entries
@plugin.route('/livelistmore/<url>/<page>/')
def livelistmore(url, page):
    """List one page (30 rooms) of a single live area given its API URL."""
    rooms = get_livemore(url, page)
    entries = []
    for room_item in rooms:
        entries.append({
            'label': room_item['name'],
            'path': plugin.url_for('room', id=room_item['href']),
            'thumbnail': room_item['thumb'],
            'icon': room_item['thumb'],
        })
    # A full page implies there may be more.
    if len(rooms) == 30:
        entries.append({
            'label': u'[COLOR yellow]下一页[/COLOR]',
            'path': plugin.url_for('livelistmore', url=url, page=int(page) + 1),
        })
    return entries
@plugin.route('/room/<id>/')
def room(id):
    """Play entries for one live room: the main line first, then numbered backups."""
    mp4list = get_roommp4(id)
    mp4info = get_roominfo(id)
    img = mp4info['img']
    entries = []
    for idx, stream in enumerate(mp4list):
        prefix = '[主线]' if idx == 0 else '[备线' + str(idx) + ']'
        entries.append({
            'label': prefix + '[原画]' + mp4info['title'].encode('utf-8'),
            'path': stream,
            'is_playable': True,
            'info': mp4info,
            'info_type': 'video',
            'thumbnail': img,
            'icon': img,
        })
    return entries
@plugin.route('/up/<uid>/<page>/')
def up(uid,page):
    """Uploader page: info + live-room entries (page 1 only), then their videos.

    Appends a next-page link whenever a full page of 30 videos came back.
    """
    videos = get_up(uid,page)
    items = []
    if int(page) == 1:
        u = json.loads(get_up_baseinfo(uid))
        r = json.loads(get_up_roomold(uid))
        items.append({
            'label': u'关于[COLOR yellow]'+ u['data']['name'] + u'[/COLOR]目前已知的情报',
            # NOTE(review): the endpoint function object is passed instead of
            # its name string as elsewhere — TODO confirm xbmcswift2 accepts
            # this (same pattern used for `room` and `up` below).
            'path': plugin.url_for(upinfo,uid=uid),
            'thumbnail': u['data']['face'],
            'icon': u['data']['face'],
        })
        if int(r['data']['liveStatus']) == 1:
            livename = u'通往[COLOR yellow]'+ u['data']['name'] +u'[/COLOR]的直播间:' + u'[COLOR red][·LIVE][/COLOR]' + r['data']['title']
        else:
            # zh() presumably formats the online-viewer count — TODO confirm.
            livename = u'[COLOR yellow]'+ u['data']['name'] +u'[/COLOR]的直播间:' + u'[在线' + zh(r['data']['online']).decode('utf-8') + u']' + u'[COLOR green][Close][/COLOR]' + r['data']['title']
        items.append({
            'label': livename,
            'path': plugin.url_for(room,id=r['data']['roomid']),
            'thumbnail': r['data']['cover'],
            'icon': r['data']['cover'],
        })
    for video in videos:
        items.append({'label': video['name'],'path': plugin.url_for('sources', url=video['href']),'thumbnail': video['thumb'],'icon': video['thumb']})
    if len(videos) == 30:
        items.append({
            'label': '[COLOR yellow]下一页[/COLOR] ',
            'path': plugin.url_for(up,uid=uid,page=int(page)+1),
        })
    return items
# @plugin.route('/liveplay/<url>/<q>/')
# def liveplay(url,q):
# items = []
# q = eval(q)
# for index in range(len(q)):
# #qn = re.search('&qn=\d+',url).group()
# #url = url.replace(qn,'&qn='+q[index]['qn'])
# item = {'label': q[index]['name'],'path': url+'&qn='+str(q[index]['qn']),'is_playable': True}
# items.append(item)
# return items
@plugin.route('/input/<key>/<value>/')
def input(key,value):
    """Prompt for a string (prompt text = *value*) and store it under cache[key].

    The value is written only after the user confirms it in a yes/no
    dialog; the success toast is shown only when something was saved.
    """
    keyboard = xbmc.Keyboard('', value)
    xbmc.sleep(1500)
    keyboard.doModal()
    if keyboard.isConfirmed():
        text = keyboard.getText()
        dialog = xbmcgui.Dialog()
        if dialog.yesno('确认该值正确吗?', text):
            cache[key] = text
            # BUG FIX: the "保存成功" toast used to be shown even when the
            # user answered "no" and nothing was saved.
            dialog.notification('提示', '保存成功', xbmcgui.NOTIFICATION_INFO, 5000, False)
@plugin.route('/switch/<key>/')
def switch(key):
    """Toggle a 0/1 feature flag stored in the plugin cache.

    A missing or non-1 value is treated as "off", so toggling an
    uninitialised key no longer raises KeyError.
    """
    if cache.get(key) == 1:
        cache[key] = 0
    else:
        cache[key] = 1
@plugin.route('/conn/<url>/')
def conn(url):
    """Show the comment section in a text viewer: hot comments, then newest."""
    sections = (
        ('********************热门评论********************\n', '2'),
        ('\n********************最新评论********************\n', '1'),
    )
    text = ''
    for header, sort_mode in sections:
        text += header + get_comm(url, sort_mode)
    dialog = xbmcgui.Dialog()
    dialog.textviewer('评论区', text)
@plugin.route('/upinfo/<uid>/')
def upinfo(uid):
    """Show everything we know about uploader *uid* in a text viewer."""
    text = get_upinfo(uid)
    dialog = xbmcgui.Dialog()
    # BUG FIX: the viewer title said '评论区' (comment section) — a
    # copy/paste from conn(); this screen shows uploader information.
    dialog.textviewer('UP主情报',text)
@plugin.route('/labels/<label>/')
def show_label(label):
    """Placeholder route: show a single entry carrying *label* and nothing else."""
    return [{'label': label}]
def get_key (dict, value):
    """Return every key of *dict* whose value equals *value*, in iteration order."""
    matches = []
    for key in dict:
        if dict[key] == value:
            matches.append(key)
    return matches
@plugin.route('/history/<name>/<url>/')
def history(name,url):
    """Show a search/input entry plus its past queries, newest first.

    name: the prompt label to display.
    url:  the endpoint name; search-type endpoints take (value, page),
          the others take (value) only.
    History is stored as {keyword: timestamp string}; entries are sorted
    by timestamp descending and mapped back to keywords via get_key().
    NOTE(review): two keywords saved in the same second share a timestamp,
    so only the first match is shown for both — acceptable for a menu.
    """
    items = []
    if url == 'search' or url =='bgsearch' or url == 'movsearch' or url == 'vidsearch' or url == 'livesearch' or url == 'upsearch':
        items.append({
            'label': '[COLOR yellow]'+ name +'[/COLOR]',
            'path': plugin.url_for(url,value='null',page=1),
        })
    else:
        items.append({
            'label': '[COLOR yellow]'+ name +'[/COLOR]',
            'path': plugin.url_for(url,value='null'),
        })
    #his[url] ={'aaa':'2019-01-23 10:00:00','bbb':'2019-01-23 09:01:00','ccc':'2019-01-23 09:00:59'}
    # Lazily create the per-endpoint history bucket.
    if url in his:
        hi = his[url]
    else:
        his[url] = {}
        hi = his[url]
    #hi = []
    if hi:
        # Sort timestamps newest-first, then reverse-look-up each keyword.
        val = list(hi.values())
        val = sorted(val,reverse=True)
        for index in range(len(val)):
            if url == 'search' or url == 'bgsearch' or url == 'vidsearch' or url == 'movsearch' or url == 'livesearch' or url == 'upsearch':
                items.append({
                    'label': name+ ':' +get_key(hi,val[index])[0] + ' - [查询时间:' + val[index] +']',
                    'path': plugin.url_for(url,value=get_key(hi,val[index])[0],page=1),
                })
            else:
                items.append({
                    'label': name+ ':' +get_key(hi,val[index])[0] + ' - [查询时间:' + val[index] +']',
                    'path': plugin.url_for(url,value=get_key(hi,val[index])[0]),
                })
        #for index in range(len(hi)):
        #items.append({
        #'label': name+ ':' +hi[index],
        #'path': plugin.url_for(url,value=hi[index]),
        #})
        items.append({
            'label': '[COLOR yellow]清除历史记录[/COLOR]',
            'path': plugin.url_for('cleanhis',url=url),
        })
    else:
        items.append({
            'label': '[COLOR yellow]历史记录为空[/COLOR]',
            'path': plugin.url_for(ok,value='历史记录为空'),
        })
    return items
@plugin.route('/ok/<value>/')
def ok(value):
    """Pop up a modal message box showing *value*."""
    xbmcgui.Dialog().ok('提示', value)
@plugin.route('/cleanhis/<url>/')
def cleanhis(url):
    """Wipe the stored query history of one search endpoint."""
    his[url] = {}
    xbmcgui.Dialog().ok('提示', '清理历史记录成功')
if __name__ == '__main__':
    # Hand control to xbmcswift2: dispatch the route Kodi invoked us with.
    plugin.run()
| StarcoderdataPython |
1751941 | <gh_stars>0
from datetime import datetime
from hashlib import sha1
from os import path
from re import search
from typing import Any, Dict, List
from unicodedata import normalize
from elasticsearch_dsl import Document
from lxml import etree
from kosh.utils import logger
from kosh.utils import namespaces as ns
class entry():
    '''
    Parser that turns the XML entries of a lexicon file into
    elasticsearch-dsl Documents, driven by the XPaths configured in the
    elex schema mappings.
    '''

    def __init__(self, elex: Dict[str, Any]) -> None:
        '''
        Keep a reference to the elex configuration (uid, schema, mappings).
        '''
        self.elex = elex

    def parse(self, file: str) -> List[Document]:
        '''
        Parse *file* and return one Document per element matched by the
        configured root XPath.
        '''
        docs = []
        name = path.basename(file)
        xmap = self.elex.schema.mappings._meta._xpaths
        logger().debug('Parsing file %s/%s', self.elex.uid, name)
        tree = etree.parse(file, etree.XMLParser(remove_blank_text = True))
        for elem in tree.xpath(xmap.root, namespaces = ns()):
            docs += [self.__record(elem)]
        return docs

    def schema(self, *args, **kwargs) -> Document:
        '''
        Build and instantiate the Document class for this lexicon: the
        index name is the elex uid and each mapping property becomes a
        field of the declared type.
        '''
        class entry(Document):
            class Index: name = self.elex.uid
        emap = self.elex.schema.mappings.properties
        for i in emap: entry._doc_type.mapping.field(i, emap[i].type)
        return entry(*args, **kwargs)

    def __record(self, root: etree.Element) -> Document:
        '''
        Convert one XML entry element into a Document.

        The id comes from the id XPath, falling back to a SHA-1 of the
        serialized element; field XPaths yield either elements (text is
        used) or text nodes, all NFC-normalized.  A property name written
        as "[name]" in the mapping collects multiple matches into a list.
        '''
        elem = etree.tostring(root, encoding = 'unicode')
        xmap = self.elex.schema.mappings._meta._xpaths
        euid = next(iter(root.xpath(xmap.id, namespaces = ns())), None) \
            or sha1(elem.encode('utf-8')).hexdigest()
        item = self.schema(
            meta = { 'id': euid },
            created = datetime.now(),
            xml = elem
        )
        for prop in xmap.fields:
            for data in root.xpath(xmap.fields[prop], namespaces = ns()):
                if isinstance(data, etree._Element) and data.text is not None:
                    data = normalize('NFC', data.text)
                elif isinstance(data, etree._ElementUnicodeResult):
                    data = normalize('NFC', data)
                else: data = None
                if data is not None:
                    # "[x]" marks a multi-valued field: append; "x" is scalar.
                    if not search(r'^\[.*\]$', prop): item[prop] = data
                    elif prop[1:-1] in item: item[prop[1:-1]] = [*item[prop[1:-1]], data]
                    else: item[prop[1:-1]] = [data]
        # Free the parsed element's children early to bound memory use.
        root.clear()
        return item
| StarcoderdataPython |
1655512 | from django.http import HttpResponse
from houdini_stats.models import *
from stats_main.models import *
from django.contrib.gis.geoip import GeoIP
from settings import REPORTS_START_DATE, _this_dir
from dateutil.relativedelta import relativedelta
import json
import re
import datetime
import time
import hashlib
import math
import settings
#===============================================================================
def text_http_response(content, status=200):
    """Return *content* as an HttpResponse with an explicit Content-Length."""
    # FIXME: Why doesn't Django set the Content-Length header?
    resp = HttpResponse(content, status=status)
    body_size = len(resp.content)
    resp["Content-Length"] = str(body_size)
    return resp
def json_http_response(content, status=200):
    """Serialize *content* to JSON and return it as an HTTP response."""
    payload = json.dumps(content)
    return text_http_response(payload, status=status)
#-------------------------------------------------------------------------------
class StatsError(Exception):
    """
    Base class for all stats exceptions.

    Carries an HTTP status code plus a %-style message template that is
    interpolated with the given keyword arguments.
    """

    def __init__(self, status_code, msg_template, **kwargs):
        message = msg_template % kwargs
        Exception.__init__(self, message)
        self.status_code = status_code
class ServerError(StatsError):
    """Internal (HTTP 500) error."""

    def __init__(self, msg_template, **kwargs):
        StatsError.__init__(self, 500, msg_template, **kwargs)
class UnauthorizedError(StatsError):
    """Access-control (HTTP 401) error, as opposed to permission."""

    def __init__(self, msg_template, **kwargs):
        StatsError.__init__(self, 401, msg_template, **kwargs)
#-------------------------------------------------------------------------------
def parse_byte_size_string(string):
    """
    Convert a human-readable byte size such as ``"1 KB"`` or ``"2.2 GB"``
    into an integer number of bytes (1024-based units).

    Returns None for an empty string and 0 for an unrecognized suffix;
    raises ValueError when the leading numeric part is missing or invalid
    (same behavior as before). The unused ``initial_string`` local and the
    char-by-char string slicing of the original were removed.

    >>> parse_byte_size_string('1 KB')
    1024
    >>> parse_byte_size_string('2.2 GB')
    2362232012
    """
    if len(string) == 0:
        return None

    # Split the leading numeric part (digits and '.') from the suffix.
    index = 0
    while index < len(string) and (string[index].isdigit() or string[index] == '.'):
        index += 1
    num = float(string[:index])

    # Missing suffix means plain bytes.
    suffix = string[index:].strip() or "B"
    suffix_set = ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB')
    multipliers = {s: 1 << (i * 10) for i, s in enumerate(suffix_set)}

    # If the data is garbage for some reason, just discard it.
    if suffix not in multipliers:
        return 0
    return int(num * multipliers[suffix])
#-------------------------------------------------------------------------------
def is_valid_machine_config_hash(user_info):
    """
    Recompute the MD5 hash of ``user_info`` (ignoring its stored
    "config_hash" entry) and check that it matches the stored value.

    Keys are sorted so the same user config always produces the same hash.
    NOTE: Python 2 code (print statements, ``unicode`` builtin).
    """
    string_to_hash = ''.join([
        key + ": " + unicode(user_info[key])
        for key in sorted(user_info.keys())
        if key != "config_hash"])

    # Debug output comparing the client-provided and recomputed hashes.
    print "The hash passed to server: ", user_info["config_hash"]
    print "The hash created by server: ", \
        hashlib.md5(string_to_hash).hexdigest()

    return (user_info["config_hash"] ==
            hashlib.md5(string_to_hash).hexdigest())
#-------------------------------------------------------------------------------
def get_or_save_machine_config(user_info, ip_address, data_log_date):
    """
    Return the MachineConfig row for this report, creating the Machine and
    MachineConfig (plus any per-application extension rows) on first sight
    of this hardware_id / config_hash combination.

    User Info:{ 'config_hash': '7ef9c42fe4d3748dc9aad755e02852d8',
                'houdini_build_version': '146',
                'houdini_major_version': '13',
                'houdini_minor_version': '0',
                'application_name': 'houdini',
                'operating_system': 'linux-x86_64-gcc4.7',
                'system_memory': '23.55 GB',
                'license_category': 'Commercial',
                'number_of_processors': '12',
                'graphics_card': 'Quadro 600/PCIe/SSE2',
                'graphics_card_version': '4.2.0 NVIDIA 304.88'
                'mac_address_hash' : '05e8458a3e60776298ece4af002dcef7',
                'cpu_info':
                'system_resolution:
                ""
              }
    """
    # 1. Validate machine config
    config_hash = user_info['config_hash']

    # Hash validation is currently disabled (see is_valid_machine_config_hash).
    # if not is_valid_machine_config_hash(user_info):
    #     print "Different"
    #     raise ServerError("Invalid config hash %(name)s.",
    #                        name=config_hash)
    #
    # 2. Get or save Machine by hardware_id
    hardware_id = user_info.get('mac_address_hash','')
    machine, created = Machine.objects.get_or_create(hardware_id=hardware_id)

    # 3. Get or save Machine Config
    sys_memory = user_info.get('system_memory', "0")
    # NOTE(review): `product` is computed but never used below -- confirm
    # whether it was meant to be stored on the MachineConfig.
    product = user_info.get('application_name',"") + " " + user_info.get(
        'license_category',"")

    machine_config, created = MachineConfig.objects.get_or_create(
        machine=machine,
        config_hash=config_hash,
        defaults= dict(
            ip_address=ip_address,
            creation_date=data_log_date,
            graphics_card=user_info.get('graphics_card',''),
            graphics_card_version=user_info.get('graphics_card_version',''),
            operating_system=user_info.get('operating_system', ""),
            system_memory=parse_byte_size_string(sys_memory),
            number_of_processors=user_info.get('number_of_processors',0),
            cpu_info=user_info.get('cpu_info', ""),
            system_resolution=user_info.get('system_resolution', ""),
            raw_user_info=str(user_info),
        ))

    if created:
        # Let applications extend the machine config model.
        for app_name in settings.STATS_APPLICATIONS:
            try:
                app_module = __import__(app_name + ".models")
            except ImportError:
                # Apps without a models module simply don't extend configs.
                continue
            app_models_module = getattr(app_module, "models")

            creation_function = getattr(
                app_models_module, "create_machine_config_extension", None)

            if creation_function is not None:
                machine_config_extension = creation_function(
                    machine_config, user_info)
                machine_config_extension.save()

    return machine_config
#-------------------------------------------------------------------------------
def is_new_log_or_existing(machine_config, log_id, data_log_date):
    """
    Record ``log_id`` for this machine config unless it was already seen.

    Returns True when the log id was newly created, False otherwise.
    """
    _, was_created = LogId.objects.get_or_create(
        machine_config=machine_config,
        log_id=log_id,
        defaults=dict(logging_date=data_log_date))
    return was_created
#-------------------------------------------------------------------------------
def save_crash(machine_config, crash_log, data_log_date):
    """
    Persist a HoudiniCrash row for the given machine config.

    ``crash_log`` is a dict whose "traceback" key holds the stack trace
    reported by Houdini, e.g.:
        {u'traceback': u'Caught signal 11\\n\\nAP_Interface:: ...'}
    """
    HoudiniCrash(
        stats_machine_config=machine_config,
        date=data_log_date,
        stack_trace=crash_log['traceback'],
        type="crash",
    ).save()
#-------------------------------------------------------------------------------
def save_uptime(machine_config, num_seconds, idle_time, data_log_date):
    """Persist a session Uptime row (total seconds and idle seconds)."""
    Uptime(
        stats_machine_config=machine_config,
        date=data_log_date,
        number_of_seconds=num_seconds,
        idle_time=idle_time,
    ).save()
#-------------------------------------------------------------------------------
def save_counts(machine_config, counts_dict, data_log_date):
    """
    Dispatch each entry of ``counts_dict``: keys beginning with "tools/"
    are stored as tool usages, everything else as generic usage counts.
    """
    tools_prefix = "tools/"
    for key, count in counts_dict.iteritems():
        if key.startswith(tools_prefix):
            save_tool_usage(machine_config, tools_prefix, key, count,
                            data_log_date)
        else:
            save_key_usage(machine_config, key, count, data_log_date)
#-------------------------------------------------------------------------------
def save_tool_usage(machine_config, tools_prefix, key, count, data_log_date):
    """
    Create HoudiniToolUsage object and save it in DB.

    Schema: tools|location|tool_name
    - location can be "shelf", "viewer/Object", "viewer/Sop",
      "network/Object", "network/Sop", etc.
    - tool_name can be "sop_box", or "SideFX::spaceship" or
      blank if it's a custom tool
    - the tool name can be followed by "(orbolt)" (if it's an orbolt tool) or
      "(custom_tool)" if it's a nameless custom tool.

    Keys whose creation-mode prefix matches none of
    HoudiniToolUsage.TOOL_CREATION_MODES are silently ignored.
    """
    is_asset = False
    is_custom = False
    for mode, name in HoudiniToolUsage.TOOL_CREATION_MODES:
        prefix = tools_prefix + name
        if key.startswith(prefix):
            # Find "|" to split the creation location from the tool name.
            pipe_pos = key.index("|")
            tool_creation_location = key[len(prefix)+1: pipe_pos]
            tool_name = key[pipe_pos +1:]

            # Verify if tool type is a custom_tool
            if "(custom_tool)" in tool_name:
                tool_name = re.sub('[\(\)]', "", tool_name)
                is_custom = True
            # Verify if tool type is an Orbolt asset
            elif "(orbolt)" in tool_name:
                tool_name = tool_name.replace("(orbolt)","")
                is_asset = True

            tools_usage = HoudiniToolUsage(
                stats_machine_config=machine_config,
                date=data_log_date,
                tool_name=tool_name,
                tool_creation_location=tool_creation_location,
                tool_creation_mode=mode,
                count=count,
                # Built-in means neither an Orbolt asset nor a custom tool.
                is_builtin=(not is_asset and not is_custom),
                is_asset=is_asset)
            tools_usage.save()
            # Only the first matching creation mode is recorded.
            break
#-------------------------------------------------------------------------------
def save_key_usage(machine_config, key, count, data_log_date):
    """Persist a generic (non-tool) usage counter."""
    HoudiniUsageCount(
        stats_machine_config=machine_config,
        date=data_log_date,
        key=key,
        count=count,
    ).save()
#-------------------------------------------------------------------------------
def persistent_stats(machine_config, persistent_stats_dict, data_log_date):
    """
    Save the data that comes in persistent stats.

    For each key/value pair: reuse (or create) the shared key/value row,
    then attach it to the per-machine, per-Houdini-version persistent
    stats record, replacing any entry that has the same key but a
    different value.
    """
    for key, value in persistent_stats_dict.iteritems():
        # Try to find the key value pair and if it doesn't exists insert a new
        # one. NOTE(review): the bare except hides any DB error, not just
        # DoesNotExist -- confirm this is intentional.
        try:
            key_value_pair = HoudiniPersistentStatsKeyValuePair.objects.get(
                key=key, value=value)
        except:
            key_value_pair = HoudiniPersistentStatsKeyValuePair(key=key,
                value=value)
            key_value_pair.save()
        assert key_value_pair is not None

        # Get houdini_major_version and houdini_minor_version from machine
        # config extension
        machine_config_ext = HoudiniMachineConfig.objects.get(
            machine_config = machine_config)

        # Try to find if there is a not a persistent stats like this one already
        # and if so update it if needed, if not insert a new one
        try:
            # Get houdini permanent stats
            hou_per_stats = HoudiniPersistentStats.objects.get(
                machine = machine_config.machine,
                houdini_major_version = machine_config_ext.houdini_major_version,
                houdini_minor_version = machine_config_ext.houdini_minor_version,
            )
            # Find if there is an entry that already contains this persistent
            # stats and update it if needed
            hou_per_stats_entry = HoudiniPersistentStatsEntry.objects.filter(
                persistent_stats = hou_per_stats)
            need_to_add = True

            for entry in hou_per_stats_entry:
                # See if we have a matching key and value. If so, we won't
                # add anything later.
                if entry.persistent_stats_kvp == key_value_pair:
                    need_to_add = False
                    break
                # If we have a matching key but different value, delete the
                # old pair and add the new one later.
                # NOTE(review): this deletes the whole queryset, not just
                # `entry` -- verify that is the intended behavior.
                if entry.persistent_stats_kvp.key == key:
                    hou_per_stats_entry.delete()
                    break
            if need_to_add:
                hou_per_stats_entry = HoudiniPersistentStatsEntry(
                    persistent_stats = hou_per_stats,
                    persistent_stats_kvp = key_value_pair)
                hou_per_stats_entry.save()
        except:
            # No persistent stats record yet for this machine/version:
            # create persistent stats object and save it
            hou_per_stats = HoudiniPersistentStats(date = data_log_date,
                machine = machine_config.machine, hash = "",
                houdini_major_version = machine_config_ext.houdini_major_version,
                houdini_minor_version = machine_config_ext.houdini_minor_version,
            )
            hou_per_stats.save()

            # Create persistent stats entry object and save it
            hou_per_stats_entry = HoudiniPersistentStatsEntry(
                persistent_stats = hou_per_stats,
                persistent_stats_kvp = key_value_pair)
            hou_per_stats_entry.save()
#-------------------------------------------------------------------------------
def save_strings(machine_config, strings_dict, data_log_date):
    """Persist one HoudiniString row per key/value pair in ``strings_dict``."""
    for key, value in strings_dict.iteritems():
        HoudiniString(
            stats_machine_config=machine_config,
            date=data_log_date,
            key=key,
            value=value,
        ).save()
#-------------------------------------------------------------------------------
def save_sums_and_counts(machine_config, sums_and_counts, data_log_date):
    """
    Persist per-key (sum, count) pairs.

    "sums_and_counts":{
        "cook/SOP_xform/time": [0.524806, 4171],
        "cook/mantra/mantra1/time": [36.195406, 1]
    }
    """
    for key, (total, num_samples) in sums_and_counts.iteritems():
        HoudiniSumAndCount(
            stats_machine_config=machine_config,
            date=data_log_date,
            key=key,
            sum=total,
            count=num_samples,
        ).save()
#-------------------------------------------------------------------------------
def save_flags(machine_config, flags, data_log_date):
    """
    Persist one HoudiniFlag row per key.

    "flags":[ "key1", "key2", "key3" ]
    """
    for flag_key in flags:
        HoudiniFlag(
            stats_machine_config=machine_config,
            date=data_log_date,
            key=flag_key,
        ).save()
#-------------------------------------------------------------------------------
def save_logs(machine_config, logs, data_log_date):
    """
    Persist per-source log entries.

    ``logs`` maps a source name to {timestamp: line}, e.g.:
        "logs": {
            "web_server": {
                "80.511179": "user requested page /",
                "90.234239": "user requested page /index"
            }
        }
    """
    for source, entries in logs.iteritems():
        for timestamp, log_entry in entries.iteritems():
            HoudiniLog(
                stats_machine_config=machine_config,
                date=data_log_date,
                key=source,
                timestamp=timestamp,
                log_entry=log_entry,
            ).save()
#-------------------------------------------------------------------------------
def save_error_log(description, stack_trace, ip_address):
    """Persist a server-side error report stamped with the current time."""
    ErrorLog(
        description=description,
        date=datetime.datetime.now(),
        stack_trace=stack_trace,
        ip_address=ip_address,
    ).save()
#-------------------------------------------------------------------------------
def save_data_log_to_file(date, config_hash, json_data, ip_adress):
    """
    Append the received data log to houdini_logs.txt next to _this_dir.

    NOTE(review): the parameter name ``ip_adress`` (sic) is kept as-is for
    backward compatibility with keyword callers.
    """
    # Append mode: the log file accumulates every submission.
    with open(_this_dir + "/../houdini_logs.txt", "a") as log_file:
        log_file.write("""\n Date log saved: {0}, IP: {1}, Config Hash: {2}, Date: {3} \n {4}
        """.format(datetime.datetime.now(), ip_adress,
                   config_hash, date, str(json_data)))
#-------------------------------------------------------------------------------
def date_range_to_seconds(datetime1, datetime2):
    """Return the (signed) number of seconds from datetime1 to datetime2."""
    delta = datetime2 - datetime1
    return delta.total_seconds()
def seconds_to_multiple_time_units(secs):
    """
    Express a number of seconds as seconds, minutes, hours and days.

    Each unit is the *whole* duration independently rounded -- this is not
    a decomposition into remainders.
    """
    whole_seconds = int(secs)
    return {
        "seconds": whole_seconds,
        "minutes": round(whole_seconds / 60.0),
        "hours": round(whole_seconds / 3600.0),
        "days": round(whole_seconds / 86400.0),
    }
#-------------------------------------------------------------------------------
def get_percent(part, whole):
    """
    Percentage of *whole* represented by *part*, rounded to two decimal
    places; 0.0 when *whole* is zero.
    """
    if whole == 0:
        return 0.0
    return round(100 * float(part) / float(whole), 2)
def get_difference(num1, num2):
    """Return *num1* minus *num2*."""
    difference = num1 - num2
    return difference
#-------------------------------------------------------------------------------
def get_lat_and_long(ip):
    """Look up the (latitude, longitude) pair for an IP address via GeoIP."""
    geo = GeoIP(cache=GeoIP.GEOIP_MEMORY_CACHE)
    return geo.lat_lon(str(ip))
def get_ip_address(request):
    """Return the address the request came from ("0.0.0.0" when unknown)."""
    meta = request.META
    return meta.get("REMOTE_ADDR", "0.0.0.0")
def _get_valid_date_or_error(str_date):
"""
Convert a string date to a valid datetime object or return error message.
"""
try:
return time.strptime(str_date, '%d/%m/%Y')
except:
raise ServerError(
"INVALID DATE: %(date)s.\n"
"The date format must be 'dd/mm/yyyy'.\n"
"You can fix the dates in the url and try again.\n",
date=str_date)
def _reset_time_for_date(date):
"""
Set time on a datetime to 00:00:00
"""
return date.replace(hour=0, minute=0, second=0, microsecond=0)
def _get_yesterdays_date():
"""
Get yesterday's date
"""
return datetime.datetime.now() - datetime.timedelta(hours=24)
def _get_months_ago_date(months = 3):
    """
    Midnight of the date *months* calendar months before yesterday.
    """
    months_back = relativedelta(months = -months)
    return _reset_time_for_date(_get_yesterdays_date() + months_back)
def _get_start_request(request, aggregation, minimum_start_date=None):
    """
    Determine the report start date: the "start" query parameter when
    supplied, otherwise three months before yesterday (never earlier than
    ``minimum_start_date`` when one is given), snapped to the aggregation
    boundary.
    """
    requested = request.GET.get("start", None)
    if requested is not None:
        parsed = _get_valid_date_or_error(requested)
        start = datetime.datetime(parsed.tm_year, parsed.tm_mon, parsed.tm_mday)
    elif minimum_start_date is not None:
        # Date when we started collecting good data for this report.
        start = max(minimum_start_date, _get_months_ago_date())
    else:
        # Default: three months back from yesterday's date.
        start = _get_months_ago_date()
    return _adjust_start_date(start, aggregation)
def _adjust_start_date(start_date, aggregation):
"""
Adjust the start date depending on the aggregation
"""
if aggregation == "weekly":
# Return the Monday of the starting date week
return start_date - datetime.timedelta(days = start_date.weekday())
if aggregation == "monthly":
# Return the fist day of the starting date's month
return datetime.datetime(start_date.year, start_date.month, 1)
if aggregation == "yearly":
# Return the first day of the first month of the current year
return datetime.datetime(start_date.year, 1, 1)
# Daily aggregation
return start_date
def _get_end_request(request):
    """
    Determine the report end date: the "end" query parameter when given,
    otherwise yesterday at midnight.
    """
    requested = request.GET.get("end", None)
    if requested is None:
        return _reset_time_for_date(_get_yesterdays_date())
    parsed = _get_valid_date_or_error(requested)
    return datetime.datetime(parsed.tm_year, parsed.tm_mon, parsed.tm_mday)
#-------------------------------------------------------------------------------
def _get_aggregation(get_vars):
"""
Get aggregation from the request.GET.
If there is not aggregation we set it to daily by default.
"""
# For aggregation
valid_agg = ["monthly", "weekly", "yearly", "daily"]
if "ag" not in get_vars:
return "daily"
aggregation = get_vars["ag"].lower()
if aggregation not in valid_agg and aggregation !="inherit":
raise ServerError(
"INVALID AGGREGATION: %(agg)s.\n"
"The valid aggregations are:\n"
"'daily', 'weekly', 'monthly' or 'yearly'.\n"
"You can fix the aggregation in the url and try again.\n",
agg=aggregation)
elif aggregation=="inherit":
return "daily"
return aggregation
def get_common_vars_for_charts(request, minimum_start_date=None):
    """Return ([start, end], aggregation) for a report request."""
    aggregation = _get_aggregation(request.GET)
    start = _get_start_request(request, aggregation, minimum_start_date)
    end = _get_end_request(request)
    return [start, end], aggregation
#-------------------------------------------------------------------------------
def get_list_of_tuples_from_list(list):
    """
    Split a flat iterable into consecutive pairs. Despite the name, the
    pairs are two-element *lists*, not tuples (the previous docstring's
    example was wrong on this point). A trailing odd element becomes a
    one-element list.

    For example [1, 2, 3, 4, 5] -> [[1, 2], [3, 4], [5]].

    NOTE: the parameter stays named ``list`` (shadowing the builtin) for
    backward compatibility with keyword callers.
    """
    output = []
    pair = []
    for element in list:
        pair.append(element)
        if len(pair) == 2:
            output.append(pair)
            pair = []
    # Flush a trailing unpaired element.
    if pair:
        output.append(pair)
    return output
#-------------------------------------------------------------------------------
def sigdig(value, digits = 3):
    """
    Format *value* as a string with the given number of significant
    digits, e.g. sigdig(1234) -> "1230" and sigdig(0.012345) -> "0.0123".

    Zero is handled explicitly: log10(0) is undefined, so the original
    raised a math domain error for value == 0.
    """
    if value == 0:
        return "0"
    order = int(math.floor(math.log10(math.fabs(value))))
    places = digits - order - 1
    if places > 0:
        fmtstr = "%%.%df" % places
    else:
        fmtstr = "%.0f"
    return fmtstr % round(value, places)
#-------------------------------------------------------------------------------
def validate_log_date(start_time, end_time):
    """
    Reject logs stamped in the future.

    Returns (ok, log_date): ok is False when either unix timestamp lies
    after the current time (log_date is then "now"); otherwise log_date
    is the datetime corresponding to *start_time*.
    """
    now = datetime.datetime.now()
    start = datetime.datetime.fromtimestamp(start_time)
    end = datetime.datetime.fromtimestamp(end_time)
    if now < start or now < end:
        return False, now
    return True, start
| StarcoderdataPython |
174853 | <filename>Lib/compositor/scriptList.py<gh_stars>1-10
"""
ScriptList object (and friends).
"""
__all__ = ["ScriptList", "ScriptRecord", "ScriptCount", "LangSysRecord", "LangSysCount"]
class ScriptList(object):

    """Top-level OpenType ScriptList: a count plus ScriptRecord entries."""

    __slots__ = ["ScriptCount", "ScriptRecord"]

    def __init__(self):
        self.ScriptCount = 0
        self.ScriptRecord = None

    def loadFromFontTools(self, scriptList):
        """Copy data out of a fontTools ScriptList object; returns self."""
        self.ScriptCount = scriptList.ScriptCount
        records = []
        for record in scriptList.ScriptRecord:
            records.append(ScriptRecord().loadFromFontTools(record))
        self.ScriptRecord = records
        return self
class ScriptRecord(object):

    """Associates a 4-letter script tag with its Script table."""

    __slots__ = ["ScriptTag", "Script"]

    def __init__(self):
        self.ScriptTag = None
        self.Script = None

    def loadFromFontTools(self, scriptRecord):
        """Copy data out of a fontTools ScriptRecord object; returns self."""
        self.ScriptTag = scriptRecord.ScriptTag
        loaded_script = Script()
        self.Script = loaded_script.loadFromFontTools(scriptRecord.Script)
        return self
class Script(object):

    """A script's optional default language system plus named LangSys records."""

    __slots__ = ["DefaultLangSys", "LangSysCount", "LangSysRecord"]

    def __init__(self):
        self.DefaultLangSys = None
        self.LangSysCount = 0
        self.LangSysRecord = []

    def loadFromFontTools(self, script):
        """Copy data out of a fontTools Script object; returns self."""
        if script.DefaultLangSys is None:
            self.DefaultLangSys = None
        else:
            self.DefaultLangSys = LangSys().loadFromFontTools(script.DefaultLangSys)
        self.LangSysCount = script.LangSysCount
        self.LangSysRecord = [
            LangSysRecord().loadFromFontTools(record)
            for record in script.LangSysRecord
        ]
        return self
class LangSysRecord(object):

    """Associates a language-system tag with its LangSys table."""

    __slots__ = ["LangSysTag", "LangSys"]

    def __init__(self):
        self.LangSysTag = None
        self.LangSys = None

    def loadFromFontTools(self, langSysRecord):
        """Copy data out of a fontTools LangSysRecord object; returns self."""
        self.LangSysTag = langSysRecord.LangSysTag
        loaded = LangSys()
        self.LangSys = loaded.loadFromFontTools(langSysRecord.LangSys)
        return self
class LangSys(object):

    """Feature indices (plus optional required feature) for one language system."""

    __slots__ = ["LookupOrder", "ReqFeatureIndex", "FeatureCount", "FeatureIndex"]

    def __init__(self):
        self.LookupOrder = None
        self.ReqFeatureIndex = None
        self.FeatureCount = 0
        self.FeatureIndex = []

    def loadFromFontTools(self, langSys):
        """Copy data out of a fontTools LangSys object; returns self."""
        # XXX LookupOrder is reserved in the OpenType spec -- presumably
        # always None here.
        self.LookupOrder = langSys.LookupOrder
        self.ReqFeatureIndex = langSys.ReqFeatureIndex
        self.FeatureCount = langSys.FeatureCount
        self.FeatureIndex = list(langSys.FeatureIndex)
        return self
| StarcoderdataPython |
3230460 | <reponame>ismailah28/URL-Shortener
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.views import View
from analytics.models import ClickEvent
from .forms import SubmitUrlForm
from .models import KirrUrl
# Create your views here.
class HomeView(View):
    """Render the URL submission form and handle shortening submissions."""

    def get(self, request, shortcode=None, *args, **kwargs):
        """Show an empty submission form."""
        context = {
            'form': SubmitUrlForm(),
        }
        return render(request, 'shortener/home.html', context)

    def post(self, request, shortcode=None, *args, **kwargs):
        """
        Validate the submitted URL and create (or fetch) its KirrUrl,
        rendering a success or already-exists page accordingly.
        """
        form = SubmitUrlForm(request.POST)
        context = {
            'form': form,
        }
        template = 'shortener/home.html'
        if form.is_valid():
            new_url = form.cleaned_data.get('url')
            obj, created = KirrUrl.objects.get_or_create(url=new_url)
            context = {
                'object': obj,
                'created': created,
            }
            if created:
                template = 'shortener/success.html'
            else:
                # Fixed: the template name previously carried a trailing
                # space ('shortener/already-exist.html '), which makes the
                # Django template loader fail to find the file.
                template = 'shortener/already-exist.html'
        return render(request, template, context)
class URLRedirectView(View):
    """Resolve a shortcode, record a click event, and redirect to the URL."""

    def get(self, request, shortcode=None, *args, **kwargs):
        """Look up the KirrUrl for *shortcode* (case-insensitively) and redirect."""
        qs = KirrUrl.objects.filter(shortcode__iexact=shortcode)
        # Fixed: the original called qs.exist(), which raised AttributeError
        # (the QuerySet method is exists()).
        if qs.count() != 1 and not qs.exists():
            raise Http404
        obj = qs.first()
        # Record the click for analytics (print kept for debugging parity).
        print(ClickEvent.objects.create_event(obj))
        return HttpResponseRedirect(obj.url)
1766233 | <reponame>Yuhta/dfcompare<gh_stars>0
from dfcompare import BufferedIterator, compare, Identical, Different, Unmatched
import pandas as pd
import unittest
class TestBufferedIterator(unittest.TestCase):
    """Unit tests for BufferedIterator's peeking and truthiness."""

    def test_head(self):
        """head() peeks without consuming; iteration still yields every item."""
        buffered = BufferedIterator(iter([1, 2, 3]))
        self.assertEqual(buffered.head(), 1)  # peek does not consume
        self.assertEqual(next(buffered), 1)
        self.assertEqual(next(buffered), 2)
        self.assertEqual(buffered.head(), 3)
        self.assertEqual(next(buffered), 3)
        # Peeking past the end propagates StopIteration.
        with self.assertRaises(StopIteration):
            buffered.head()

    def test_bool(self):
        """The iterator is truthy exactly while items remain."""
        buffered = BufferedIterator(iter([42]))
        self.assertTrue(buffered)
        next(buffered)
        self.assertFalse(buffered)
class TestCompare(unittest.TestCase):
    """
    Tests for compare(): diffing two DataFrames into Identical /
    Different / Unmatched row results, with and without sorting.
    """

    def test_no_sort(self):
        """Row-by-row compare with sort disabled flags per-column diffs."""
        l = pd.DataFrame({'A': [1, 2, 3], 'B': ['foo', 'bar', 'quux']})
        r = pd.DataFrame({'A': [1, 4, 3], 'B': ['foo', 'bar', 'quuy']})
        d = list(compare(l, r, sort=False))
        self.assertEqual(len(d), 3)
        self.assertIsInstance(d[0], Identical)
        self.assertEqual(d[0].left, d[0].right)
        self.assertIsInstance(d[1], Different)
        # left/right are (index, *columns) tuples; diff lists column offsets.
        self.assertEqual(d[1].left, (1, 2, 'bar'))
        self.assertEqual(d[1].right, (1, 4, 'bar'))
        self.assertEqual(d[1].diff, [0])
        self.assertIsInstance(d[2], Different)
        self.assertEqual(d[2].diff, [1])

    def test_multi_index(self):
        """MultiIndex frames are aligned by (sorted) index tuples."""
        index = pd.MultiIndex.from_tuples(((2, 1), (1, 2), (1, 1)))
        l = pd.DataFrame({'A': [1, 2, 3]}, index=index)
        index = pd.MultiIndex.from_tuples(((1, 2), (1, 1), (2, 1)))
        r = pd.DataFrame({'A': [4, 3, 1]}, index=index)
        d = list(compare(l, r))
        self.assertEqual(len(d), 3)
        self.assertIsInstance(d[0], Identical)
        self.assertIsInstance(d[1], Different)
        self.assertEqual(d[1].left, ((1, 2), 2))
        self.assertEqual(d[1].right, ((1, 2), 4))
        self.assertIsInstance(d[2], Identical)

    def test_in_left_only(self):
        """A row missing on the right yields Unmatched with side == 0."""
        l = pd.DataFrame({'A': [1, 2, 3]}, index=(1, 2, 3))
        r = pd.DataFrame({'A': [1, 2]}, index=(1, 3))
        d = list(compare(l, r))
        self.assertEqual(len(d), 3)
        self.assertIsInstance(d[1], Unmatched)
        self.assertEqual(d[1].side, 0)

    def test_in_right_only(self):
        """A row missing on the left yields Unmatched with side == 1."""
        l = pd.DataFrame({'A': [1, 2]}, index=(1, 3))
        r = pd.DataFrame({'A': [1, 2, 3]}, index=(1, 2, 3))
        d = list(compare(l, r))
        self.assertEqual(len(d), 3)
        self.assertIsInstance(d[1], Unmatched)
        self.assertEqual(d[1].side, 1)

    def test_external_sort(self):
        """Iterator mode merges chunked frames after an external sort."""
        l = [pd.DataFrame({'A': [1, 2, 3]}, index=(1, 2, 3)),
             pd.DataFrame({'A': [4, 5, 6]}, index=(4, 5, 6))]
        r = [pd.DataFrame({'A': [6, 5, 4]}, index=(6, 5, 4)),
             pd.DataFrame({'A': [3, 2, 1]}, index=(3, 2, 1))]
        d = list(compare(iter(l), iter(r), iterator=True))
        self.assertEqual(len(d), 6)
        for diff in d:
            self.assertIsInstance(diff, Identical)
        # Results come back in ascending index order regardless of input order.
        self.assertEqual([diff.left[0] for diff in d], list(range(1, 7)))

    def test_external_sort_multi_index(self):
        """External sort also works with a two-level index."""
        l = [pd.DataFrame({'A': [1, 2, 3], 'B': [1, 2, 3], 'C': [1, 2, 3]}).set_index(['A', 'B']),
             pd.DataFrame({'A': [4, 5, 6], 'B': [4, 5, 6], 'C': [4, 5, 6]}).set_index(['A', 'B'])]
        r = [pd.DataFrame({'A': [6, 5, 4], 'B': [6, 5, 4], 'C': [6, 5, 4]}).set_index(['A', 'B']),
             pd.DataFrame({'A': [3, 2, 1], 'B': [3, 2, 1], 'C': [3, 2, 1]}).set_index(['A', 'B'])]
        d = list(compare(iter(l), iter(r), iterator=True))
        self.assertEqual(len(d), 6)
        for diff in d:
            self.assertIsInstance(diff, Identical)
        self.assertEqual([diff.left[0] for diff in d], [(i, i) for i in range(1, 7)])
# Run the suite only when executed as a script; the '__file__' check keeps
# unittest.main() from firing in interactive/exec environments without a file.
if __name__ == '__main__' and '__file__' in globals():
    unittest.main()
| StarcoderdataPython |
1784762 | from typing import Any
""" Class to encode and decode the input string """
class HuffmanCoding:
    """Builds a Huffman tree for a text and encodes/decodes with it."""

    def recur(self, node, vi, val=''):
        """Walk the tree, accumulating 0/1 edge labels into codes in *vi*."""
        code_so_far = val + str(node.hufcode)
        if node.left:
            self.recur(node.left, vi, code_so_far)
        if node.right:
            self.recur(node.right, vi, code_so_far)
        # Leaves carry a single symbol; record its finished code.
        if not node.left and not node.right:
            vi[node.sym] = code_so_far
        return vi

    def build(self, text: str) -> Any:
        """
        Build a Huffman code table for *text*, print an encode/decode
        demo of the text, and return the {symbol: code} table.
        """
        frequencies = {}
        for ch in text:
            frequencies[ch] = frequencies.get(ch, 0) + 1

        nodes = [Node(count, sym) for sym, count in frequencies.items()]
        while len(nodes) > 1:
            # Merge the two least-frequent nodes under a new parent.
            nodes = sorted(nodes, key=lambda x: x.freq)
            left, right = nodes[0], nodes[1]
            left.hufcode = '0'
            right.hufcode = '1'
            merged = Node(left.freq + right.freq, left.sym + right.sym,
                          left, right)
            nodes = nodes[2:] + [merged]

        codes = self.recur(nodes[0], {})
        encoded = self.encode(codes, text)
        print(encoded)
        decoded = self.decode(codes, encoded)
        print(decoded)
        return codes

    def encode(self, Dic: Any, text: str) -> str:
        """Concatenate the code of each character of *text*."""
        return ''.join(Dic[ch] for ch in text)

    def decode(self, Dic: Any, text: str) -> str:
        """Invert the code table and greedily match code prefixes of *text*."""
        reverse = {code: sym for sym, code in Dic.items()}
        decoded = ''
        width = 1
        while text:
            candidate = text[:width]
            if candidate in reverse:
                decoded += reverse[candidate]
                text = text[width:]
                width = 1
            else:
                width += 1
        return decoded
""" Class to generate the Node/Tree """
class Node:
    """A Huffman tree node: frequency, covered symbols, children and 0/1 label."""

    def __init__(self, freq, sym, left=None, right=None):
        self.freq = freq    # combined frequency of all symbols below
        self.sym = sym      # concatenation of covered symbols
        self.left = left
        self.right = right
        self.hufcode = ''   # '0'/'1' edge label assigned during build
print(HuffmanCoding().build("AABCCDDDD"))
| StarcoderdataPython |
176760 | <filename>test/bar_test.py
# -*- coding:utf-8 -*-
'''
Created on 2017/9/24
@author: <NAME>
'''
import unittest
import tushare.stock.trading as fd
class Test(unittest.TestCase):
    """Smoke test for tushare's bar() price-history fetcher."""

    def set_data(self):
        # Fixture values; empty start/end means the library's defaults.
        self.code = '300770'
        self.start = ''
        self.end = ''

    def test_bar_data(self):
        """Fetch and print bar data for the fixture security."""
        self.set_data()
        print(fd.bar(self.code, self.start, self.end))
# Script entry point; the commented line shows how to run a single test case.
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
3325468 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import os
from setuptools import setup, find_packages
# Absolute path of this file's directory; README/CHANGELOG are read relative
# to it so the build works from any working directory.
here = os.path.abspath(os.path.dirname(__file__))

with codecs.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
    readme = f.read()

with codecs.open(os.path.join(here, 'CHANGELOG.rst'), encoding='utf-8') as f:
    history = f.read()

# No runtime dependencies beyond Kinto itself (installed separately).
requirements = []
# Package metadata; long_description combines the README with the changelog
# so PyPI shows both.
setup(
    name='kinto-woleet',
    version='0.1.0.dev0',
    description="Woleet Anchors Callback URL",
    long_description=readme + '\n\n' + history,
    author="Mozilla",
    author_email='<EMAIL>',
    url='https://github.com/Kinto/kinto-woleet',
    packages=find_packages(),
    include_package_data=True,
    install_requires=requirements,
    license="Apache License (2.0)",
    zip_safe=False,
    keywords='kinto woleet',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ]
)
| StarcoderdataPython |
34173 | '''
191. Number of 1 Bits
Write a function that takes an unsigned integer and returns the number of ’1' bits it has (also known as the Hamming weight).
For example, the 32-bit integer ’11' has binary representation 00000000000000000000000000001011, so the function should return 3.
'''
class Solution(object):
    """LeetCode 191 -- count the set bits (Hamming weight) of an unsigned int."""

    def hammingWeight(self, n):
        """
        :type n: int
        :rtype: int

        Counts the '1' bits of *n*. The original built a throwaway list
        (appending one element per set bit, then taking len()) while
        halving n; counting directly on the binary representation is
        simpler and avoids the intermediate storage.
        """
        return bin(n).count('1')
# Ad-hoc manual check: 11 == 0b1011 has three set bits.
if __name__ == '__main__':
    solution = Solution()
    t1=11
    print(solution.hammingWeight(t1))
| StarcoderdataPython |
3289463 | <reponame>mrshu/stash
#!/usr/bin/env python
import sys, os, re, json, argparse, time, pytz
import console
from datetime import datetime, timedelta
from difflib import unified_diff, ndiff
def argue():
    """Parse command-line options; dumps them as JSON to stderr when verbose."""
    parser = argparse.ArgumentParser()
    for short_opt, long_opt in (('-v', '--verbose'),
                                ('-s', '--symbolic'),
                                ('-f', '--force')):
        parser.add_argument(short_opt, long_opt, action='store_true')
    for positional in ('lhs', 'rhs'):
        parser.add_argument(positional)
    args = parser.parse_args()
    if args.verbose:
        json.dump(vars(args), sys.stderr, indent=4)
    return args
def ln(lhs, rhs, symbolic=False):
    """
    Create a hard (or, with *symbolic*, a symbolic) link at *rhs* pointing
    to *lhs*, mirroring the basic safety checks of the Unix ln command.
    Exits the process with status 1 on any problem.
    """
    if not os.path.exists(lhs):
        sys.stderr.write('%s not found\n' % lhs)
        sys.exit(1)
    # Linking into a directory: place the link inside it, named after lhs.
    if os.path.isdir(rhs):
        rhs = '%s/%s' % (rhs, os.path.basename(lhs))
    if os.path.isfile(rhs):
        sys.stderr.write('%s already exists\n' % rhs)
        sys.exit(1)
    if os.path.islink(rhs):
        sys.stderr.write('%s already linked\n' % rhs)
        sys.exit(1)
    make_link = os.symlink if symbolic else os.link
    make_link(lhs, rhs)
    return
def main():
    """Entry point: clear the Pythonista console, parse args, create the link."""
    console.clear()
    options = argue()
    ln(options.lhs, options.rhs, options.symbolic)
    return
if __name__ == '__main__': main()
| StarcoderdataPython |
1741452 | <filename>7/7/finall/models/weather.py<gh_stars>1-10
class Weather:
    """Simple value object holding the current weather readings for a city."""

    def __init__(self, city_name, temp, feel, temp_max, temp_min, humidity):
        self.city_name = city_name
        self.temp = temp            # current temperature
        self.feel = feel            # "feels like" temperature
        self.temp_min = temp_min
        self.temp_max = temp_max
        self.humidity = humidity

    def __repr__(self):
        # Polish-language summary; temp_min/temp_max are intentionally not
        # part of the printed report.
        report_lines = (
            f"Aktualna warunki pogodowe dla miasta {self.city_name}: ",
            f" Temperatura: {self.temp}",
            f" Temperatura odczuwalna: {self.feel}",
            f" Wilgotność: {self.humidity}",
        )
        return "\n".join(report_lines)
| StarcoderdataPython |
3267022 | #! /usr/bin/env python3
import sys
import os
from pydub import AudioSegment
from pydub.playback import play
def read_header(lines):
    """Scan a TJA header and return (pos, bpm, offset, header, wave).

    *pos* is the index of the ``#START`` line (or the last index scanned
    if none is found; ``None`` for empty input).  BPM/OFFSET/WAVE and
    DEMOSTART lines are consumed; TITLE lines are copied with a
    ``practice_`` prefix; every other line is copied verbatim.
    """
    bpm = offset = audio_name = None
    pos = None
    header = []
    for idx, line in enumerate(lines):
        pos = idx
        if line[:4] == "BPM:":
            bpm = float(line[4:])
        elif line[:7] == "OFFSET:":
            offset = float(line[7:])
        elif line[:5] == "WAVE:":
            audio_name = line[5:].strip()
        elif line[:6] == "TITLE:":
            header.append(line[:6] + "practice_" + line[6:].strip() + "\n")
        elif line.strip() == "#START":
            break
        elif line[:10] == "DEMOSTART:":
            continue
        else:
            header.append(line)
    return (pos, bpm, offset, header, audio_name)
def get_delimited_lines(lines, pos, offset, bpm, start, end):
    """Collect chart lines whose simulated playback time lies in (start, end) ms.

    Walks the note lines from *pos* (just after ``#START``) with a simulated
    clock and returns a tuple:
    (selected lines, time of the first selected line, time after the last
    scanned line, BPM at the first selected line, measure at the first
    selected line).
    """
    delimited_lines = []
    # Clock in ms; offset is presumably in seconds (scaled by 1000) -- TODO confirm.
    cur_time = offset * -1 * 1000
    measure = 4
    start_audio = None
    start_bpm = None
    start_measure = None
    while lines[pos].strip() != "#END" and cur_time < end:
        # Select the line *before* applying its timing side effects, so a
        # directive on the cut boundary is still included.
        if cur_time > start:
            if start_audio == None:
                # Remember the state at the first included line; the caller
                # uses it to rebuild a standalone chart header.
                start_audio = cur_time
                start_bpm = bpm
                start_measure = measure
            delimited_lines.append(lines[pos])
        if lines[pos][:9] == "#MEASURE ":
            timing_sig = lines[pos][9:].strip().split('/')
            # Normalise e.g. 6/8 time to its length in quarter-note beats.
            measure = float(timing_sig[0]) / (float(timing_sig[1]) / 4)
        elif lines[pos][:11] == "#BPMCHANGE ":
            bpm = float(lines[pos][11:])
        elif lines[pos][:7] == "#DELAY ":
            cur_time = cur_time + float(lines[pos][7:]) * 1000
        elif lines[pos].strip() != "" and lines[pos].strip()[-1] == ',':
            # A measure of notes ends with ','; advance the clock one measure.
            cur_time = cur_time + (60 / bpm) * measure * 1000
        pos += 1
    return (delimited_lines, start_audio, cur_time, start_bpm, start_measure)
def export_audio(start, end, audio, filename, fadeout=0):
    """Export audio[start:end] (+ optional fade-out tail, all in ms) as ogg."""
    clip = audio[start:][:(end - start + fadeout)]
    if fadeout > 0:
        clip = clip.fade_out(fadeout)
    clip.export(filename, format="ogg")
def write_tja(header, bpm, audio_name, filename, lines, measure):
    """Write a standalone practice .tja chart to *filename*.

    Emits the *header* lines, the rebuilt WAVE/BPM/OFFSET fields, and the
    delimited note *lines* wrapped in #START/#END with the starting
    #MEASURE directive.
    """
    # Context manager so the file is flushed and closed even on error
    # (the original leaked the open handle).
    with open(filename, "w") as out_file:
        out_file.writelines(header)
        out_file.write("WAVE:" + audio_name + "\n")
        out_file.write("BPM:" + str(bpm) + "\n")
        out_file.write("OFFSET:0\n\n")
        if lines[0].strip() != "#START":
            out_file.write("#START\n")
        out_file.write("#MEASURE " + str(measure) + "/4\n")
        out_file.writelines(lines)
        out_file.write("#END\n")
def main(filename):
    """Cut a practice section [argv[2], argv[3]] (ms) out of a .tja chart.

    Writes ``practice_<chart>.tja`` and ``practice_<audio>.ogg`` next to
    the source chart; exits with status 1 if the chart header is invalid.
    """
    # Close the chart file deterministically (the original leaked it).
    with open(filename, "r") as file:
        lines = file.readlines()
        tja_dir = os.path.dirname(os.path.realpath(file.name))
        tja_basename = os.path.basename(file.name)
    (pos, bpm, offset, header, audio_name) = read_header(lines)
    if (bpm is None or offset is None or audio_name is None):
        print("Error: BPM, offset or audio file not found", file=sys.stderr)
        exit(1)
    (delimited_lines, start, end, start_bpm, start_measure) = get_delimited_lines(
        lines, pos, offset, bpm, float(sys.argv[2]), float(sys.argv[3]))
    audio = AudioSegment.from_file(os.path.join(tja_dir, audio_name))
    cut_audio_name = "practice_" + os.path.splitext(audio_name)[0] + ".ogg"
    if len(sys.argv) > 4:
        # Optional 4th argument: fade-out duration in ms.
        export_audio(start, end, audio, os.path.join(tja_dir, cut_audio_name), int(sys.argv[4]))
    else:
        export_audio(start, end, audio, os.path.join(tja_dir, cut_audio_name))
    write_tja(header, start_bpm, cut_audio_name,
              os.path.join(tja_dir, "practice_" + tja_basename),
              delimited_lines, start_measure)
if __name__ == "__main__":
    # Require at least: script name, chart file, start ms, end ms.
    if len(sys.argv) >= 4:
        main(sys.argv[1])
    else:
        print("Usage: tjapractice.py [tja file] [start time] [end time] [fade-out (optional)] (in ms)", file=sys.stderr)
1696300 | # Find the maximum element in an array which is first increasing and then decreasing
# Given an array of integers which is initially increasing and then decreasing,
# find the maximum value in the array.
# Examples :
# Input: arr[] = {8, 10, 20, 80, 100, 200, 400, 500, 3, 2, 1}
# Output: 500
# Input: arr[] = {1, 3, 50, 10, 9, 7, 6}
# Output: 50
# Corner case (No decreasing part)
# Input: arr[] = {10, 20, 30, 40, 50}
# Output: 50
# Corner case (No increasing part)
# Input: arr[] = {120, 100, 80, 20, 0}
# Output: 120
def findMax(arr, low, high):
    """Return the maximum of the bitonic array arr[low..high].

    The array is assumed strictly increasing then strictly decreasing
    (either part may be empty); O(log n) iterative binary search.
    """
    while True:
        if low == high:
            return arr[low]
        if high == low + 1:
            # Two candidates left: pick the larger end.
            return arr[low] if arr[low] >= arr[high] else arr[high]
        mid = (low + high) // 2
        if arr[mid] > arr[mid + 1] and arr[mid] > arr[mid - 1]:
            return arr[mid]        # local peak == global maximum
        if arr[mid] > arr[mid + 1]:
            high = mid - 1         # in the descending part: peak is left
        else:
            low = mid + 1          # in the ascending part: peak is right
sample = [1, 3, 50, 10, 9, 7, 6]
print(findMax(sample, 0, len(sample) - 1))
| StarcoderdataPython |
43138 | from box import Box
from src import repos
from src.processors import SelfIteratingProcessor
from src.processors import use_cases
def CallbackDelivery(config: Box = None):
    # Build a self-iterating processor that repeatedly delivers queued
    # callbacks from the delivery outbox to the configured channel.
    # NOTE(review): despite the ``Box`` annotation the default is ``None``;
    # a ``None`` config would crash on the attribute accesses below, so
    # callers are presumably expected to always pass a config -- confirm.
    use_case = use_cases.DeliverCallbackUseCase(
        delivery_outbox_repo=repos.DeliveryOutbox(config.DELIVERY_OUTBOX_REPO),
        topic_base_self_url=config.TOPIC_BASE_SELF_URL,
        channel_url=config.CHANNEL_URL
    )
    return SelfIteratingProcessor(use_case=use_case)
| StarcoderdataPython |
1747545 | from __future__ import division
from __future__ import absolute_import
import pytest
from returns.future import FutureResult, future_safe
from returns.io import IOResult, IOSuccess
@future_safe
async def _coro(arg):
    """Return ``1 / arg``; ``arg == 0`` exercises the failure path
    (the ZeroDivisionError is captured by ``@future_safe``)."""
    return 1 / arg
@pytest.mark.anyio()
async def test_future_safe_decorator():
    """A @future_safe coroutine yields a FutureResult wrapping IOSuccess."""
    result = _coro(2)
    assert isinstance(result, FutureResult)
    assert await result == IOSuccess(0.5)
@pytest.mark.anyio()
async def test_future_safe_decorator_failure():
    """Division by zero inside @future_safe surfaces as an IO failure."""
    result = _coro(0)
    assert isinstance(result, FutureResult)
    outcome = await result
    assert isinstance(outcome, IOResult.failure_type)
| StarcoderdataPython |
110447 | import click
from coder.app import create_app
from coder.extensions import db
from coder.blueprints.billing.gateways.stripecom import Plan as PaymentPlan
# Create an app context for the database connection.
app = create_app()
# Bind the Flask app onto the SQLAlchemy handle so queries work
# outside of a request context.
db.app = app
@click.group()
def cli():
    """ Perform various tasks with Stripe's API. """
    # The docstring above doubles as the CLI help text rendered by click.
    pass
@click.command()
def sync_plans():
    """
    Sync (upsert) STRIPE_PLANS to Stripe.
    :return: None
    """
    if app.config['STRIPE_PLANS'] is None:
        return None
    # Iterate the plan definitions directly -- the dict keys are unused
    # (the original looped over .items() and discarded the key).
    for value in app.config['STRIPE_PLANS'].values():
        plan = PaymentPlan.retrieve(value.get('id'))
        if plan:
            # Existing plan: update the mutable fields in place.
            PaymentPlan.update(id=value.get('id'),
                               name=value.get('name'),
                               metadata=value.get('metadata'),
                               statement_descriptor=value.get(
                                   'statement_descriptor'))
        else:
            PaymentPlan.create(**value)
    return None
@click.command()
@click.argument('plan_ids', nargs=-1)
def delete_plans(plan_ids):
    """
    Delete 1 or more plans from Stripe.
    :return: None
    """
    # Delete each requested plan id in turn.
    for pid in plan_ids:
        PaymentPlan.delete(pid)
    return None
@click.command()
def list_plans():
    """
    List all existing plans on Stripe.
    :return: Stripe plans
    """
    plans = PaymentPlan.list()
    click.echo(plans)
# Register the subcommands on the click group so they are reachable
# from the command line.
cli.add_command(sync_plans)
cli.add_command(delete_plans)
cli.add_command(list_plans)
| StarcoderdataPython |
61025 | <reponame>renmengye/inc-few-shot-attractor-public
"""Runs a baseline for prototype networks for incremental few-shot learning.
Author: <NAME> (<EMAIL>)
See run_exp.py for usage.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import os
import six
import tensorflow as tf
from tqdm import tqdm
from fewshot.utils import logger
from run_exp import (get_config, get_restore_saver, get_datasets, get_model,
save_config, get_exp_logger, get_saver, restore_model,
final_log)
from train_lib import get_metadata
# Module-level logger and the TensorFlow flag registry used by main().
log = logger.get()
FLAGS = tf.flags.FLAGS
def calculate_protos(sess, model, num_classes_a, task_a_it, num_steps):
  """Computes per-class feature means (prototypes) over the training set."""
  buckets = [[] for _ in six.moves.xrange(num_classes_a)]
  for _ in six.moves.xrange(num_steps):
    x, y = task_a_it.next()
    feats = sess.run(model.h_a, feed_dict={model.inputs: x})
    # Bucket each embedding under its ground-truth class.
    for feat, cls in zip(feats, y):
      buckets[cls].append(feat)
  return np.array([np.array(b).mean(axis=0) for b in buckets])
def calculate_episode_protos(sess, model, num_classes_a, nway, episode,
                             old_and_new):
  """Computes per-class prototypes from a single episode's support set.

  When old_and_new is True, episode labels are offset by num_classes_a.
  """
  buckets = [[] for _ in six.moves.xrange(nway)]
  feats = sess.run(model.h_a, feed_dict={model.inputs: episode.x_train})
  label_offset = num_classes_a if old_and_new else 0
  for idx in six.moves.xrange(episode.x_train.shape[0]):
    buckets[episode.y_train[idx] - label_offset].append(feats[idx])
  return np.array([np.array(b).mean(axis=0) for b in buckets])
def cosine(h, protos):
  """Row-wise cosine similarity between h [N, D] and protos [K, D]."""
  numer = np.dot(h, protos.T)
  h_norm = np.sqrt(np.sum(h**2, axis=1, keepdims=True))
  p_norm = np.sqrt(np.sum(protos.T**2, axis=0, keepdims=True))
  return numer / h_norm / p_norm
def euclidean(h, protos):
  """Negative squared Euclidean distance between rows of h and protos."""
  diff = np.expand_dims(h, 1) - np.expand_dims(protos, 0)
  return -np.sum(diff**2, axis=2)
def dot(h, protos):
  """Dot-product similarity between rows of h and rows of protos."""
  return h @ protos.T
def evaluate_b(sess,
               model,
               task_it,
               num_steps,
               num_classes_a,
               num_classes_b,
               prototypes_a=None,
               old_and_new=False,
               similarity='euclidean'):
  """Evaluate the model on task-B episodes (the original docstring said
  "task A", but this routine scores the few-shot task B -- optionally
  jointly with the old task-A classes when ``old_and_new`` is True).

  Returns a dict of mean accuracies and their standard errors; the
  ``*2``/``delta`` entries measure within-old / within-new accuracy and
  the drop caused by joint classification.
  """
  acc_list = np.zeros([num_steps])
  if old_and_new:
    # Separate accumulators for old-class and new-class test items,
    # both under joint (old+new) and restricted argmax.
    acc_list_old = np.zeros([num_steps])
    acc_list_new = np.zeros([num_steps])
    acc_list_old2 = np.zeros([num_steps])
    acc_list_new2 = np.zeros([num_steps])
  it = tqdm(six.moves.xrange(num_steps), ncols=0)
  for tt in it:
    task_data = task_it.next()
    prototypes_b = calculate_episode_protos(
        sess, model, num_classes_a, num_classes_b, task_data, old_and_new)
    if old_and_new:
      all_prototypes = np.concatenate([prototypes_a, prototypes_b])
    else:
      all_prototypes = prototypes_b
    h_test = sess.run(model.h_a, feed_dict={model.inputs: task_data.x_test})
    # Nearest-prototype classification under the chosen similarity.
    if similarity == 'cosine':
      logits = cosine(h_test, all_prototypes)
    elif similarity == 'euclidean':
      logits = euclidean(h_test, all_prototypes)
    elif similarity == 'dot':
      logits = dot(h_test, all_prototypes)
    else:
      raise ValueError('Unknown similarity function')
    correct = np.equal(np.argmax(logits, axis=1),
                       task_data.y_test).astype(np.float32)
    _acc = correct.mean()
    acc_list[tt] = _acc
    if old_and_new:
      # Labels >= num_classes_a belong to the new (task-B) classes.
      is_new = task_data.y_test >= num_classes_a
      is_old = np.logical_not(is_new)
      _acc_old = correct[is_old].mean()
      _acc_new = correct[is_new].mean()
      # "*2" variants restrict the argmax to the item's own label range.
      correct_new = np.equal(
          np.argmax(logits[is_new, num_classes_a:], axis=1),
          task_data.y_test[is_new] - num_classes_a).astype(np.float32)
      _acc_new2 = correct_new.mean()
      correct_old = np.equal(
          np.argmax(logits[is_old, :num_classes_a], axis=1),
          task_data.y_test[is_old]).astype(np.float32)
      _acc_old2 = correct_old.mean()
      acc_list_old[tt] = _acc_old
      acc_list_new[tt] = _acc_new
      acc_list_new2[tt] = _acc_new2
      acc_list_old2[tt] = _acc_old2
      it.set_postfix(
          acc_b=u'{:.3f}±{:.3f}'.format(
              np.array(acc_list).sum() * 100.0 / float(tt + 1),
              np.array(acc_list).std() / np.sqrt(float(tt + 1)) * 100.0),
          acc_b_old=u'{:.3f}±{:.3f}'.format(
              np.array(acc_list_old).sum() * 100.0 / float(tt + 1),
              np.array(acc_list_old).std() / np.sqrt(float(tt + 1)) * 100.0),
          acc_b_old2=u'{:.3f}±{:.3f}'.format(
              np.array(acc_list_old2).sum() * 100.0 / float(tt + 1),
              np.array(acc_list_old2).std() / np.sqrt(float(tt + 1)) * 100.0),
          acc_b_new=u'{:.3f}±{:.3f}'.format(
              np.array(acc_list_new).sum() * 100.0 / float(tt + 1),
              np.array(acc_list_new).std() / np.sqrt(float(tt + 1)) * 100.0),
          acc_b_new2=u'{:.3f}±{:.3f}'.format(
              np.array(acc_list_new2).sum() * 100.0 / float(tt + 1),
              np.array(acc_list_new2).std() / np.sqrt(float(tt + 1)) * 100.0))
    else:
      it.set_postfix(acc_b=u'{:.3f}±{:.3f}'.format(
          np.array(acc_list).sum() * 100.0 / float(tt + 1),
          np.array(acc_list).std() / np.sqrt(float(tt + 1)) * 100.0))
  results_dict = {
      'acc': acc_list.mean(),
      'acc_se': acc_list.std() / np.sqrt(float(acc_list.size))
  }
  if old_and_new:
    results_dict['acc_old'] = acc_list_old.mean()
    results_dict['acc_old_se'] = acc_list_old.std() / np.sqrt(
        float(acc_list_old.size))
    results_dict['acc_old2'] = acc_list_old2.mean()
    results_dict['acc_old2_se'] = acc_list_old2.std() / np.sqrt(
        float(acc_list_old2.size))
    results_dict['acc_new'] = acc_list_new.mean()
    results_dict['acc_new_se'] = acc_list_new.std() / np.sqrt(
        float(acc_list_new.size))
    results_dict['acc_new2'] = acc_list_new2.mean()
    results_dict['acc_new2_se'] = acc_list_new2.std() / np.sqrt(
        float(acc_list_new2.size))
    # Accuracy drop ("delta") caused by having to pick among all classes
    # instead of only the item's own half.
    results_dict['delta_a'] = results_dict['acc_old'] - results_dict['acc_old2']
    results_dict['delta_b'] = results_dict['acc_new'] - results_dict['acc_new2']
    results_dict['delta'] = 0.5 * (
        results_dict['delta_a'] + results_dict['delta_b'])
  return results_dict
def main():
  """Runs the nearest-prototype incremental few-shot baseline.

  Restores a pretrained model, computes task-A prototypes if evaluating
  old and new classes jointly, then evaluates on the val (and optionally
  test) split of task B and logs the results.
  """
  # ------------------------------------------------------------------------
  # Flags
  nshot = FLAGS.nshot
  dataset = FLAGS.dataset
  nclasses_train = FLAGS.nclasses_b
  nclasses_val = FLAGS.nclasses_b
  nclasses_test = FLAGS.nclasses_b
  num_test = FLAGS.ntest
  is_eval = FLAGS.eval
  nepisode_final = FLAGS.nepisode_final
  run_test = FLAGS.test
  pretrain = FLAGS.pretrain
  retest = FLAGS.retest
  tag = FLAGS.tag
  # ------------------------------------------------------------------------
  # Configuration
  config = get_config(FLAGS.config)
  opt_config = config.optimizer_config
  old_and_new = config.transfer_config.old_and_new
  similarity = config.protonet_config.similarity
  # ------------------------------------------------------------------------
  # Log folder
  assert tag is not None, 'Please add a name for the experiment'
  log_folder = os.path.join(FLAGS.results, dataset, 'n{}w{}'.format(
      nshot, nclasses_val), tag)
  log.info('Experiment ID {}'.format(tag))
  if not os.path.exists(log_folder):
    os.makedirs(log_folder)
  elif not is_eval:
    # Refuse to overwrite an existing experiment unless just evaluating.
    assert False, 'Folder {} exists. Pick another tag.'.format(log_folder)
  # ------------------------------------------------------------------------
  # Model
  metadata = get_metadata(dataset)
  with log.verbose_level(2):
    model_dict = get_model(
        config,
        metadata['num_classes_a'],
        nclasses_train,
        nclasses_val,
        nclasses_test,
        is_eval=is_eval)
  model = model_dict['val']
  modelv = model_dict['val']
  # ------------------------------------------------------------------------
  # Dataset
  seed = 0
  with log.verbose_level(2):
    data = get_datasets(dataset, metadata, nshot, num_test,
                        opt_config.batch_size, opt_config.num_gpu,
                        metadata['num_classes_a'], nclasses_train, nclasses_val,
                        nclasses_test, old_and_new, seed, True)
  # ------------------------------------------------------------------------
  # Save configurations
  save_config(config, log_folder)
  # ------------------------------------------------------------------------
  # Log outputs
  restore_saver = get_restore_saver(
      retest=retest,
      cosine_a=modelv.config.protonet_config.cosine_a,
      reinit_tau=modelv.config.protonet_config.reinit_tau)
  logger = get_exp_logger(log_folder)
  saver = get_saver(log_folder)
  # ------------------------------------------------------------------------
  # Create a TensorFlow session
  sess_config = tf.ConfigProto()
  sess_config.gpu_options.allow_growth = True
  sess = tf.Session(config=sess_config)
  # ------------------------------------------------------------------------
  # Initialize model
  restore_model(
      sess, model, modelv, restore_saver, is_eval=is_eval, pretrain=pretrain)
  # ------------------------------------------------------------------------
  # Calculate prototypes A.
  if old_and_new:
    prototypes_a = calculate_protos(sess, model, model.num_classes_a,
                                    data['a_train'], nepisode_final)
  else:
    prototypes_a = None
  # ------------------------------------------------------------------------
  # Run on val set.
  results = {}
  results['val_b'] = evaluate_b(
      sess,
      model,
      data['b_val'],
      nepisode_final,
      model.num_classes_a,
      nclasses_val,
      prototypes_a=prototypes_a,
      old_and_new=old_and_new,
      similarity=similarity)
  # ------------------------------------------------------------------------
  # Run on test set.
  if run_test:
    results['test_b'] = evaluate_b(
        sess,
        model,
        data['b_test'],
        nepisode_final,
        model.num_classes_a,
        nclasses_val,
        prototypes_a=prototypes_a,
        old_and_new=old_and_new,
        similarity=similarity)
  # ------------------------------------------------------------------------
  # Log results.
  final_log(log_folder, results, old_and_new=old_and_new)
| StarcoderdataPython |
185074 | <gh_stars>10-100
import asyncio
import re
import time
from hashlib import md5
from _config import Config
from shared import Shared
from log import Log
class Camera:
    """RTSP client for one camera.

    Negotiates OPTIONS/DESCRIBE/SETUP/PLAY (with optional digest auth) and
    relays the media stream to connected proxy clients, either as
    interleaved data on the RTSP TCP socket or via local UDP endpoints.
    """

    def __init__(self, camera_hash, ):
        self.hash = camera_hash
        self.url = self._parse_url(Config.cameras[camera_hash]['url'])
        self.tcp_task = False  # asyncio task pumping interleaved TCP frames
        self.udp_ports, self.track_ids = [], []
        self.description = {}  # parsed SDP details (video/audio)
        self.session_id, self.rtp_info, self.realm, self.nonce = None, None, None, None
        self.cseq = 1  # RTSP sequence number, incremented per request

    async def connect(self):
        """ Open TCP socket and connect to the camera
        """
        self.udp_ports = self._get_self_udp_ports()
        try:
            self.reader, self.writer = await asyncio.open_connection(self.url['host'], self.url['tcp_port'])
        except Exception as e:
            Log.print(f"Camera: error: can't connect [{self.hash}]: {e}")
            return
        await self._request('OPTIONS', self.url['url'])
        reply, code = await self._request(
            'DESCRIBE',
            self.url['url'],
            'Accept: application/sdp')
        # 401 means the camera wants digest auth: capture realm/nonce, retry.
        if code == 401:
            self.realm, self.nonce = self._get_auth_params(reply)
            reply, code = await self._request(
                'DESCRIBE',
                self.url['url'],
                'Accept: application/sdp')
        self.description = self._get_description(reply)
        self.track_ids = self._get_track_ids(reply)
        reply, code = await self._request(
            'SETUP',
            f'{self.url["url"]}/{self.track_ids[0]}',
            self._get_transport_line(0))
        self.session_id = self._get_session_id(reply)
        # Second track (audio), if present, joins the same session.
        if len(self.track_ids) > 1:
            await self._request(
                'SETUP',
                f'{self.url["url"]}/{self.track_ids[1]}',
                self._get_transport_line(1),
                f'Session: {self.session_id}')
        self.rtp_info = None
        Log.write(f'Camera: connected [{self.hash}]')

    async def play(self):
        """ Start playing and proxy the stream to all connected clients
        """
        cmd = (
            'PLAY',
            self.url['url'],
            f'Session: {self.session_id}',
            'Range: npt=0.000-')
        if Config.tcp_mode:
            """ Receive embedded (interleaved) binary data on existing TCP socket
            """
            # Check if camera is not playing
            if not self.tcp_task:
                reply, code = await self._request(*cmd)
                self.rtp_info = self._get_rtp_info(reply)
                self.tcp_task = asyncio.create_task(self._interleave())
        else:
            reply, code = await self._request(*cmd)
            self.rtp_info = self._get_rtp_info(reply)
            await self._start_udp_server(0)
            """ Open UDP socket and start datagrams proxy
            """
            if self.description['audio']:
                await self._start_udp_server(1)

    async def close(self):
        """ Close all opened sockets and transports
        """
        self.writer.close()
        if not Config.tcp_mode:
            for _idx, transport in self.udp_transports.items():
                transport.close()
        Log.write(f'Camera: closed [{self.hash}]')

    async def _interleave(self):
        # Pump interleaved frames from the camera socket to every client.
        while True:
            frame = await self.reader.read(2048)
            clients = Shared.data[self.hash]['clients']
            if not clients:
                return
            # Iterate a snapshot so clients may disconnect mid-loop.
            for session_id in list(clients):
                await clients[session_id].write(frame)

    async def _request(self, option, url, *lines):
        """ Ask the camera option with given lines.
        Returns reply and status code
        """
        self._write(option, url, *lines)
        data = await self.reader.read(2048)
        if data[0:1] == b'$':
            # '$' marks interleaved binary data rather than an RTSP reply.
            Log.print('Camera: read: interleaved binary data')
            return None, 200
        reply = data.decode()
        Log.print(f'~~~ Camera: read:\n{reply}')
        res = re.match(r'RTSP/1.0 (\d{3}) ([^\r\n]+)', reply)
        if not res:
            Log.print('Camera: error: invalid reply')
            return reply, 0
        return reply, int(res.group(1))

    def _write(self, option, url, *lines):
        # Compose and send one RTSP request; CSeq grows monotonically.
        cmd = f'{option} {url} RTSP/1.0\r\n' \
              f'CSeq: {self.cseq}\r\n'
        auth_line = self._get_auth_line(option)
        if auth_line:
            cmd += f'{auth_line}\r\n'
        for row in lines:
            if row:
                cmd += f'{row}\r\n'
        cmd += '\r\n'
        Log.print(f'~~~ Camera: write\n{cmd}')
        self.writer.write(cmd.encode())
        self.cseq += 1

    def _get_auth_params(self, reply):
        """ Search digest auth realm and nonce in reply
        """
        realm_nonce = re.match(r'.+?\nWWW-Authenticate:.+?realm="(.+?)", ?nonce="(.+?)"', reply, re.DOTALL)
        if not realm_nonce:
            raise RuntimeError('Invalid digest auth reply')
        return realm_nonce.group(1), realm_nonce.group(2)

    def _get_auth_line(self, option):
        """ Encode auth "response" hash
        """
        if not self.realm or not self.nonce:
            return
        # RFC 2617 digest: HA1 = user:realm:pass, HA2 = method:uri,
        # response = MD5(HA1:nonce:HA2).
        ha1 = md5(f'{self.url["login"]}:{self.realm}:{self.url["password"]}'.encode('utf-8')).hexdigest()
        ha2 = md5(f'{option}:{self.url["url"]}'.encode('utf-8')).hexdigest()
        response = md5(f'{ha1}:{self.nonce}:{ha2}'.encode('utf-8')).hexdigest()
        # NOTE(review): there is no comma between realm="..." and nonce=...
        # below; some strict servers may reject this header -- verify.
        line = f'Authorization: Digest username="{self.url["login"]}", ' \
               f'realm="{self.realm}" nonce="{self.nonce}", uri="{self.url["url"]}", response="{response}"'
        return line

    def _get_description(self, reply):
        """ Search SDP (Session Description Protocol) in rtsp reply
        """
        blocks = reply.split('\r\n\r\n', 2)
        if len(blocks) < 2:
            raise RuntimeError('Invalid DESCRIBE reply')
        sdp = blocks[1].strip()
        details = {'video': {}, 'audio': {}}
        res = re.match(r'.+?\nm=video (.+?)\r\n', sdp, re.DOTALL)
        if res:
            details['video'] = {'media': res.group(1), 'bandwidth': '', 'rtpmap': '', 'format': ''}
        res = re.match(r'.+?\nm=video .+?\nb=([^\r\n]+)', sdp, re.DOTALL)
        if res:
            details['video']['bandwidth'] = res.group(1)
        res = re.match(r'.+?\nm=video .+?\na=rtpmap:([^\r\n]+)/([^\r\n]+)', sdp, re.DOTALL)
        if res:
            details['video']['rtpmap'] = res.group(1) + '/' + res.group(2)
            details['video']['clk_freq'] = int(res.group(2))
        res = re.match(r'.+?\nm=video .+?\na=fmtp:([^\r\n]+)', sdp, re.DOTALL)
        if res:
            details['video']['format'] = res.group(1)
        res = re.match(r'.+?\nm=audio (.+?)\r\n', sdp, re.DOTALL)
        if res:
            details['audio'] = {'media': res.group(1), 'rtpmap': ''}
        res = re.match(r'.+?\nm=audio .+?\na=rtpmap:([^\r\n]+)/([^\r\n]+)', sdp, re.DOTALL)
        if res:
            details['audio']['rtpmap'] = res.group(1) + '/' + res.group(2)
            details['audio']['clk_freq'] = int(res.group(2))
        return details

    def _get_rtp_info(self, reply):
        """ Search "RTP-Info" string in rtsp reply
        """
        if not reply:
            return
        res = re.match(r'.+?\r\n(RTP-Info: .+?)\r\n', reply, re.DOTALL)
        if not res:
            raise RuntimeError('Invalid RTP-Info')
        rtp_info = res.group(1)
        seq = re.findall(r';seq=(\d+)', rtp_info)
        rtptime = re.findall(r';rtptime=(\d+)', rtp_info)
        if not seq or not rtptime:
            raise RuntimeError('Invalid RTP-Info')
        return {'seq': seq, 'rtptime': rtptime, 'starttime': time.time()}

    def _get_track_ids(self, reply):
        """ Search track ID in rtsp reply
        """
        track_ids = re.findall(r'\na=control:.*?(track.*?\d)', reply, re.DOTALL)
        if not track_ids:
            raise RuntimeError('Invalid track ID in reply')
        return track_ids

    def _get_transport_line(self, idx):
        """ Build new "Transport" line for given track index
        """
        if Config.tcp_mode:
            # Channels 0-1 carry track 0 (RTP/RTCP), 2-3 carry track 1.
            channel = '0-1' if not idx else '2-3'
            return f'Transport: RTP/AVP/TCP;unicast;interleaved={channel}'
        # track_id = self.track_ids[channel]
        return 'Transport: RTP/AVP;unicast;' \
               f'client_port={self.udp_ports[idx][0]}-{self.udp_ports[idx][1]}'

    def _get_session_id(self, reply):
        """ Search session ID in rtsp reply
        """
        res = re.match(r'.+?\nSession: *([^;]+)', reply, re.DOTALL)
        if not res:
            raise RuntimeError('Invalid session ID')
        return res.group(1)

    def _get_self_udp_ports(self):
        """ Calculate port number from free user ports range
        """
        start_port = Config.start_udp_port
        # Each camera gets a contiguous block of 4 ports:
        # [video RTP, video RTCP], [audio RTP, audio RTCP].
        idx = list(Config.cameras.keys()).index(self.hash) * 4
        return [
            [start_port + idx, start_port + idx + 1],
            [start_port + idx + 2, start_port + idx + 3]]

    def _parse_url(self, url):
        """ Get URL components
        """
        parsed_url = re.match(r'(rtsps?)://((.+?):([^@]+)@)?(.+?):(\d+)(.+)', url)
        if not parsed_url:
            raise RuntimeError('Invalid rtsp url')
        # NOTE(review): group(2) already contains the trailing '@'
        # ("login:pass@"), so the replace pattern below looks like it can
        # never match, leaving credentials in the stored URL -- verify.
        return {
            'login': parsed_url.group(3),
            'password': parsed_url.group(4),
            'host': parsed_url.group(5),
            'tcp_port': int(parsed_url.group(6)),
            'url': url.replace(f'{parsed_url.group(2)}:{parsed_url.group(3)}@', '')}

    async def _start_udp_server(self, idx):
        """ Create datagram endpoint
        """
        if not hasattr(self, 'udp_transports'):
            self.udp_transports = {}
        if idx in self.udp_transports:
            return  # endpoint for this track already exists
        try:
            loop = asyncio.get_running_loop()
            transport, _protocol = await loop.create_datagram_endpoint(
                lambda: CameraUdpProtocol(self.hash, idx),
                local_addr=('0.0.0.0', self.udp_ports[idx][0]))
            self.udp_transports[idx] = transport
        except Exception as e:
            Log.print(f"Camera: error: can't create_datagram_endpoint: {e}")
class CameraUdpProtocol(asyncio.DatagramProtocol):
    """Datagram protocol relaying camera RTP packets to all clients.

    One instance serves a single (camera, track-index) pair.
    """

    def __init__(self, camera_hash, idx):
        self.hash = camera_hash   # camera identifier key into Shared.data
        self.idx = idx            # track index (0 = video, 1 = audio)

    def connection_made(self, transport):
        # Keep the transport so datagram_received can forward packets.
        self.transport = transport

    def datagram_received(self, data, addr):
        shared = Shared.data[self.hash]
        # Safety catch: the camera entry should always exist here.
        if not shared['camera']:
            return
        for client in shared['clients'].values():
            self.transport.sendto(
                data, (client.host, client.udp_ports[self.idx][0]))
| StarcoderdataPython |
1657426 | """
Diags package.
This package contains the Diagnostics class, which implements the rover diagnostics.
"""
from __future__ import print_function
import rospy
import time
import os
import subprocess
import re
import textwrap
from std_msgs.msg import String, Float32MultiArray, UInt8
from diagnostics.watcher import TopicWatcher
def _ok(*msg):
    """Log *msg* at INFO level and return it formatted in green."""
    text = ' '.join(msg)
    rospy.loginfo(text)
    return _format_font('Lime', *msg)
def _warn(*msg):
    """Log *msg* at WARN level and return it formatted in yellow."""
    text = ' '.join(msg)
    rospy.logwarn(text)
    return _format_font('Yellow', *msg)
def _err(*msg):
    """Log *msg* at ERROR level and return it formatted in red."""
    text = ' '.join(msg)
    rospy.logerr(text)
    return _format_font('Red', *msg)
def _format_font(color, *msg):
str = '<font color="' + color + '" size=2>'
str += ' '.join(msg)
str += "</font>"
return str
def _is_failed(statuses):
    """True when at least one watcher reports the FAILED status."""
    return any(stat == TopicWatcher.FAILED_STATUS for stat in statuses)
def _is_warning(statuses):
    """True when at least one watcher reports the WARNING status."""
    return any(stat == TopicWatcher.WARNING_STATUS for stat in statuses)
def _is_init(statuses):
    """True when at least one watcher is still initializing."""
    return any(stat == TopicWatcher.INIT_STATUS for stat in statuses)
def _all_ok(statuses):
    """Return True iff every watcher status is ACTIVE."""
    # Idiomatic all() instead of the original manual early-return loop.
    return all(stat == TopicWatcher.ACTIVE_STATUS for stat in statuses)
class Diagnostics:
    """
    Diagnostics class.
    Publish diagnostics on two topics:
    /rovername/diagnostics - Numerical data for the GUI
    /diagsLog - Diagnostic messages.
    """
    # Rover lifecycle states tracked by run().
    INITIALIZING = 0
    READY = 1
    FAILED = 2
    FAILURE_INSTRUCTIONS = ' '.join(
        textwrap.wrap(
            textwrap.dedent(
                '''
                Please troubleshoot this failure and replace the rover if
                necessary. If you can fix the issue on this rover, you must
                re-deploy its code and re-position it in a valid starting
                position near the home plate. If you can't diagnose the problem
                or choose not to replace the rover, please ensure the rover
                stops and remains in its current location.
                '''
            )
        )
    )

    def __init__(self):
        # Rover name is the ROS namespace with the surrounding slashes removed.
        self._rover_name = rospy.get_namespace()
        self._rover_name = self._rover_name.strip('/')
        self._diags_log = rospy.Publisher('/diagsLog', String, queue_size=2, latch=True)
        self._diags_msg = rospy.Publisher('diagnostics', Float32MultiArray, queue_size=1)
        self._mode_pub = rospy.Publisher('mode', UInt8, queue_size=1, latch=True)
        self._update_rate = 1 # Hz
        self._r = rospy.Rate(self._update_rate)
        self._status = Diagnostics.INITIALIZING
        # Look for Gazebo
        self._is_simulator = False
        topics = rospy.get_published_topics()
        for t in topics:
            if t[0] == '/gazebo/link_states':
                self._is_simulator = True
        if self._is_simulator:
            self._diags_log.publish(_ok("Simulated rover", self._rover_name, 'is initializing.'))
        else:
            self._diags_log.publish(_ok("Physical rover", self._rover_name, 'is initializing.'))
        # FIXME: Need to sub the rover's wifi interface
        self.interface = 'wlp0s20f3'
        # Topics whose liveness is monitored by TopicWatcher instances.
        self.topics = [
            "imu",
            "odom",
            "odom/filtered",
            "sonarLeft", "sonarRight", "sonarCenter",
            "targets"
        ]
        self._watchers = [TopicWatcher(t) for t in self.topics]
        self._last_if_bytes = self._get_if_bytes()
        self._last_wall = time.time()
        self._last_ros = rospy.get_time()

    def _get_if_bytes(self):
        """Return total bytes sent + received on the wifi interface."""
        total = 0
        try:
            for fx in ["/sys/class/net/{}/statistics/tx_bytes".format(self.interface),
                       "/sys/class/net/{}/statistics/rx_bytes".format(self.interface)]:
                with open(fx) as f:
                    total += int(f.readline())
        except IOError as e:
            # This can happen in the simulator
            print ('Could not open interface', self.interface)
        return total

    def do_stop(self):
        """Flush watcher messages, command mode 1 (stop), and log the stop."""
        self.do_messages()
        msg = UInt8()
        msg.data = 1
        self._mode_pub.publish(msg)
        self._diags_log.publish(_err('Rover', self._rover_name, 'is stopping!'))

    def do_messages(self):
        """Publish each watcher's current warning/failure message."""
        for w in self._watchers:
            stat, msg = w.get_state_message()
            if stat == TopicWatcher.WARNING_STATUS:
                self._diags_log.publish(_warn(w.get_message()))
            if stat == TopicWatcher.FAILED_STATUS:
                self._diags_log.publish(_err(w.get_message()))

    def run(self):
        """
        Execute the diagnostics logic. Returns if the rover has failed.
        """
        # Loop until we get the shutdown message.
        while not rospy.is_shutdown():
            statuses = []
            # Update all of the topic watchers.
            for w in self._watchers:
                stat = w.check()
                statuses.append(stat)
            if self._status == Diagnostics.INITIALIZING:
                if _is_failed(statuses):
                    self._diags_log.publish(
                        _err("Rover", self._rover_name, 'has failed.',
                             Diagnostics.FAILURE_INSTRUCTIONS)
                    )
                    self._status = Diagnostics.FAILED
                    self.do_stop()
                if _is_warning(statuses):
                    self.do_messages()
                if _all_ok(statuses):
                    self._diags_log.publish(_ok("Rover", self._rover_name, 'is ready.'))
                    self._status = Diagnostics.READY
            elif self._status == Diagnostics.READY:
                if _is_failed(statuses):
                    self._diags_log.publish(
                        _err("Rover", self._rover_name, 'has failed.',
                             Diagnostics.FAILURE_INSTRUCTIONS)
                    )
                    self._status = Diagnostics.FAILED
                    self.do_stop()
                if _is_warning(statuses):
                    self.do_messages()
            elif self._status == Diagnostics.FAILED:
                # Terminal state: keep looping but take no further action.
                pass
            else:
                raise ValueError("Bad state!")
            if self._is_simulator:
                # Compute timeslip (simulated-time rate vs wall-clock time).
                curr_wall = time.time()
                curr_ros = rospy.get_time()
                rate = (self._last_ros - curr_ros) / (self._last_wall - curr_wall)
                self._last_wall = curr_wall
                self._last_ros = curr_ros
                data = [0,0,rate]
            else:
                # Calculate Bps from the interface byte counter delta.
                curr_if_bytes = self._get_if_bytes()
                Bps = float(curr_if_bytes - self._last_if_bytes) / (1.0 / float(self._update_rate))
                self._last_if_bytes = curr_if_bytes
                # Link quality
                link = 0
                try:
                    iwconfig = subprocess.check_output(['iwconfig', self.interface])
                    m = re.search(r'Link Quality=(\d+)/70', iwconfig)
                    if m is not None:
                        link = int(m.group(1))
                except Exception as e:
                    link = 0
                data = [link,Bps,-1]
            self._diags_msg.publish(Float32MultiArray(data=data))
            self._r.sleep()
        # The run loop has exited. Stop the rover and shutdown.
        self.do_stop()
        self._r.sleep()
        self._r.sleep()
        self._r.sleep()
| StarcoderdataPython |
3202408 | from flask import current_app
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from itsdangerous import BadSignature
from app.models.user import User
def generate_auth_token(user_class, expiration):
    """Serialize the user's id into a signed token valid for *expiration* s."""
    serializer = Serializer(current_app.config['SECRET_KEY'],
                            expires_in=expiration)
    token = serializer.dumps({'id': user_class.id})
    return token.decode('ascii')
def verify_auth_token(token):
    """Return the User encoded in *token*, or None if the signature is bad."""
    serializer = Serializer(current_app.config['SECRET_KEY'])
    try:
        payload = serializer.loads(token)
    except BadSignature:
        # Tampered or otherwise invalid token -- treat as anonymous.
        return None
    return User.query.get(payload['id'])
| StarcoderdataPython |
27628 | <reponame>fishface60/python-flock
#!/usr/bin/python
# Copyright (c) 2015, <NAME>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''Python Library/cli for providing a higher level interface to flock(2)'''
__version__ = '0.0.0'
__all__ = ('take_lock', 'release_lock', 'lockfile')
from contextlib import contextmanager
from errno import EINTR, EAGAIN, EBADF
from fcntl import flock, LOCK_SH, LOCK_EX, LOCK_NB, LOCK_UN
from multiprocessing import Pipe, Process
import os
from os import strerror
from signal import signal, SIGALRM, setitimer, ITIMER_REAL
from sys import exit
def _set_alarm_and_lock(fd, pipew, timeout, shared):
    """Child-process helper for take_lock.

    Arms a SIGALRM one-shot timer, then blocks in flock(2) on *fd* and
    reports the outcome (None on success, the raised exception on
    failure) back to the parent through *pipew* before exiting.

    NOTE(review): this relies on the multiprocessing 'fork' start method
    so the child shares the parent's open file description for *fd* and
    the acquired lock is therefore held by the parent too — confirm on
    platforms where 'spawn' is the default.
    """
    try:
        # TODO: How can you deal with the race where the signal could
        # be delivered before you lock, so instead of being woken up
        # when the signal is delivered, we block forever.
        # No-op handler: SIGALRM delivery just interrupts flock with
        # EINTR instead of terminating the process.
        signal(SIGALRM, lambda *_: None)
        setitimer(ITIMER_REAL, timeout)
        flock(fd, LOCK_SH if shared else LOCK_EX)
    except BaseException as e:
        # This loses the traceback, but it's not pickleable anyway
        pipew.send(e)
        exit(1)
    else:
        pipew.send(None)
        exit(0)
def take_lock(fd, timeout=None, shared=False):
    '''Take a lock on a file descriptor
    If timeout is 0 the lock is taken without blocking,
    if timeout is None we block indefinitely,
    if timeout is a positive number we time out in that many seconds.
    If shared is True this is a shared lock,
    so can lock with other shared locks,
    if shared is False this is an exclusive lock.
        with open(path, 'r') as lock:
            take_lock(lock.fileno(), timeout, shared)
    '''
    if timeout is None or timeout == 0:
        # Fast path: a single flock call, blocking (timeout=None) or
        # non-blocking (timeout=0, adds LOCK_NB).
        flags = (LOCK_SH if shared else LOCK_EX)
        flags |= (LOCK_NB if timeout == 0 else 0)
        flock(fd, flags)
        return
    # Timed path: acquire the lock from a child process that arms a
    # SIGALRM deadline (see _set_alarm_and_lock); the child reports the
    # outcome over a one-way pipe before exiting.
    piper, pipew = Pipe(duplex=False)
    p = Process(target=_set_alarm_and_lock,
                args=(fd, pipew, timeout, shared))
    p.start()
    err = piper.recv()
    p.join()
    if err:
        if isinstance(err, IOError) and err.errno == EINTR:
            # The alarm interrupted flock: surface it as a timeout (EAGAIN).
            raise IOError(EAGAIN, strerror(EAGAIN))
        raise err
def release_lock(fd):
    """Drop any flock(2) lock held on file descriptor *fd*.

    Usage::

        release_lock(lock.fileno())
    """
    # LOCK_UN removes shared and exclusive locks alike; unlocking an
    # already-unlocked descriptor is a harmless no-op.
    return flock(fd, LOCK_UN)
class _Lockfile(object):
def __init__(self, fd):
self.fd = fd
def lock(self, *args, **kwargs):
return take_lock(self.fd, *args, **kwargs)
def unlock(self):
return flock(self.fd, LOCK_UN)
@contextmanager
def lockfile(path):
    '''Context manager yielding a _Lockfile handle for *path*.

        with lockfile(path) as lockfobj:
            lockfobj.lock(timeout=0, shared=False)
    '''
    descriptor = os.open(path, os.O_RDONLY)
    try:
        yield _Lockfile(descriptor)
    finally:
        # Tolerate a descriptor the caller already closed (EBADF);
        # any other close failure is a real error and propagates.
        try:
            os.close(descriptor)
        except OSError as exc:
            if exc.errno != EBADF:
                raise
if __name__ == '__main__':
    from argparse import ArgumentParser
    from subprocess import call
    # CLI: lock a file (or an inherited fd) and optionally run a command.
    parser = ArgumentParser(description=__doc__)
    parser.add_argument('--version', action='version',
                        version=('%(prog)s ' + __version__))
    # Lock type: shared (read) vs exclusive (write; the default).
    parser.add_argument('--shared', action='store_true', default=False)
    parser.add_argument('--exclusive', dest='shared', action='store_false')
    # Blocking behaviour: wait forever (default), fail fast, or time out
    # after a number of seconds.
    parser.add_argument('--timeout', default=None, type=int)
    parser.add_argument('--wait', dest='timeout', action='store_const', const=None)
    parser.add_argument('--nonblock', dest='timeout', action='store_const', const=0)
    parser.add_argument('file')
    parser.add_argument('argv', nargs='*')
    opts = parser.parse_args()
    if len(opts.argv) == 0:
        # No command given: `file` names an inherited file descriptor;
        # the lock persists for as long as the caller keeps the fd open.
        fd = int(opts.file)
        take_lock(fd, opts.timeout, opts.shared)
    else:
        # With a command: lock the named file, run the command, and exit
        # with its status; the lock is released when the fd is closed.
        with lockfile(opts.file) as lock:
            lock.lock(timeout=opts.timeout, shared=opts.shared)
            exit(call(opts.argv))
| StarcoderdataPython |
1649473 | <gh_stars>1-10
import requests
from urllib.parse import urlparse
from urllib.request import urljoin
__all__ = 'Config', 'Nord'
class Config(object):
    """
    Nord Configuration Client.

    Holds the API base URL, the endpoint table and (optionally) the
    credentials used to request a token.
    """
    # Root of the NordVPN public API.
    base = 'https://api.nordvpn.com'
    # Logical name -> URL path for every endpoint this client knows about.
    endpoints = {
        'address': '/user/address',
        'config': '/files/zipv2',
        'nameserver': '/dns/smart',
        'server': '/server',
        'stats': '/server/stats',
        'user': '/user/databytoken'
    }
    def __init__(self, username=None, password=None):
        """Store credentials; with both set, expose the token endpoint.

        :param username: NordVPN account name (None for anonymous use).
        :param password: NordVPN account password (was corrupted to a
            redaction placeholder in the checked-in source; restored to
            a None default).
        """
        self.username = username
        self.password = password
        if username and password:
            # Copy first: `endpoints` is a class attribute, and mutating
            # it in place would leak this user's token path into every
            # other instance.
            self.endpoints = dict(self.endpoints)
            # NOTE(review): key is spelled 'oath' (not 'oauth') in the
            # original; kept as-is since callers may look it up by name.
            self.endpoints['oath'] = '/token/token/{username}'.format(
                username=username
            )
    def __repr__(self):
        # e.g. "<Config [alice]>" or "<Config [anonymous]>".
        username = self.username if self.username else 'anonymous'
        name = self.__class__.__name__
        return '<{name} [{username}]>'.format(
            name=name,
            username=username
        )
    @property
    def headers(self):
        """Default HTTP headers: UA, Host derived from `base`, Close."""
        base = urlparse(self.base)
        return {
            'User-Agent': '{app}/{version}'.format(
                app='NordVPN Client',
                version='0.0.1',
            ),
            'Host': base.netloc,
            'Connection': 'Close'
        }
class Nord(Config):
    """
    A Nord client that interacts with the api.
    """
    def __init__(self, *args, **kwargs):
        # Plain super(): the original `super(self.__class__, self)`
        # recurses forever as soon as this class is subclassed, because
        # self.__class__ is then the subclass, not Nord.
        super().__init__(*args, **kwargs)
    def __getattr__(self, name):
        """Resolve unknown attributes naming an endpoint as GET requests."""
        if name in self.api:
            return self.request(name)
        else:
            return super().__getattribute__(name)
    @property
    def api(self):
        """Endpoint name -> absolute URL built from `base`."""
        return {
            k: urljoin(self.base, v) for k, v in self.endpoints.items()
        }
    def request(self, endpoint):
        """Perform a GET against the named endpoint with default headers."""
        return requests.get(self.api[endpoint], headers=self.headers)
| StarcoderdataPython |
3228312 | from django.apps import AppConfig
class PytuidConfig(AppConfig):
    """Django application configuration for the pyTUID app."""
    # Dotted module path Django uses to register this application.
    name = 'pyTUID'
| StarcoderdataPython |
1702600 | <filename>pvlibs/process_data/models/recombination.py
'''
'''
''' Imports '''
# data array processing
import numpy as np
''' Recombination Lifetime Calculation Functions '''
def calc_tau_aug(_dn, _n, _p, _n_0, _p_0, _n_i_eff, _T):
    '''Auger recombination lifetime (Richter et al. 2012 parameterisation).

    Valid at 300 K only.

    Args:
        _dn (float): excess minority carrier concentration [ / cm^-3]
        _n (float): total electron concentration [ / cm^-3]
        _p (float): total hole concentration [ / cm^-3]
        _n_0 (float): equilibrium electron concentration [ / cm^-3]
        _p_0 (float): equilibrium hole concentration [ / cm^-3]
        _n_i_eff (float): effective intrinsic carrier concentration [ / cm^3]
        _T (float): temperature [K]

    Returns:
        float: Auger recombination lifetime [ / s]
    '''
    # Temperature-dependent ceilings of the Coulomb enhancement factors.
    g_maxn = 235548 * _T**(-1.5013)
    g_maxp = 564812 * _T**(-1.6546)
    # Enhancement of the eeh / ehh Auger channels at moderate doping.
    g_eeh = 1 + (g_maxn - 1) * (1 - np.tanh((_n_0 / 3.3e17)**(0.66)))
    g_ehh = 1 + (g_maxp - 1) * (1 - np.tanh((_p_0 / 7.0e17)**(0.63)))
    # Normalised excess recombination term shared by all three channels.
    excess = (_n * _p - _n_i_eff**2) / _dn
    # eeh, ehh and ambipolar inverse lifetimes (coefficients in cm^6/s:
    # C_n = 8.5e-32, C_p = 2.5e-31, C_a = 3.0e-29).
    inv_tau_eeh = (8.5e-32 * g_eeh * _n_0) * excess
    inv_tau_ehh = (2.5e-31 * g_ehh * _p_0) * excess
    inv_tau_ambi = (3.0e-29 * _dn**0.92) * excess
    return (inv_tau_eeh + inv_tau_ehh + inv_tau_ambi)**-1
'''
'<NAME> 2012 - validated for high injection only (deltaN = 5e16) between -30C and 200C on N_A = 5e15 cm-3 and 421um thk wafers, p-type
C_a = (1.1E-28 / (TempInK - 193) + 2.1E-33 * TempInK) * ((delta_n / 5E+16) ^ (-0.2))
tau_auger_calc = 1 / (C_a * delta_n ^ 2)
'Altermatt 1997 - validated for 70-400K, low injection only for wafers with N_A > 1e16 cm-3 p-type wafers --> Need to fix
g_maxn = 235548 * TempInK ^ (-1.5013)
g_maxp = 564812 * TempInK ^ (-1.6546)
g_eeh = 1 + (g_maxn - 1) * (1 - Application.WorksheetFunction.Tanh((n / 5E+16) ^ (0.34)))
g_ehh = 1 + (g_maxp - 1) * (1 - Application.WorksheetFunction.Tanh((p / 5E+16) ^ (0.29)))
C_p = 7.91E-32 - 4.13E-35 * (TempInK) + 3.59E-37 * (TempInK ^ 2) 'cm^6/s Modified from Dziewior and Schmid
C_n = 2.8E-31 'cm^6/s From Dziewior and Schmid
C_p_eff = C_p * g_ehh
C_n_eff = C_n * g_eeh
invtau_auger_n = (C_n_eff * n ^ 2 * p) / delta_n
invtau_auger_p = (C_p_eff * p ^ 2 * n) / delta_n
tau_auger_calc = 1 / (invtau_auger_n + invtau_auger_p)
'Richter 2012 - validated for 300K only for all injection levels
n_0 = n - delta_n
p_0 = p - delta_n
n_i_eff = (n_0 * p_0) ^ 0.5
g_maxn = 14 '235548 * TempInK ^ (-1.5013)
g_maxp = 8.5 '564812 * TempInK ^ (-1.6546)
g_eeh = 1 + (g_maxn - 1) * (1 - Application.WorksheetFunction.Tanh((n_0 / 3.3E+17) ^ (0.66)))
g_ehh = 1 + (g_maxp - 1) * (1 - Application.WorksheetFunction.Tanh((p_0 / 7E+17) ^ (0.63)))
C_p = 2.5E-31 'cm^6/s @ 300K
C_n = 8.5E-32 'cm^6/s @ 300K
C_a = 3E-29 'cm^6/s @ 300K
C_p_eff = C_p * g_ehh
C_n_eff = C_n * g_eeh
invtau_auger_n = (C_n_eff * n_0) * ((n * p - n_i_eff ^ 2) / delta_n)
invtau_auger_n = (C_n_eff * n_0) * (n_0 + p_0 + delta_n)
invtau_auger_p = (C_p_eff * p_0) * ((n * p - n_i_eff ^ 2) / delta_n)
invtau_auger_ambi = (C_a * delta_n ^ 0.92) * ((n * p - n_i_eff ^ 2) / delta_n)
tau_auger_calc = 1 / (invtau_auger_n + invtau_auger_p + invtau_auger_ambi)
'''
def calc_tau_rad(_dn, _n, _p, _n_i_eff, _T):
    '''Radiative recombination lifetime (Altermatt et al. 2005 model).

    Valid for 100 to 390 K.

    Args:
        _dn (float): excess minority carrier concentration [ / cm^-3]
        _n (float): total electron concentration [ / cm^-3]
        _p (float): total hole concentration [ / cm^-3]
        _n_i_eff (float): effective intrinsic carrier concentration [ / cm^3]
        _T (float): temperature [K]

    Returns:
        float: radiative recombination lifetime [ / s]
    '''
    # Low-injection radiative coefficient from Trupke et al. 2003.
    B_low = 4.73e-15
    carriers = _n + _p
    # Empirical fit terms controlling the injection dependence of B_rel.
    b_1 = 1.5e18 + (1e7 - 1.5e18) / (1 + (_T / 550)**3)
    b_3 = 4.0e18 + (1e9 - 4.0e18) / (1 + (_T / 365)**3.54)
    b_min = 0.2 + (0 - 0.2) / (1 + (_T / 320)**2.5)
    B_rel = b_min + (1 - b_min) / (
        1 + (carriers / b_1)**0.54 + (carriers / b_3)**1.25)
    coeff = B_rel * B_low
    # tau_rad = dn / (B * (np - ni^2)).
    return _dn / (coeff * (_n * _p - _n_i_eff**2))
def calc_tau_sdr(_dn, _J_0, _N_M, _n_i_eff, _W):
    '''Surface-defect recombination lifetime.

    Assumes an identical recombination prefactor at both wafer surfaces.

    Args:
        _dn (float): excess minority carrier density [ / cm^3]
        _J_0 (float): emitter saturation current density [A]
        _N_M (float): net doping density of majority carrier [ / cm^3]
        _n_i_eff (float): effective intrinsic carrier concentration [ / cm^3]
        _W (float): sample width [cm]

    Returns:
        float: surface defect recombination lifetime [ / s]
    '''
    q = 1.602e-19  # elementary charge [C]
    # Inverse lifetime from the emitter saturation current model.
    inv_tau = _J_0 * (_N_M + _dn) / (_W * q * _n_i_eff**2)
    return inv_tau**-1
def calc_tau_srh(_dn, _N_M, _t_m0, _t_M0):
    '''Shockley-Read-Hall (SRH) recombination lifetime (simple model).

    Args:
        _dn (float): excess minority carrier density [ / cm^3]
        _N_M (float): net doping density of majority carrier [ / cm^3]
        _t_m0 (float): lifetime of minority charge carrier [ / s]
        _t_M0 (float): lifetime of majority charge carrier [ / s]

    Returns:
        float: effective SRH recombination lifetime [ / s]
    '''
    # Interpolate between the minority- and majority-carrier lifetimes
    # according to the injection level.
    injection_frac = _dn / (_dn + _N_M)
    return _t_m0 + _t_M0 * injection_frac
def calc_tau_srh_WIP(_dn, _N_M, _t_m0, _t_M0):
    ''' Calculate Shockley-Read-Hall (SRH) Recombintion  ### WIP ###
    SRH recombintion lifetime incorporating trap energy levels
    Args:
        _dn (float): excess minority carrier density [ / cm^3]
        _N_M (float): net doping density of majority carrier [ / cm^3]
        _t_m0 (float): lifetime of minority charge carrier [ / s]
        _t_M0 (float): lifetime of majority charge carrier [ / s]
    Returns
        tau_srh (float): effective SRH recombination lifetime [ / s]
    '''
    # NOTE(review): this WIP body is not runnable as-is — it references
    # names that are not parameters of this function (_E_t, _delta_n,
    # _n_i_eff, _t_n0, _t_p0, n, p), so any call raises NameError.  The
    # commented-out signature below shows the argument list it appears
    # to be migrating towards; confirm before wiring it up.
    #def func_srh(_delta_n, _N_dop, _t_n0, _t_p0, _n_i_eff, _wafer_type, _T, _E_t):
    #n, p = np_calc(_N_dop, _n_i_eff, _delta_n, _wafer_type, _T)
    # calculate energy levels from trap
    # (1.21 presumably the bandgap in eV, 0.0257 the 300 K thermal
    # voltage — TODO confirm units.)
    dE_t_v = (1.21 / 2) - _E_t
    dE_t_c = _E_t - (1.21 / 2)
    #
    p1 = _n_i_eff * np.exp( -dE_t_v / .0257 )
    n1 = _n_i_eff * np.exp( dE_t_c / .0257 )
    # R_SRH = ( n * p - n_i**2 ) / ( tau_p * ( n * n1 ) + tau_N * ( p * n2 ) )
    # n1 = N_c * exp( (E_t - E_c) / k_B * T )
    # p1 = N_v * exp( (E_v - E_t) / k_B * T )
    # k_np = (tau_p/tau_n)*(v_th_h/v_th_e)
    # calculate recombination lifetime
    #tau_srh = (_t_m0 + _t_M0 * _dn / (_dn + _N_M))
    tau_srh = ( (n * p - _n_i_eff**2) / ( _delta_n * ( _t_n0 * (p + p1) + _t_p0 * (n + n1) ) ) )**-1
    # return calculated effective SRH recombination lifetime
    return tau_srh
| StarcoderdataPython |
3222504 | import os
import sys
import json
import torch
import logging
from tqdm import tqdm
from . import loader_utils
from ..constant import BOS_WORD, EOS_WORD
logger = logging.getLogger()
# -------------------------------------------------------------------------------------------
# preprocess label
# ------------------------------------------------------------------------------------------
def get_span_label(start_end_pos, max_doc_word):
    """Build start/end span labels from keyphrase positions.

    Returns a dict with `s_label` (0/1 flag per document word), `e_label`
    (end index for each kept span) and `overlap_flag` (True when
    overlapping phrases had to be dropped).  Both label fields are None
    when no usable span survives filtering.
    """
    # Flatten, sort, then drop overlapping answer spans.
    ranked = loader_utils.flat_rank_pos(start_end_pos)
    kept = loader_utils.loose_filter_overlap(ranked)
    overlap_flag = len(kept) != len(ranked)

    s_label = [0] * max_doc_word
    e_label = []
    for start, end in kept:
        # Keep only well-formed spans that fit inside the document.
        if start <= end < max_doc_word:
            s_label[start] = 1
            e_label.append(end)

    # Start flags and end labels must pair up one-to-one; duplicate
    # starts (or an empty result) invalidate the example.
    if e_label and sum(s_label) == len(e_label):
        return {"s_label": s_label, "e_label": e_label, "overlap_flag": overlap_flag}
    return {"s_label": None, "e_label": None, "overlap_flag": overlap_flag}
def bert2span_preprocessor(
    examples,
    tokenizer,
    max_token,
    pretrain_model,
    mode,
    max_phrase_words,
    stem_flag=False,
):
    """Convert raw keyphrase examples into bert2span features.

    Each example is wordpiece-tokenized, truncated to at most
    ``max_token`` pieces (and the matching number of whole words), and —
    in "train" mode — annotated with start/end span labels via
    get_span_label.  Training examples whose labels cannot be built are
    dropped; ``max_phrase_words`` and ``stem_flag`` are accepted for
    interface parity but unused here.
    """
    logger.info(
        "start preparing (%s) features for bert2span (%s) ..." % (mode, pretrain_model)
    )
    overlap_num = 0
    new_examples = []
    for idx, ex in enumerate(tqdm(examples)):
        # tokenize
        tokenize_output = loader_utils.tokenize_for_bert(
            doc_words=ex["doc_words"], tokenizer=tokenizer
        )
        # Derive the word-level cutoff matching the wordpiece cutoff:
        # if the document already fits, max_token words is a safe bound;
        # otherwise map the last kept wordpiece back to its word index.
        if len(tokenize_output["tokens"]) < max_token:
            max_word = max_token
        else:
            max_word = tokenize_output["tok_to_orig_index"][max_token - 1] + 1
        new_ex = {}
        new_ex["url"] = ex["url"]
        new_ex["tokens"] = tokenize_output["tokens"][:max_token]
        new_ex["valid_mask"] = tokenize_output["valid_mask"][:max_token]
        new_ex["doc_words"] = ex["doc_words"][:max_word]
        # Invariants: one mask flag per wordpiece, one set flag per word.
        assert len(new_ex["tokens"]) == len(new_ex["valid_mask"])
        assert sum(new_ex["valid_mask"]) == len(new_ex["doc_words"])
        if mode == "train":
            parameter = {
                "start_end_pos": ex["start_end_pos"],
                "max_doc_word": len(new_ex["doc_words"]),
            }
            label_dict = get_span_label(**parameter)
            # Drop examples whose spans were all invalid or duplicated.
            if not label_dict["s_label"]:
                continue
            new_ex["s_label"] = label_dict["s_label"]
            new_ex["e_label"] = label_dict["e_label"]
            assert sum(new_ex["valid_mask"]) == len(new_ex["s_label"])
            if label_dict["overlap_flag"]:
                overlap_num += 1
        new_examples.append(new_ex)
    logger.info(
        "Delete Overlap Keyphrase : %d (overlap / total = %.2f"
        % (overlap_num, float(overlap_num / len(examples) * 100))
        + "%)"
    )
    return new_examples
# -------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------
# batch batchfy
def bert2span_converter(index, ex, tokenizer, mode, max_phrase_words):
    """ convert each batch data to tensor ; add [CLS] [SEP] tokens ;"""
    # Wrap the token sequence with BOS/EOS markers; their valid-mask
    # slots are 0 so they never count as document words.
    wrapped_tokens = [BOS_WORD] + ex["tokens"] + [EOS_WORD]
    wrapped_valid = [0] + ex["valid_mask"] + [0]

    token_ids = torch.LongTensor(tokenizer.convert_tokens_to_ids(wrapped_tokens))
    mask_tensor = torch.LongTensor(wrapped_valid)
    word_count = sum(wrapped_valid)

    if mode != "train":
        # Inference: no labels available.
        return index, token_ids, mask_tensor, word_count

    return (
        index,
        token_ids,
        mask_tensor,
        word_count,
        torch.LongTensor(ex["s_label"]),
        torch.LongTensor(ex["e_label"]),
    )
def batchify_bert2span_features_for_train(batch):
    """ train dataloader & eval dataloader ."""
    # Unzip the per-example tuples produced by bert2span_converter.
    ids = [ex[0] for ex in batch]
    docs = [ex[1] for ex in batch]
    valid_mask = [ex[2] for ex in batch]
    doc_word_lens = [ex[3] for ex in batch]
    s_label_list = [ex[4] for ex in batch]
    e_label_list = [ex[5] for ex in batch]

    bert_output_dim = 768
    batch_size = len(docs)
    max_word_len = max(doc_word_lens)  # longest document, in words

    def _pad_stack(tensors, width):
        """Zero-pad 1-D LongTensors to `width`; also return the pad mask."""
        padded = torch.zeros(batch_size, width, dtype=torch.long)
        mask = torch.zeros(batch_size, width, dtype=torch.long)
        for row, t in enumerate(tensors):
            n = t.size(0)
            if n > 0:
                padded[row, :n] = t
                mask[row, :n] = 1
        return padded, mask

    # [1][2] wordpiece ids + attention mask
    input_ids, input_mask = _pad_stack(docs, max(d.size(0) for d in docs))
    # [3] flags marking which wordpieces begin a word (mask unused)
    valid_ids, _ = _pad_stack(valid_mask, max(v.size(0) for v in valid_mask))
    # [4][5] per-word start labels + word-level active mask
    s_label, active_mask = _pad_stack(s_label_list, max_word_len)
    # [6][7] end-position labels + their mask
    e_label, end_mask = _pad_stack(
        e_label_list, max(e.size(0) for e in e_label_list))
    # [8] word-aligned BERT output buffer, filled later by the model
    valid_output = torch.zeros(batch_size, max_word_len, bert_output_dim)

    return (
        input_ids,
        input_mask,
        valid_ids,
        valid_output,
        active_mask,
        s_label,
        e_label,
        end_mask,
        ids,
    )
def batchify_bert2span_features_for_test(batch):
    """ test dataloader for Dev & Public_Valid."""
    ids = [ex[0] for ex in batch]
    docs = [ex[1] for ex in batch]
    valid_mask = [ex[2] for ex in batch]
    doc_word_lens = [ex[3] for ex in batch]

    bert_output_dim = 768
    batch_size = len(docs)
    max_word_len = max(doc_word_lens)  # longest document, in words

    # [1][2] wordpiece ids + attention mask
    token_width = max(d.size(0) for d in docs)
    input_ids = torch.zeros(batch_size, token_width, dtype=torch.long)
    input_mask = torch.zeros(batch_size, token_width, dtype=torch.long)
    for row, d in enumerate(docs):
        input_ids[row, :d.size(0)] = d
        input_mask[row, :d.size(0)] = 1

    # [3] flags marking which wordpieces begin a word
    valid_width = max(v.size(0) for v in valid_mask)
    valid_ids = torch.zeros(batch_size, valid_width, dtype=torch.long)
    for row, v in enumerate(valid_mask):
        valid_ids[row, :v.size(0)] = v

    # [4] word-level active mask from the true word counts
    active_mask = torch.zeros(batch_size, max_word_len, dtype=torch.long)
    for row, length in enumerate(doc_word_lens):
        active_mask[row, :length] = 1

    # [5] word-aligned BERT output buffer, filled later by the model
    valid_output = torch.zeros(batch_size, max_word_len, bert_output_dim)

    return (
        input_ids,
        input_mask,
        valid_ids,
        valid_output,
        active_mask,
        doc_word_lens,
        ids,
    )
| StarcoderdataPython |
29078 | <reponame>atish3/mig-website
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration altering the ``meeting_type``
    field of ``meetingminutes`` (choices as listed in the field below)."""

    # Must be applied after the committee-member migration it depends on.
    dependencies = [
        ('history', '0006_committeemember_member'),
    ]
    operations = [
        migrations.AlterField(
            model_name='meetingminutes',
            name='meeting_type',
            field=models.CharField(default=b'MM', max_length=2, choices=[(b'NI', b'New Initiatives'), (b'MM', b'Main Meetings'), (b'OF', b'Officer Meetings'), (b'AD', b'Advisory Board Meetings'), (b'CM', b'Committee Meeting Minutes')]),
        ),
    ]
| StarcoderdataPython |
102799 | <gh_stars>0
# This file will load only if OPi.GPIO fails because of a Dev environment.
# The basic idea is that when a pin is made HIGH or LOW it is writen into a file,
# and then when the input is checked it reads the file.......
from . import extendJSON as JSON
# Values
# Pin logic levels (mirror the RPi.GPIO / OPi.GPIO constants).
LOW = 0
HIGH = 1
# Modes
# Pin-numbering schemes accepted by setmode().
BCM = 11
BOARD = 10
# Pull
# Internal pull-resistor settings for setup().
PUD_OFF = 20
PUD_DOWN = 21
PUD_UP = 22
# Edges
# Edge-detection trigger kinds for the event API.
RISING = 31
FALLING = 32
BOTH = 33
# Functions
# Pin function identifiers as reported by gpio_function().
OUT = 0
IN = 1
SERIAL = 40
SPI = 41
I2C = 42
HARD_PWM = 43
UNKNOWN = -1
# Configuration stubs: the file-backed simulator ignores warnings, mode
# selection and pin setup, and always reports BCM numbering.
def setwarnings( a): pass
def setmode(a): pass
def getmode(): return BCM
def setup(channel, state, initial=0, pull_up_down=None): pass
def output(channel, state):
    """
    Set the simulated output state of a GPIO pin.

    Persists the pin -> state mapping in pins.json so a later input()
    call (possibly from another process) can read it back.

    :param channel: pin number to drive
    :param state: HIGH (1) or LOW (0)
    :return: the state that was written
    """
    # Load the persisted pin map; JSON keys come back as strings, so
    # convert them to ints.  Start empty if the file doesn't exist yet.
    try:
        pins = JSON.getJSONfile('pins.json')
        pins = {int(k): v for k, v in pins.items()}
    except EnvironmentError:
        pins = {}
    # Both branches of the original if/else performed this identical
    # assignment; one unconditional write is equivalent.
    pins[channel] = state
    JSON.writeJSONfile('pins.json', pins)
    return state
def input(channel):
    """
    Read the simulated value of a GPIO pin.

    Note: intentionally shadows the builtin ``input`` to mimic the
    RPi.GPIO / OPi.GPIO module interface.

    :param channel: pin number to read
    :return: the last state written via output(), or LOW if unknown
    """
    # Load the persisted pin map; a missing or unreadable file means no
    # pin has been driven yet, so every channel reads LOW.
    try:
        pins = JSON.getJSONfile('pins.json')
        pins = {int(k): v for k, v in pins.items()}
    except EnvironmentError:
        pins = {}
    return pins.get(channel, LOW)
# Remaining RPi.GPIO-style API surface, stubbed out: edge events and
# cleanup are meaningless for the file-backed simulator.
def cleanup(a=None): pass
def wait_for_edge(channel, edge): pass
def add_event_detect(channel, edge, callback=None, bouncetime=None): pass
def add_event_callback(channel, callback=None): pass
def remove_event_detect(channel): pass
# No edges are ever simulated, so nothing is ever "detected".
def event_detected(channel): return False
# Every simulated pin reports the OUT function.
def gpio_function(channel): return OUT
1766602 | # Copyright (c) 2021, <NAME>
# Licensed under BSD 3-Clause License. See LICENSE.txt for details.
from .where import where
from .value_locate import value_locate | StarcoderdataPython |
40178 | #!/usr/bin/python3
"""Status-bar CPU widget: prints a spinner icon whose animation speed
scales with CPU load, persisting the animation frame in data.json
between invocations (CPU usage is produced asynchronously by
speedcpu.py)."""
import json
import os
import subprocess
# Icons for the animation
sleep = ""
icons_base = ["","","","",""]
# Path to the script
__location__ = os.path.realpath(
    os.path.join(os.getcwd(), os.path.dirname(__file__)))
# Run the script for getting the CPU usage
subprocess.Popen([os.path.join(__location__, "speedcpu.py")], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
with open(os.path.join(__location__, "data.json"),'r+') as file:
    data = json.load(file)
    cpu_usage = data["SpeedClock"]
    fotograma = data["Photogram"]
    if cpu_usage<10:
        print(sleep + " " + str(round(cpu_usage)) + "%")
    #If you want to change the speed of the animation, you can change the numbers below
    else:
        # You can change the interval of the different speeds.
        if cpu_usage>=10 and cpu_usage<40:
            keys = [0,0,0,0, 1,1,1,1, 2,2,2,2, 3,3,3,3, 4,4,4,4]
        elif cpu_usage>=40 and cpu_usage<70:
            keys = [0,0, 1,1, 2,2, 3,3, 4,4]
        else:
            # BUG FIX: the original condition was `cpu_usage>=70 and
            # cpu_usage<100`, which left `keys` undefined (NameError)
            # at exactly 100% load; a plain else covers >=70 fully.
            keys = [0, 1, 2, 3, 4]
        #print(keys[fotograma%len(keys)])
        print(icons_base[keys[fotograma%len(keys)]] + " " + str(round(cpu_usage)) + "%")
    # Advance and persist the 20-frame animation counter in place.
    data["Photogram"] = (fotograma + 1)%20
    file.seek(0)
    json.dump(data, file, indent=4)
    file.truncate()
1683337 | import socket
import os
import math
# Toy TCP file-transfer server.  The socket is created, bound and
# accept()ed at import time, so this module is meant to be executed as
# a script only.
se = socket.socket()
port = 5001
contype = input("Enter 1 for manual ip entering 2 for automatic ip configuration : ")
if contype == "1":
    hostip = input("Enter virtual network ip : ")
elif contype == "2":
    hostip = socket.gethostbyname(socket.gethostname())
# NOTE(review): any other `contype` value leaves `hostip` undefined and
# the bind() below raises NameError — consider validating the choice.
se.bind((hostip,port))
se.listen(1)
print("Host Adress : " + str(hostip))
print("Port : " + str(port))
print("waiting for any incoming connections..")
conn, addr = se.accept()
print(addr,"Has connected to the server")
def filetransfer():
    # Sends up to 1024 bytes of the global `file` over the global `conn`.
    # NOTE(review): retries recursively on socket errors with no base
    # case (possible unbounded recursion), and only ever transmits the
    # first 1024 bytes of the open file — confirm intended semantics.
    try:
        filedata = file.read(1024)
        conn.send(filedata)
        print("Data transmitted succesfully")
    except socket.error as msg:
        print("File couldn't found " + str(msg) + "\n" + "Retry...")
        filetransfer()
filename = input(str("Enter the name of the file to send: "))
if filename[-4:] == ".png":
    # PNG files are split into 5 roughly equal chunk files on disk.
    c = os.path.getsize(filename)
    # print(c)
    CHUNK_SIZE = math.ceil(math.ceil(c) / 5)
    # print(CHUNK_SIZE)
    index = 1
    with open(filename, 'rb') as infile:
        chunk = infile.read(int(CHUNK_SIZE))
        while chunk:
            chunkname = filename[0:-4] + '_' + str(index)
            print("chunk name is: " + chunkname + "\n")
            with open(chunkname, 'wb+') as chunk_file:
                chunk_file.write(chunk)
                index += 1
                chunk = infile.read(int(CHUNK_SIZE))
            # Redundant: the `with` block above already closed chunk_file.
            chunk_file.close()
    i = 0
    # NOTE(review): `chunkname` still names the LAST chunk written, so
    # this loop transmits only that final chunk five times — earlier
    # chunks are never sent.  Presumably each chunk should be sent once;
    # confirm the intended protocol before changing.
    while (i < 5):
        file = open(chunkname,'rb')
        filetransfer()
        i += 1
else:
    file = open(filename, 'rb')
    filetransfer()
3340687 | <reponame>CESNET/exafs<filename>migrations/versions/76856add9483_.py<gh_stars>1-10
"""empty message
Revision ID: 76856add9483
Revises: <PASSWORD>
Create Date: 2019-01-28 10:22:17.904055
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# Restored from the "Revision ID" line of the module docstring; the
# checked-in value had been corrupted by credential redaction.
revision = '76856add9483'
# NOTE(review): the parent revision id was also redacted ('<PASSWORD>');
# recover the real value from the migration history before deploying.
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: create the ``community`` table with a
    foreign key to ``role`` and unique ``name``/``command`` columns."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('community',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=120), nullable=True),
    sa.Column('command', sa.String(length=120), nullable=True),
    sa.Column('description', sa.String(length=260), nullable=True),
    sa.Column('role_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['role_id'], ['role.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('command'),
    sa.UniqueConstraint('name')
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the ``community`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('community')
    # ### end Alembic commands ###
| StarcoderdataPython |
66585 | # Copyright 2016-2017 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import itertools
import operator
import zlib
import jmespath
from c7n.actions import BaseAction, ModifyVpcSecurityGroupsAction
from c7n.exceptions import PolicyValidationError, ClientError
from c7n.filters import (
DefaultVpcBase, Filter, ValueFilter)
import c7n.filters.vpc as net_filters
from c7n.filters.iamaccess import CrossAccountAccessFilter
from c7n.filters.related import RelatedResourceFilter
from c7n.filters.revisions import Diff
from c7n import query, resolver
from c7n.manager import resources
from c7n.resources.securityhub import OtherResourcePostFinding, PostFinding
from c7n.utils import (
chunks, local_session, type_schema, get_retry, parse_cidr)
from c7n.resources.aws import shape_validate
from c7n.resources.shield import IsShieldProtected, SetShieldProtection
@resources.register('vpc')
class Vpc(query.QueryResourceManager):
    """Cloud Custodian resource manager for EC2 VPCs."""

    class resource_type(query.TypeInfo):
        # Metadata telling the query framework how to enumerate and
        # identify VPC resources via the EC2 API.
        service = 'ec2'
        arn_type = 'vpc'
        enum_spec = ('describe_vpcs', 'Vpcs', None)
        name = id = 'VpcId'
        filter_name = 'VpcIds'
        filter_type = 'list'
        cfn_type = config_type = 'AWS::EC2::VPC'
        id_prefix = "vpc-"
@Vpc.filter_registry.register('flow-logs')
class FlowLogFilter(Filter):
    """Are flow logs enabled on the resource.
    ie to find all vpcs with flows logs disabled we can do this
    :example:
    .. code-block:: yaml
            policies:
              - name: flow-logs-enabled
                resource: vpc
                filters:
                  - flow-logs
    or to find all vpcs with flow logs but that don't match a
    particular configuration.
    :example:
    .. code-block:: yaml
            policies:
              - name: flow-mis-configured
                resource: vpc
                filters:
                  - not:
                    - type: flow-logs
                      enabled: true
                      set-op: or
                      op: equal
                      # equality operator applies to following keys
                      traffic-type: all
                      status: active
                      log-group: vpc-logs
    """
    schema = type_schema(
        'flow-logs',
        **{'enabled': {'type': 'boolean', 'default': False},
           'op': {'enum': ['equal', 'not-equal'], 'default': 'equal'},
           'set-op': {'enum': ['or', 'and'], 'default': 'or'},
           'status': {'enum': ['active']},
           'deliver-status': {'enum': ['success', 'failure']},
           'destination': {'type': 'string'},
           'destination-type': {'enum': ['s3', 'cloud-watch-logs']},
           'traffic-type': {'enum': ['accept', 'reject', 'all']},
           'log-format': {'type': 'string'},
           'log-group': {'type': 'string'}})
    permissions = ('ec2:DescribeFlowLogs',)
    def process(self, resources, event=None):
        """Return the subset of VPCs whose flow-log state matches the
        filter configuration."""
        client = local_session(self.manager.session_factory).client('ec2')
        # TODO given subnet/nic level logs, we should paginate, but we'll
        # need to add/update botocore pagination support.
        logs = client.describe_flow_logs().get('FlowLogs', ())
        m = self.manager.get_model()
        # Group the account's flow logs by the resource they belong to.
        resource_map = {}
        for fl in logs:
            resource_map.setdefault(fl['ResourceId'], []).append(fl)
        # Pull each configured comparison key; None means "don't care".
        enabled = self.data.get('enabled', False)
        log_group = self.data.get('log-group')
        log_format = self.data.get('log-format')
        traffic_type = self.data.get('traffic-type')
        destination_type = self.data.get('destination-type')
        destination = self.data.get('destination')
        status = self.data.get('status')
        delivery_status = self.data.get('deliver-status')
        # `op` selects equality vs inequality for every attribute check;
        # `set-op` selects whether any or all flow logs must match.
        op = self.data.get('op', 'equal') == 'equal' and operator.eq or operator.ne
        set_op = self.data.get('set-op', 'or')
        results = []
        # looping over vpc resources
        for r in resources:
            if r[m.id] not in resource_map:
                # we didn't find a flow log for this vpc
                if enabled:
                    # vpc flow logs not enabled so exclude this vpc from results
                    continue
                results.append(r)
                continue
            flogs = resource_map[r[m.id]]
            # Annotate the resource with its flow logs for later actions.
            r['c7n:flow-logs'] = flogs
            # config comparisons are pointless if we only want vpcs with no flow logs
            if enabled:
                fl_matches = []
                for fl in flogs:
                    # Each unspecified key trivially matches (None guard).
                    dest_type_match = (destination_type is None) or op(
                        fl['LogDestinationType'], destination_type)
                    dest_match = (destination is None) or op(
                        fl['LogDestination'], destination)
                    status_match = (status is None) or op(fl['FlowLogStatus'], status.upper())
                    delivery_status_match = (delivery_status is None) or op(
                        fl['DeliverLogsStatus'], delivery_status.upper())
                    traffic_type_match = (
                        traffic_type is None) or op(
                        fl['TrafficType'],
                        traffic_type.upper())
                    log_group_match = (log_group is None) or op(fl.get('LogGroupName'), log_group)
                    log_format_match = (log_format is None) or op(fl.get('LogFormat'), log_format)
                    # combine all conditions to check if flow log matches the spec
                    fl_match = (status_match and traffic_type_match and dest_match and
                                log_format_match and log_group_match and
                                dest_type_match and delivery_status_match)
                    fl_matches.append(fl_match)
                if set_op == 'or':
                    if any(fl_matches):
                        results.append(r)
                elif set_op == 'and':
                    if all(fl_matches):
                        results.append(r)
        return results
@Vpc.filter_registry.register('security-group')
class VpcSecurityGroupFilter(RelatedResourceFilter):
    """Match VPCs by attributes of the security groups they contain.
    :example:
    .. code-block:: yaml
            policies:
              - name: vpc-by-sg
                resource: vpc
                filters:
                  - type: security-group
                    key: tag:Color
                    value: Gray
    """
    schema = type_schema(
        'security-group', rinherit=ValueFilter.schema,
        **{'match-resource': {'type': 'boolean'},
           'operator': {'enum': ['and', 'or']}})
    RelatedResource = "c7n.resources.vpc.SecurityGroup"
    RelatedIdsExpression = '[SecurityGroups][].GroupId'
    AnnotationKey = "matched-vpcs"

    def get_related_ids(self, resources):
        # Ids of the security groups living in any of the given VPCs.
        vpc_ids = {vpc['VpcId'] for vpc in resources}
        group_ids = set()
        for group in self.manager.get_resource_manager(
                'security-group').resources():
            if group.get('VpcId', '') in vpc_ids:
                group_ids.add(group['GroupId'])
        return group_ids
@Vpc.filter_registry.register('subnet')
class VpcSubnetFilter(RelatedResourceFilter):
    """Match VPCs by attributes of the subnets they contain.
    :example:
    .. code-block:: yaml
            policies:
              - name: vpc-by-subnet
                resource: vpc
                filters:
                  - type: subnet
                    key: tag:Color
                    value: Gray
    """
    schema = type_schema(
        'subnet', rinherit=ValueFilter.schema,
        **{'match-resource': {'type': 'boolean'},
           'operator': {'enum': ['and', 'or']}})
    RelatedResource = "c7n.resources.vpc.Subnet"
    RelatedIdsExpression = '[Subnets][].SubnetId'
    AnnotationKey = "MatchedVpcsSubnets"

    def get_related_ids(self, resources):
        # Ids of the subnets living in any of the given VPCs.
        vpc_ids = {vpc['VpcId'] for vpc in resources}
        subnet_ids = set()
        for subnet in self.manager.get_resource_manager(
                'subnet').resources():
            if subnet.get('VpcId', '') in vpc_ids:
                subnet_ids.add(subnet['SubnetId'])
        return subnet_ids
@Vpc.filter_registry.register('nat-gateway')
class VpcNatGatewayFilter(RelatedResourceFilter):
    """Match VPCs by attributes of the NAT gateways they contain.
    :example:
    .. code-block:: yaml
            policies:
              - name: vpc-by-nat
                resource: vpc
                filters:
                  - type: nat-gateway
                    key: tag:Color
                    value: Gray
    """
    schema = type_schema(
        'nat-gateway', rinherit=ValueFilter.schema,
        **{'match-resource': {'type': 'boolean'},
           'operator': {'enum': ['and', 'or']}})
    RelatedResource = "c7n.resources.vpc.NATGateway"
    RelatedIdsExpression = '[NatGateways][].NatGatewayId'
    AnnotationKey = "MatchedVpcsNatGateways"

    def get_related_ids(self, resources):
        # Ids of the NAT gateways living in any of the given VPCs.
        vpc_ids = {vpc['VpcId'] for vpc in resources}
        natgw_ids = set()
        for natgw in self.manager.get_resource_manager(
                'nat-gateway').resources():
            if natgw.get('VpcId', '') in vpc_ids:
                natgw_ids.add(natgw['NatGatewayId'])
        return natgw_ids
@Vpc.filter_registry.register('internet-gateway')
class VpcInternetGatewayFilter(RelatedResourceFilter):
    """Filter VPCs based on Internet Gateway attributes

    :example:

    .. code-block:: yaml

            policies:
              - name: vpc-by-igw
                resource: vpc
                filters:
                  - type: internet-gateway
                    key: tag:Color
                    value: Gray
    """
    schema = type_schema(
        'internet-gateway', rinherit=ValueFilter.schema,
        **{'match-resource': {'type': 'boolean'},
           'operator': {'enum': ['and', 'or']}})
    RelatedResource = "c7n.resources.vpc.InternetGateway"
    RelatedIdsExpression = '[InternetGateways][].InternetGatewayId'
    AnnotationKey = "MatchedVpcsIgws"

    def get_related_ids(self, resources):
        """Return ids of internet gateways attached to any of these VPCs."""
        wanted = {vpc['VpcId'] for vpc in resources}
        related = set()
        for igw in self.manager.get_resource_manager(
                'internet-gateway').resources():
            # An igw records its vpc membership on its attachments.
            if any(a.get('VpcId', '') in wanted for a in igw['Attachments']):
                related.add(igw['InternetGatewayId'])
        return related
@Vpc.filter_registry.register('vpc-attributes')
class AttributesFilter(Filter):
    """Filters VPCs based on their DNS attributes

    :example:

    .. code-block:: yaml

            policies:
              - name: dns-hostname-enabled
                resource: vpc
                filters:
                  - type: vpc-attributes
                    dnshostnames: True
    """
    schema = type_schema(
        'vpc-attributes',
        dnshostnames={'type': 'boolean'},
        dnssupport={'type': 'boolean'})
    permissions = ('ec2:DescribeVpcAttribute',)

    def process(self, resources, event=None):
        """Keep vpcs whose attributes match every configured expectation.

        If neither attribute is configured nothing matches (same as before).
        Note: one describe_vpc_attribute api call per vpc per attribute.
        """
        client = local_session(self.manager.session_factory).client('ec2')
        want_hostnames = self.data.get('dnshostnames', None)
        want_support = self.data.get('dnssupport', None)
        matched = []
        for vpc in resources:
            checks = []
            if want_hostnames is not None:
                actual = client.describe_vpc_attribute(
                    VpcId=vpc['VpcId'],
                    Attribute='enableDnsHostnames'
                )['EnableDnsHostnames']['Value']
                checks.append(actual == want_hostnames)
            if want_support is not None:
                actual = client.describe_vpc_attribute(
                    VpcId=vpc['VpcId'],
                    Attribute='enableDnsSupport'
                )['EnableDnsSupport']['Value']
                checks.append(actual == want_support)
            if checks and all(checks):
                matched.append(vpc)
        return matched
@Vpc.filter_registry.register('dhcp-options')
class DhcpOptionsFilter(Filter):
    """Filter VPCs based on their dhcp options

    :example:

    .. code-block:: yaml

          policies:
             - name: vpcs-in-domain
               resource: vpc
               filters:
                 - type: dhcp-options
                   domain-name: ec2.internal

    if an option value is specified as a list, then all elements must be present.
    if an option value is specified as a string, then that string must be present.

    vpcs not matching a given option value can be found via specifying
    a `present: false` parameter.
    """

    option_keys = ('domain-name', 'domain-name-servers', 'ntp-servers')
    schema = type_schema('dhcp-options', **{
        k: {'oneOf': [
            {'type': 'array', 'items': {'type': 'string'}},
            {'type': 'string'}]}
        for k in option_keys})
    schema['properties']['present'] = {'type': 'boolean'}
    permissions = ('ec2:DescribeDhcpOptions',)

    def validate(self):
        # At least one dhcp option must be configured for the filter to
        # be meaningful.
        if not any(self.data.get(k) for k in self.option_keys):
            raise PolicyValidationError("one of %s required" % (self.option_keys,))
        return self

    def process(self, resources, event=None):
        client = local_session(self.manager.session_factory).client('ec2')
        option_ids = [r['DhcpOptionsId'] for r in resources]
        # Map options-id -> {option key: [values]}
        configs = {}
        for opts in client.describe_dhcp_options(
                Filters=[{
                    'Name': 'dhcp-options-id',
                    'Values': option_ids}]).get('DhcpOptions', ()):
            configs[opts['DhcpOptionsId']] = {
                cfg['Key']: [v['Value'] for v in cfg['Values']]
                for cfg in opts['DhcpConfigurations']}
        return [vpc for vpc in resources
                if self.process_vpc(vpc, configs[vpc['DhcpOptionsId']])]

    def process_vpc(self, vpc, dhcp):
        """Return True when the vpc's dhcp config satisfies the filter."""
        vpc['c7n:DhcpConfiguration'] = dhcp
        matched = True
        for key in self.option_keys:
            if key not in self.data:
                continue
            expected = self.data[key]
            if key not in dhcp:
                matched = False
            elif isinstance(expected, list):
                # list value: must match the full set of configured values
                if sorted(expected) != sorted(dhcp[key]):
                    matched = False
            elif expected not in dhcp[key]:
                # string value: must be among the configured values
                matched = False
        if not self.data.get('present', True):
            matched = not matched
        return matched
@Vpc.action_registry.register('post-finding')
class VpcPostFinding(PostFinding):
    # Security Hub post-finding action for vpcs; emits the AwsEc2Vpc
    # resource shape expected by the ASFF finding format.

    resource_type = "AwsEc2Vpc"

    def format_resource(self, r):
        """Build the sechub envelope/payload for a vpc resource."""
        envelope, payload = self.format_envelope(r)
        # more inane sechub formatting deltas
        detail = {
            'DhcpOptionsId': r.get('DhcpOptionsId'),
            'State': r['State']}
        # Reshape the v4 cidr associations: sechub wants a flat state string
        # rather than the nested CidrBlockState dict ec2 returns.
        for assoc in r.get('CidrBlockAssociationSet', ()):
            detail.setdefault('CidrBlockAssociationSet', []).append(dict(
                AssociationId=assoc['AssociationId'],
                CidrBlock=assoc['CidrBlock'],
                CidrBlockState=assoc['CidrBlockState']['State']))
        # Same flattening for the v6 associations.
        for assoc in r.get('Ipv6CidrBlockAssociationSet', ()):
            detail.setdefault('Ipv6CidrBlockAssociationSet', []).append(dict(
                AssociationId=assoc['AssociationId'],
                Ipv6CidrBlock=assoc['Ipv6CidrBlock'],
                CidrBlockState=assoc['Ipv6CidrBlockState']['State']))
        # filter_empty drops keys with falsy values before merging.
        payload.update(self.filter_empty(detail))
        return envelope
@resources.register('subnet')
class Subnet(query.QueryResourceManager):
    # Resource manager for ec2 subnets.

    class resource_type(query.TypeInfo):
        # Service/enumeration metadata consumed by the query framework.
        service = 'ec2'
        arn_type = 'subnet'
        enum_spec = ('describe_subnets', 'Subnets', None)
        name = id = 'SubnetId'
        filter_name = 'SubnetIds'
        filter_type = 'list'
        cfn_type = config_type = 'AWS::EC2::Subnet'
        id_prefix = "subnet-"
# Reuse the generic flow-log filter for subnets.
Subnet.filter_registry.register('flow-logs', FlowLogFilter)
@Subnet.filter_registry.register('vpc')
class SubnetVpcFilter(net_filters.VpcFilter):
    # Filter subnets by attributes of their parent vpc.
    RelatedIdsExpression = "VpcId"
class ConfigSG(query.ConfigSource):
    """Normalize an AWS Config security-group item to the describe shape.

    Config payloads differ from ec2:DescribeSecurityGroups output in several
    ways (null ports, missing Ipv6Ranges, bare cidr strings, Ipv4Ranges key);
    this massages each permission set back into the describe form so filters
    behave identically regardless of source.
    """

    def load_resource(self, item):
        r = super(ConfigSG, self).load_resource(item)
        for rset in ('IpPermissions', 'IpPermissionsEgress'):
            for p in r.get(rset, ()):
                # Config emits explicit null ports; describe omits them.
                if p.get('FromPort', '') is None:
                    p.pop('FromPort')
                if p.get('ToPort', '') is None:
                    p.pop('ToPort')
                # describe always includes an (empty) Ipv6Ranges list.
                if 'Ipv6Ranges' not in p:
                    p[u'Ipv6Ranges'] = []
                # Strip null-valued keys from group pair references.
                for i in p.get('UserIdGroupPairs', ()):
                    for k, v in list(i.items()):
                        if v is None:
                            i.pop(k)
                # legacy config form, still version 1.2
                for attribute, element_key in (('IpRanges', u'CidrIp'),):
                    if attribute not in p:
                        continue
                    p[attribute] = [{element_key: v} for v in p[attribute]]
                # Config names the v4 ranges 'Ipv4Ranges'; describe uses 'IpRanges'.
                if 'Ipv4Ranges' in p:
                    p['IpRanges'] = p.pop('Ipv4Ranges')
        return r
@resources.register('security-group')
class SecurityGroup(query.QueryResourceManager):
    # Resource manager for ec2 security groups.

    class resource_type(query.TypeInfo):
        # Service/enumeration metadata consumed by the query framework.
        service = 'ec2'
        arn_type = 'security-group'
        enum_spec = ('describe_security_groups', 'SecurityGroups', None)
        id = 'GroupId'
        name = 'GroupName'
        filter_name = "GroupIds"
        filter_type = 'list'
        cfn_type = config_type = "AWS::EC2::SecurityGroup"
        id_prefix = "sg-"

    # Config items need the ConfigSG normalization shim above.
    source_mapping = {
        'config': ConfigSG,
        'describe': query.DescribeSource
    }
@SecurityGroup.filter_registry.register('diff')
class SecurityGroupDiffFilter(Diff):

    def diff(self, source, target):
        # Delegate to the security-group specific differ.
        return SecurityGroupDiff().diff(source, target)
class SecurityGroupDiff:
    """Diff two versions of a security group

    Immutable: GroupId, GroupName, Description, VpcId, OwnerId
    Mutable: Tags, Rules
    """

    # (collection key, element key) pairs that participate in rule hashing.
    RULE_ATTRS = (
        ('PrefixListIds', 'PrefixListId'),
        ('UserIdGroupPairs', 'GroupId'),
        ('IpRanges', 'CidrIp'),
        ('Ipv6Ranges', 'CidrIpv6')
    )

    def diff(self, source, target):
        """Return a delta dict (tags/ingress/egress) or None if identical."""
        delta = {}
        tags = self.get_tag_delta(source, target)
        if tags:
            delta['tags'] = tags
        ingress = self.get_rule_delta('IpPermissions', source, target)
        if ingress:
            delta['ingress'] = ingress
        egress = self.get_rule_delta('IpPermissionsEgress', source, target)
        if egress:
            delta['egress'] = egress
        if delta:
            return delta

    def get_tag_delta(self, source, target):
        """Compute added/removed/updated tags between the two versions."""
        source_tags = {t['Key']: t['Value'] for t in source.get('Tags', ())}
        target_tags = {t['Key']: t['Value'] for t in target.get('Tags', ())}
        source_keys = set(source_tags)
        target_keys = set(target_tags)
        changed = {k for k in source_keys & target_keys
                   if source_tags[k] != target_tags[k]}
        delta = {
            'added': {k: target_tags[k] for k in target_keys - source_keys},
            'removed': {k: source_tags[k] for k in source_keys - target_keys},
            'updated': {k: target_tags[k] for k in changed}}
        # Drop empty buckets.
        return {k: v for k, v in delta.items() if v}

    def get_rule_delta(self, key, source, target):
        """Compute added/removed rules for one permission set ('key')."""
        source_rules = {
            self.compute_rule_hash(r): r for r in source.get(key, ())}
        target_rules = {
            self.compute_rule_hash(r): r for r in target.get(key, ())}
        removed = sorted(set(source_rules) - set(target_rules))
        added = sorted(set(target_rules) - set(source_rules))
        delta = {
            'removed': [source_rules[h] for h in removed],
            'added': [target_rules[h] for h in added]}
        return {k: v for k, v in delta.items() if v}

    def compute_rule_hash(self, rule):
        """Hash a rule's identity (ports, protocol, sorted endpoints)."""
        parts = ["%d-%d-%s-" % (
            rule.get('FromPort', 0) or 0,
            rule.get('ToPort', 0) or 0,
            rule.get('IpProtocol', '-1') or '-1')]
        for attr, element_key in self.RULE_ATTRS:
            if attr not in rule:
                continue
            for value in sorted(e[element_key] for e in rule[attr]):
                parts.append("%s-" % value)
        # mask to generate the same numeric value across all Python versions
        return zlib.crc32("".join(parts).encode('ascii')) & 0xffffffff
@SecurityGroup.action_registry.register('patch')
class SecurityGroupApplyPatch(BaseAction):
    """Modify a resource via application of a reverse delta.
    """
    schema = type_schema('patch')

    permissions = ('ec2:AuthorizeSecurityGroupIngress',
                   'ec2:AuthorizeSecurityGroupEgress',
                   'ec2:RevokeSecurityGroupIngress',
                   'ec2:RevokeSecurityGroupEgress',
                   'ec2:CreateTags',
                   'ec2:DeleteTags')

    def validate(self):
        # Patching only makes sense after a diff filter has annotated
        # 'c7n:previous-revision' onto the resources.
        diff_filters = [n for n in self.manager.iter_filters() if isinstance(
            n, SecurityGroupDiffFilter)]
        if not len(diff_filters):
            raise PolicyValidationError(
                "resource patching requires diff filter")
        return self

    def process(self, resources):
        client = local_session(self.manager.session_factory).client('ec2')
        differ = SecurityGroupDiff()
        patcher = SecurityGroupPatch()
        for r in resources:
            # reverse the patch by computing fresh, the forward
            # patch is for notifications
            d = differ.diff(r, r['c7n:previous-revision']['resource'])
            patcher.apply_delta(client, r, d)
class SecurityGroupPatch:
    """Apply a SecurityGroupDiff delta (tags + ingress/egress rules)
    to a security group via the ec2 api.
    """

    # direction -> (permission set key, revoke api, authorize api)
    RULE_TYPE_MAP = {
        'egress': ('IpPermissionsEgress',
                   'revoke_security_group_egress',
                   'authorize_security_group_egress'),
        'ingress': ('IpPermissions',
                    'revoke_security_group_ingress',
                    'authorize_security_group_ingress')}

    # Retry on api rate limiting.
    retry = staticmethod(get_retry((
        'RequestLimitExceeded', 'Client.RequestLimitExceeded')))

    def apply_delta(self, client, target, change_set):
        """Apply each present section of the change set to the group."""
        if 'tags' in change_set:
            self.process_tags(client, target, change_set['tags'])
        if 'ingress' in change_set:
            self.process_rules(
                client, 'ingress', target, change_set['ingress'])
        if 'egress' in change_set:
            self.process_rules(
                client, 'egress', target, change_set['egress'])

    def process_tags(self, client, group, tag_delta):
        """Delete removed tags, then create added/updated tags."""
        if 'removed' in tag_delta:
            self.retry(client.delete_tags,
                       Resources=[group['GroupId']],
                       Tags=[{'Key': k}
                             for k in tag_delta['removed']])
        tags = []
        if 'added' in tag_delta:
            tags.extend(
                [{'Key': k, 'Value': v}
                 for k, v in tag_delta['added'].items()])
        if 'updated' in tag_delta:
            tags.extend(
                [{'Key': k, 'Value': v}
                 for k, v in tag_delta['updated'].items()])
        if tags:
            self.retry(
                client.create_tags, Resources=[group['GroupId']], Tags=tags)

    def process_rules(self, client, rule_type, group, delta):
        """Revoke removed rules and authorize added rules for one direction."""
        key, revoke_op, auth_op = self.RULE_TYPE_MAP[rule_type]
        revoke, authorize = getattr(
            client, revoke_op), getattr(client, auth_op)

        # Process removes
        if 'removed' in delta:
            self.retry(revoke, GroupId=group['GroupId'],
                       IpPermissions=[r for r in delta['removed']])

        # Process adds
        if 'added' in delta:
            self.retry(authorize, GroupId=group['GroupId'],
                       IpPermissions=[r for r in delta['added']])
class SGUsage(Filter):
    """Base filter that computes the set of security-group ids in use.

    Scans enis, lambdas, launch configs, sg rule references, codebuild
    projects and cloudwatch-event ecs targets; subclasses (used/unused)
    compare candidate groups against the scanned set.
    """

    def get_permissions(self):
        # Aggregate the permissions of every resource manager we scan.
        return list(itertools.chain(
            *[self.manager.get_resource_manager(m).get_permissions()
              for m in
              ['lambda', 'eni', 'launch-config', 'security-group', 'event-rule-target']]))

    def filter_peered_refs(self, resources):
        """Drop groups that are referenced across vpc peering connections."""
        if not resources:
            return resources
        # Check that groups are not referenced across accounts
        client = local_session(self.manager.session_factory).client('ec2')
        peered_ids = set()
        # api accepts at most a bounded batch of group ids per call.
        for resource_set in chunks(resources, 200):
            for sg_ref in client.describe_security_group_references(
                    GroupId=[r['GroupId'] for r in resource_set]
            )['SecurityGroupReferenceSet']:
                peered_ids.add(sg_ref['GroupId'])
        self.log.debug(
            "%d of %d groups w/ peered refs", len(peered_ids), len(resources))
        return [r for r in resources if r['GroupId'] not in peered_ids]

    def get_scanners(self):
        # (label, callable) pairs; each callable returns a set of group ids.
        return (
            ("nics", self.get_eni_sgs),
            ("sg-perm-refs", self.get_sg_refs),
            ('lambdas', self.get_lambda_sgs),
            ("launch-configs", self.get_launch_config_sgs),
            ("ecs-cwe", self.get_ecs_cwe_sgs),
            ("codebuild", self.get_codebuild_sgs),
        )

    def scan_groups(self):
        """Run all scanners and return the union of in-use group ids."""
        used = set()
        for kind, scanner in self.get_scanners():
            sg_ids = scanner()
            new_refs = sg_ids.difference(used)
            used = used.union(sg_ids)
            self.log.debug(
                "%s using %d sgs, new refs %s total %s",
                kind, len(sg_ids), len(new_refs), len(used))
        return used

    def get_launch_config_sgs(self):
        # Note assuming we also have launch config garbage collection
        # enabled.
        sg_ids = set()
        for cfg in self.manager.get_resource_manager('launch-config').resources():
            for g in cfg['SecurityGroups']:
                sg_ids.add(g)
            for g in cfg['ClassicLinkVPCSecurityGroups']:
                sg_ids.add(g)
        return sg_ids

    def get_lambda_sgs(self):
        # Groups referenced by vpc-attached lambda functions.
        sg_ids = set()
        for func in self.manager.get_resource_manager('lambda').resources(augment=False):
            if 'VpcConfig' not in func:
                continue
            for g in func['VpcConfig']['SecurityGroupIds']:
                sg_ids.add(g)
        return sg_ids

    def get_eni_sgs(self):
        # Groups attached to extant network interfaces.
        sg_ids = set()
        for nic in self.manager.get_resource_manager('eni').resources():
            for g in nic['Groups']:
                sg_ids.add(g['GroupId'])
        return sg_ids

    def get_codebuild_sgs(self):
        # Groups referenced by vpc-configured codebuild projects.
        sg_ids = set()
        for cb in self.manager.get_resource_manager('codebuild').resources():
            sg_ids |= set(cb.get('vpcConfig', {}).get('securityGroupIds', []))
        return sg_ids

    def get_sg_refs(self):
        # Groups referenced from other groups' rule UserIdGroupPairs.
        sg_ids = set()
        for sg in self.manager.get_resource_manager('security-group').resources():
            for perm_type in ('IpPermissions', 'IpPermissionsEgress'):
                for p in sg.get(perm_type, []):
                    for g in p.get('UserIdGroupPairs', ()):
                        sg_ids.add(g['GroupId'])
        return sg_ids

    def get_ecs_cwe_sgs(self):
        # Groups referenced by cloudwatch-event rules targeting ecs tasks.
        sg_ids = set()
        expr = jmespath.compile(
            'EcsParameters.NetworkConfiguration.awsvpcConfiguration.SecurityGroups[]')
        for rule in self.manager.get_resource_manager(
                'event-rule-target').resources(augment=False):
            ids = expr.search(rule)
            if ids:
                sg_ids.update(ids)
        return sg_ids
@SecurityGroup.filter_registry.register('unused')
class UnusedSecurityGroup(SGUsage):
    """Filter to just vpc security groups that are not used.

    We scan all extant enis in the vpc to get a baseline set of groups
    in use. Then augment with those referenced by launch configs, and
    lambdas as they may not have extant resources in the vpc at a
    given moment. We also find any security group with references from
    other security group either within the vpc or across peered
    connections. Also checks cloud watch event targeting ecs.

    Checks - enis, lambda, launch-configs, sg rule refs, and ecs cwe
    targets.

    Note this filter does not support classic security groups atm.

    :example:

    .. code-block:: yaml

            policies:
              - name: security-groups-unused
                resource: security-group
                filters:
                  - unused
    """
    schema = type_schema('unused')

    def process(self, resources, event=None):
        used = self.scan_groups()
        # Only vpc groups whose id is not referenced anywhere are candidates.
        candidates = [
            r for r in resources
            if 'VpcId' in r and r['GroupId'] not in used]
        if not candidates:
            return []
        # Peered references also count as usage.
        return self.filter_peered_refs(candidates)
@SecurityGroup.filter_registry.register('used')
class UsedSecurityGroup(SGUsage):
    """Filter to security groups that are used.

    This operates as a complement to the unused filter for multi-step
    workflows.

    :example:

    .. code-block:: yaml

            policies:
              - name: security-groups-in-use
                resource: security-group
                filters:
                  - used
    """
    schema = type_schema('used')

    def process(self, resources, event=None):
        used = self.scan_groups()
        # Determine the unused set (same logic as the unused filter),
        # then return everything else.
        candidates = [
            r for r in resources
            if 'VpcId' in r and r['GroupId'] not in used]
        unused_ids = {g['GroupId'] for g in self.filter_peered_refs(candidates)}
        return [r for r in resources if r['GroupId'] not in unused_ids]
@SecurityGroup.filter_registry.register('stale')
class Stale(Filter):
    """Filter to find security groups that contain stale references
    to other groups that are either no longer present or traverse
    a broken vpc peering connection. Note this applies to VPC
    Security groups only and will implicitly filter security groups.

    AWS Docs:
      https://docs.aws.amazon.com/vpc/latest/peering/vpc-peering-security-groups.html

    :example:

    .. code-block:: yaml

            policies:
              - name: stale-security-groups
                resource: security-group
                filters:
                  - stale
    """
    schema = type_schema('stale')
    permissions = ('ec2:DescribeStaleSecurityGroups',)

    def process(self, resources, event=None):
        client = local_session(self.manager.session_factory).client('ec2')
        # The api is per-vpc; classic (no VpcId) groups are implicitly dropped.
        vpc_ids = {r['VpcId'] for r in resources if 'VpcId' in r}
        group_map = {r['GroupId']: r for r in resources}
        results = []
        self.log.debug("Querying %d vpc for stale refs", len(vpc_ids))
        stale_count = 0
        for vpc_id in vpc_ids:
            stale_groups = client.describe_stale_security_groups(
                VpcId=vpc_id).get('StaleSecurityGroupSet', ())
            stale_count += len(stale_groups)
            for s in stale_groups:
                if s['GroupId'] in group_map:
                    r = group_map[s['GroupId']]
                    # Annotate the stale permissions for downstream actions
                    # (e.g. remove-permissions with 'matched').
                    if 'StaleIpPermissions' in s:
                        r['MatchedIpPermissions'] = s['StaleIpPermissions']
                    if 'StaleIpPermissionsEgress' in s:
                        r['MatchedIpPermissionsEgress'] = s[
                            'StaleIpPermissionsEgress']
                    results.append(r)
        self.log.debug("Found %d stale security groups", stale_count)
        return results
@SecurityGroup.filter_registry.register('default-vpc')
class SGDefaultVpc(DefaultVpcBase):
    """Filter that returns any security group that exists within the default vpc

    :example:

    .. code-block:: yaml

            policies:
              - name: security-group-default-vpc
                resource: security-group
                filters:
                  - default-vpc
    """

    schema = type_schema('default-vpc')

    def __call__(self, resource, event=None):
        # Classic (non-vpc) groups never match.
        return self.match(resource['VpcId']) if 'VpcId' in resource else False
class SGPermission(Filter):
    """Filter for verifying security group ingress and egress permissions

    All attributes of a security group permission are available as
    value filters.

    If multiple attributes are specified the permission must satisfy
    all of them. Note that within an attribute match against a list value
    of a permission we default to or.

    If a group has any permissions that match all conditions, then it
    matches the filter.

    Permissions that match on the group are annotated onto the group and
    can subsequently be used by the remove-permission action.

    We have specialized handling for matching `Ports` in ingress/egress
    permission From/To range. The following example matches on ingress
    rules which allow for a range that includes all of the given ports.

    .. code-block:: yaml

      - type: ingress
        Ports: [22, 443, 80]

    As well for verifying that a rule only allows for a specific set of ports
    as in the following example. The delta between this and the previous
    example is that if the permission allows for any ports not specified here,
    then the rule will match. ie. OnlyPorts is a negative assertion match,
    it matches when a permission includes ports outside of the specified set.

    .. code-block:: yaml

      - type: ingress
        OnlyPorts: [22]

    For simplifying ipranges handling which is specified as a list on a rule
    we provide a `Cidr` key which can be used as a value type filter evaluated
    against each of the rules. If any iprange cidr match then the permission
    matches.

    .. code-block:: yaml

      - type: ingress
        IpProtocol: -1
        FromPort: 445

    We also have specialized handling for matching self-references in
    ingress/egress permissions. The following example matches on ingress
    rules which allow traffic its own same security group.

    .. code-block:: yaml

      - type: ingress
        SelfReference: True

    As well for assertions that a ingress/egress permission only matches
    a given set of ports, *note* OnlyPorts is an inverse match.

    .. code-block:: yaml

      - type: egress
        OnlyPorts: [22, 443, 80]

      - type: egress
        Cidr:
          value_type: cidr
          op: in
          value: x.y.z

    `Cidr` can match ipv4 rules and `CidrV6` can match ipv6 rules.  In
    this example we are blocking global inbound connections to SSH or
    RDP.

    .. code-block:: yaml

      - or:
        - type: ingress
          Ports: [22, 3389]
          Cidr:
            value: "0.0.0.0/0"
        - type: ingress
          Ports: [22, 3389]
          CidrV6:
            value: "::/0"

    `SGReferences` can be used to filter out SG references in rules.
    In this example we want to block ingress rules that reference a SG
    that is tagged with `Access: Public`.

    .. code-block:: yaml

      - type: ingress
        SGReferences:
          key: "tag:Access"
          value: "Public"
          op: equal

    We can also filter SG references based on the VPC that they are
    within. In this example we want to ensure that our outbound rules
    that reference SGs are only referencing security groups within a
    specified VPC.

    .. code-block:: yaml

      - type: egress
        SGReferences:
          key: 'VpcId'
          value: 'vpc-11a1a1aa'
          op: equal

    Likewise, we can also filter SG references by their description.
    For example, we can prevent egress rules from referencing any
    SGs that have a description of "default - DO NOT USE".

    .. code-block:: yaml

      - type: egress
        SGReferences:
          key: 'Description'
          value: 'default - DO NOT USE'
          op: equal
    """

    # Permission attributes matched with generic value filters.
    perm_attrs = {
        'IpProtocol', 'FromPort', 'ToPort', 'UserIdGroupPairs',
        'IpRanges', 'PrefixListIds'}
    # Attributes with specialized matching logic below.
    filter_attrs = {
        'Cidr', 'CidrV6', 'Ports', 'OnlyPorts',
        'SelfReference', 'Description', 'SGReferences'}
    attrs = perm_attrs.union(filter_attrs)
    # fix: this add was previously duplicated; a set add is idempotent so
    # the second call was dead code.
    attrs.add('match-operator')

    def validate(self):
        """Reject unknown keys in the filter data."""
        delta = set(self.data.keys()).difference(self.attrs)
        delta.remove('type')
        if delta:
            raise PolicyValidationError("Unknown keys %s on %s" % (
                ", ".join(delta), self.manager.data))
        return self

    def process(self, resources, event=None):
        """Build per-attribute value filters, then evaluate each group."""
        self.vfilters = []
        fattrs = list(sorted(self.perm_attrs.intersection(self.data.keys())))
        self.ports = 'Ports' in self.data and self.data['Ports'] or ()
        self.only_ports = (
            'OnlyPorts' in self.data and self.data['OnlyPorts'] or ())
        for f in fattrs:
            fv = self.data.get(f)
            if isinstance(fv, dict):
                fv['key'] = f
            else:
                fv = {f: fv}
            vf = ValueFilter(fv, self.manager)
            vf.annotate = False
            self.vfilters.append(vf)
        return super(SGPermission, self).process(resources, event)

    def process_ports(self, perm):
        """Match Ports/OnlyPorts against a permission's From/To range.

        Returns True/False on a determination, None when neither option
        applies to this permission.
        """
        found = None
        if 'FromPort' in perm and 'ToPort' in perm:
            for port in self.ports:
                if port >= perm['FromPort'] and port <= perm['ToPort']:
                    found = True
                    break
                found = False
            only_found = False
            for port in self.only_ports:
                if port == perm['FromPort'] and port == perm['ToPort']:
                    only_found = True
            # OnlyPorts is an inverse assertion: match when the permission
            # allows ports outside the specified set.
            if self.only_ports and not only_found:
                found = found is None or found and True or False
            if self.only_ports and only_found:
                found = False
        return found

    def _process_cidr(self, cidr_key, cidr_type, range_type, perm):
        """Evaluate a Cidr/CidrV6 value filter against each ip range."""
        found = None
        ip_perms = perm.get(range_type, [])
        if not ip_perms:
            return False
        match_range = self.data[cidr_key]
        if isinstance(match_range, dict):
            match_range['key'] = cidr_type
        else:
            match_range = {cidr_type: match_range}
        vf = ValueFilter(match_range, self.manager)
        vf.annotate = False
        for ip_range in ip_perms:
            found = vf(ip_range)
            if found:
                break
            else:
                found = False
        return found

    def process_cidrs(self, perm):
        """Combine v4/v6 cidr matches per the configured match-operator."""
        found_v6 = found_v4 = None
        if 'CidrV6' in self.data:
            found_v6 = self._process_cidr('CidrV6', 'CidrIpv6', 'Ipv6Ranges', perm)
        if 'Cidr' in self.data:
            found_v4 = self._process_cidr('Cidr', 'CidrIp', 'IpRanges', perm)
        match_op = self.data.get('match-operator', 'and') == 'and' and all or any
        cidr_match = [k for k in (found_v6, found_v4) if k is not None]
        if not cidr_match:
            return None
        return match_op(cidr_match)

    def process_description(self, perm):
        """Match a Description value filter against the rule's first endpoint."""
        if 'Description' not in self.data:
            return None
        d = dict(self.data['Description'])
        d['key'] = 'Description'
        vf = ValueFilter(d, self.manager)
        vf.annotate = False
        for k in ('Ipv6Ranges', 'IpRanges', 'UserIdGroupPairs', 'PrefixListIds'):
            if k not in perm or not perm[k]:
                continue
            return vf(perm[k][0])
        return False

    def process_self_reference(self, perm, sg_id):
        """Match the SelfReference boolean against the rule's group pairs."""
        found = None
        ref_match = self.data.get('SelfReference')
        if ref_match is not None:
            found = False
        if 'UserIdGroupPairs' in perm and 'SelfReference' in self.data:
            self_reference = sg_id in [p['GroupId']
                                       for p in perm['UserIdGroupPairs']]
            if ref_match is False and not self_reference:
                found = True
            if ref_match is True and self_reference:
                found = True
        return found

    def process_sg_references(self, perm, owner_id):
        """Match an SGReferences value filter against referenced groups.

        Only same-account references are resolved and evaluated.
        """
        sg_refs = self.data.get('SGReferences')
        if not sg_refs:
            return None
        sg_perm = perm.get('UserIdGroupPairs', [])
        if not sg_perm:
            return False
        sg_group_ids = [p['GroupId'] for p in sg_perm if p['UserId'] == owner_id]
        sg_resources = self.manager.get_resources(sg_group_ids)
        vf = ValueFilter(sg_refs, self.manager)
        vf.annotate = False
        for sg in sg_resources:
            if vf(sg):
                return True
        return False

    def expand_permissions(self, permissions):
        """Expand each list of cidr, prefix list, user id group pair
        by port/protocol as an individual rule.

        The console ux automatically expands them out as addition/removal is
        per this expansion, the describe calls automatically group them.
        """
        for p in permissions:
            np = dict(p)
            values = {}
            for k in (u'IpRanges',
                      u'Ipv6Ranges',
                      u'PrefixListIds',
                      u'UserIdGroupPairs'):
                values[k] = np.pop(k, ())
                np[k] = []
            for k, v in values.items():
                if not v:
                    continue
                for e in v:
                    ep = dict(np)
                    ep[k] = [e]
                    yield ep

    def __call__(self, resource):
        """Return True when any expanded permission satisfies every
        configured condition; matching permissions are annotated on the
        resource under Matched<permission-set-key>.
        """
        matched = []
        sg_id = resource['GroupId']
        owner_id = resource['OwnerId']
        match_op = self.data.get('match-operator', 'and') == 'and' and all or any
        for perm in self.expand_permissions(resource[self.ip_permissions_key]):
            perm_matches = {}
            for idx, f in enumerate(self.vfilters):
                perm_matches[idx] = bool(f(perm))
            perm_matches['description'] = self.process_description(perm)
            perm_matches['ports'] = self.process_ports(perm)
            perm_matches['cidrs'] = self.process_cidrs(perm)
            perm_matches['self-refs'] = self.process_self_reference(perm, sg_id)
            perm_matches['sg-refs'] = self.process_sg_references(perm, owner_id)
            perm_match_values = list(filter(
                lambda x: x is not None, perm_matches.values()))
            # account for one python behavior any([]) == False, all([]) == True
            if match_op == all and not perm_match_values:
                continue
            match = match_op(perm_match_values)
            if match:
                matched.append(perm)
        if matched:
            resource['Matched%s' % self.ip_permissions_key] = matched
            return True
# Shared jsonschema properties for the ingress/egress permission filters;
# merged into each filter's schema below.
SGPermissionSchema = {
    'match-operator': {'type': 'string', 'enum': ['or', 'and']},
    'Ports': {'type': 'array', 'items': {'type': 'integer'}},
    'SelfReference': {'type': 'boolean'},
    'OnlyPorts': {'type': 'array', 'items': {'type': 'integer'}},
    'IpProtocol': {
        'oneOf': [
            {'enum': ["-1", -1, 'tcp', 'udp', 'icmp', 'icmpv6']},
            {'$ref': '#/definitions/filters/value'}
        ]
    },
    'FromPort': {'oneOf': [
        {'$ref': '#/definitions/filters/value'},
        {'type': 'integer'}]},
    'ToPort': {'oneOf': [
        {'$ref': '#/definitions/filters/value'},
        {'type': 'integer'}]},
    # Free-form keys handled by SGPermission's specialized matchers.
    'UserIdGroupPairs': {},
    'IpRanges': {},
    'PrefixListIds': {},
    'Description': {},
    'Cidr': {},
    'CidrV6': {},
    'SGReferences': {}
}
@SecurityGroup.filter_registry.register('ingress')
class IPPermission(SGPermission):
    # Ingress-direction specialization of SGPermission.

    ip_permissions_key = "IpPermissions"
    schema = {
        'type': 'object',
        'additionalProperties': False,
        'properties': {'type': {'enum': ['ingress']}},
        'required': ['type']}
    schema['properties'].update(SGPermissionSchema)
@SecurityGroup.filter_registry.register('egress')
class IPPermissionEgress(SGPermission):
    # Egress-direction specialization of SGPermission.

    ip_permissions_key = "IpPermissionsEgress"
    schema = {
        'type': 'object',
        'additionalProperties': False,
        'properties': {'type': {'enum': ['egress']}},
        'required': ['type']}
    schema['properties'].update(SGPermissionSchema)
@SecurityGroup.action_registry.register('delete')
class Delete(BaseAction):
    """Action to delete security group(s)

    It is recommended to apply a filter to the delete policy to avoid the
    deletion of all security groups returned.

    :example:

    .. code-block:: yaml

            policies:
              - name: security-groups-unused-delete
                resource: security-group
                filters:
                  - type: unused
                actions:
                  - delete
    """

    schema = type_schema('delete')
    permissions = ('ec2:DeleteSecurityGroup',)

    def process(self, resources):
        # One delete api call per matched group.
        client = local_session(self.manager.session_factory).client('ec2')
        for group in resources:
            client.delete_security_group(GroupId=group['GroupId'])
@SecurityGroup.action_registry.register('remove-permissions')
class RemovePermissions(BaseAction):
    """Action to remove ingress/egress rule(s) from a security group

    :example:

    .. code-block:: yaml

            policies:
              - name: security-group-revoke-8080
                resource: security-group
                filters:
                  - type: ingress
                    IpProtocol: tcp
                    Ports: [8080]
                actions:
                  - type: remove-permissions
                    ingress: matched
    """
    schema = type_schema(
        'remove-permissions',
        ingress={'type': 'string', 'enum': ['matched', 'all']},
        egress={'type': 'string', 'enum': ['matched', 'all']})

    permissions = ('ec2:RevokeSecurityGroupIngress',
                   'ec2:RevokeSecurityGroupEgress')

    def process(self, resources):
        client = local_session(self.manager.session_factory).client('ec2')
        directions = (
            ('ingress', self.data.get('ingress', 'matched')),
            ('egress', self.data.get('egress', 'matched')))
        for resource in resources:
            for direction, spec in directions:
                suffix = 'Egress' if direction == 'egress' else ''
                if spec == 'matched':
                    # Permissions annotated by a prior ingress/egress filter.
                    perms = resource.get('MatchedIpPermissions%s' % suffix, ())
                elif spec == 'all':
                    perms = resource.get('IpPermissions%s' % suffix, ())
                elif isinstance(spec, list):
                    # Explicit permission list (used by set-permissions).
                    perms = spec
                else:
                    continue
                if not perms:
                    continue
                revoke = getattr(
                    client, 'revoke_security_group_%s' % direction)
                revoke(GroupId=resource['GroupId'], IpPermissions=perms)
@SecurityGroup.action_registry.register('set-permissions')
class SetPermissions(BaseAction):
    """Action to add/remove ingress/egress rule(s) to a security group

    :example:

    .. code-block:: yaml

       policies:
         - name: ops-access-via
           resource: aws.security-group
           filters:
             - type: ingress
               IpProtocol: "-1"
               Ports: [22, 3389]
               Cidr: "0.0.0.0/0"
           actions:
            - type: set-permissions
              # remove the permission matched by a previous ingress filter.
              remove-ingress: matched
              # remove permissions by specifying them fully, ie remove default outbound
              # access.
              remove-egress:
                 - IpProtocol: "-1"
                   Cidr: "0.0.0.0/0"
              # add a list of permissions to the group.
              add-ingress:
                # full syntax/parameters to authorize can be used.
                - IpPermissions:
                   - IpProtocol: TCP
                     FromPort: 22
                     ToPort: 22
                     IpRanges:
                      - Description: Ops SSH Access
                        CidrIp: "1.1.1.1/32"
                      - Description: Security SSH Access
                        CidrIp: "172.16.58.3/32"
              # add a list of egress permissions to a security group
              add-egress:
                 - IpProtocol: "TCP"
                   FromPort: 5044
                   ToPort: 5044
                   CidrIp: "192.168.1.2/32"
    """
    schema = type_schema(
        'set-permissions',
        **{'add-ingress': {'type': 'array', 'items': {'type': 'object', 'minProperties': 1}},
           'remove-ingress': {'oneOf': [
               {'enum': ['all', 'matched']},
               {'type': 'array', 'items': {'type': 'object', 'minProperties': 2}}]},
           'add-egress': {'type': 'array', 'items': {'type': 'object', 'minProperties': 1}},
           'remove-egress': {'oneOf': [
               {'enum': ['all', 'matched']},
               {'type': 'array', 'items': {'type': 'object', 'minProperties': 2}}]}}
    )
    permissions = (
        'ec2:AuthorizeSecurityGroupEgress',
        'ec2:AuthorizeSecurityGroupIngress',)

    ingress_shape = "AuthorizeSecurityGroupIngressRequest"
    egress_shape = "AuthorizeSecurityGroupEgressRequest"

    def validate(self):
        """Shape-validate each add-ingress/add-egress permission.

        Bug fix: previously this interpolated the builtin ``type`` instead
        of ``perm_type``, producing the key "add-<class 'type'>" which never
        exists, so no permission was ever validated.
        """
        request_template = {'GroupId': 'sg-06bc5ce18a2e5d57a'}
        for perm_type, shape in (
                ('egress', self.egress_shape), ('ingress', self.ingress_shape)):
            for perm in self.data.get('add-%s' % perm_type, ()):
                params = dict(request_template)
                params.update(perm)
                shape_validate(params, shape, 'ec2')

    def get_permissions(self):
        """Return only the iam permissions needed for the configured ops."""
        perms = ()
        if 'add-ingress' in self.data:
            perms += ('ec2:AuthorizeSecurityGroupIngress',)
        if 'add-egress' in self.data:
            perms += ('ec2:AuthorizeSecurityGroupEgress',)
        if 'remove-ingress' in self.data or 'remove-egress' in self.data:
            perms += RemovePermissions.permissions
        if not perms:
            perms = self.permissions + RemovePermissions.permissions
        return perms

    def process(self, resources):
        client = local_session(self.manager.session_factory).client('ec2')
        for r in resources:
            for method, permissions in (
                    (client.authorize_security_group_egress, self.data.get('add-egress', ())),
                    (client.authorize_security_group_ingress, self.data.get('add-ingress', ()))):
                for p in permissions:
                    p = dict(p)
                    p['GroupId'] = r['GroupId']
                    try:
                        method(**p)
                    except ClientError as e:
                        # Re-adding an existing rule is a no-op, not an error.
                        if e.response['Error']['Code'] != 'InvalidPermission.Duplicate':
                            raise

        # Delegate removals to the remove-permissions action.
        remover = RemovePermissions(
            {'ingress': self.data.get('remove-ingress', ()),
             'egress': self.data.get('remove-egress', ())}, self.manager)
        remover.process(resources)
@SecurityGroup.action_registry.register('post-finding')
class SecurityGroupPostFinding(OtherResourcePostFinding):
    """Post security-group findings to Security Hub with the proper ASFF type."""

    def format_resource(self, r):
        # Override the generic 'Other' resource type with the
        # Security Hub native security group type.
        fr = super(SecurityGroupPostFinding, self).format_resource(r)
        fr['Type'] = 'AwsEc2SecurityGroup'
        return fr
class DescribeENI(query.DescribeSource):
    """Describe source for ENIs that normalizes tag data.

    The EC2 DescribeNetworkInterfaces API returns tags under 'TagSet';
    rename the key to 'Tags' so generic tag filters/actions work uniformly.
    """

    def augment(self, resources):
        for r in resources:
            r['Tags'] = r.pop('TagSet', [])
        return resources
@resources.register('eni')
class NetworkInterface(query.QueryResourceManager):
    """Resource manager for EC2 elastic network interfaces."""

    class resource_type(query.TypeInfo):
        service = 'ec2'
        arn_type = 'eni'
        enum_spec = ('describe_network_interfaces', 'NetworkInterfaces', None)
        name = id = 'NetworkInterfaceId'
        filter_name = 'NetworkInterfaceIds'
        filter_type = 'list'
        cfn_type = config_type = "AWS::EC2::NetworkInterface"
        id_prefix = "eni-"

    # Describe mode needs tag normalization (TagSet -> Tags); config
    # mode can use the generic source unchanged.
    source_mapping = {
        'describe': DescribeENI,
        'config': query.ConfigSource
    }
# Reuse the generic flow-log and network-location filters on ENIs.
NetworkInterface.filter_registry.register('flow-logs', FlowLogFilter)
NetworkInterface.filter_registry.register(
'network-location', net_filters.NetworkLocation)
@NetworkInterface.filter_registry.register('subnet')
class InterfaceSubnetFilter(net_filters.SubnetFilter):
    """Network interface subnet filter

    :example:

    .. code-block:: yaml

            policies:
              - name: network-interface-in-subnet
                resource: eni
                filters:
                  - type: subnet
                    key: CidrBlock
                    value: 10.0.2.0/24
    """

    RelatedIdsExpression = "SubnetId"
@NetworkInterface.filter_registry.register('security-group')
class InterfaceSecurityGroupFilter(net_filters.SecurityGroupFilter):
    """Network interface security group filter

    :example:

    .. code-block:: yaml

            policies:
              - name: network-interface-ssh
                resource: eni
                filters:
                  - type: security-group
                    match-resource: true
                    key: FromPort
                    value: 22
    """

    RelatedIdsExpression = "Groups[].GroupId"
@NetworkInterface.filter_registry.register('vpc')
class InterfaceVpcFilter(net_filters.VpcFilter):
    """Filter network interfaces by attributes of their VPC."""

    RelatedIdsExpression = "VpcId"
@NetworkInterface.action_registry.register('modify-security-groups')
class InterfaceModifyVpcSecurityGroups(ModifyVpcSecurityGroupsAction):
    """Remove security groups from an interface.

    Can target either physical groups as a list of group ids or
    symbolic groups like 'matched' or 'all'. 'matched' uses
    the annotations of the 'group' interface filter.

    Note an interface always gets at least one security group, so
    we also allow specification of an isolation/quarantine group
    that can be specified if there would otherwise be no groups.

    :example:

    .. code-block:: yaml

            policies:
              - name: network-interface-remove-group
                resource: eni
                filters:
                  - type: security-group
                    match-resource: true
                    key: FromPort
                    value: 22
                actions:
                  - type: modify-security-groups
                    isolation-group: sg-01ab23c4
                    add: []
    """
    permissions = ('ec2:ModifyNetworkInterfaceAttribute',)

    def process(self, resources):
        client = local_session(self.manager.session_factory).client('ec2')
        # get_groups resolves the target group list per resource, in order.
        groups = super(
            InterfaceModifyVpcSecurityGroups, self).get_groups(resources)
        for idx, r in enumerate(resources):
            client.modify_network_interface_attribute(
                NetworkInterfaceId=r['NetworkInterfaceId'],
                Groups=groups[idx])
@NetworkInterface.action_registry.register('delete')
class DeleteNetworkInterface(BaseAction):
    """Delete a network interface.

    :example:

    .. code-block:: yaml

        policies:
          - name: mark-orphaned-enis
            comment: Flag abandoned Lambda VPC ENIs for deletion
            resource: eni
            filters:
              - Status: available
              - type: value
                op: glob
                key: Description
                value: "AWS Lambda VPC ENI*"
              - "tag:custodian_status": absent
            actions:
              - type: mark-for-op
                tag: custodian_status
                msg: "Orphaned Lambda VPC ENI: {op}@{action_date}"
                op: delete
                days: 1

          - name: delete-marked-enis
            comment: Delete flagged ENIs that have not been cleaned up naturally
            resource: eni
            filters:
              - type: marked-for-op
                tag: custodian_status
                op: delete
            actions:
              - type: delete
    """
    permissions = ('ec2:DeleteNetworkInterface',)
    schema = type_schema('delete')

    def process(self, resources):
        client = local_session(self.manager.session_factory).client('ec2')
        for r in resources:
            try:
                self.manager.retry(
                    client.delete_network_interface,
                    NetworkInterfaceId=r['NetworkInterfaceId'])
            except ClientError as err:
                # Already-deleted interfaces are not an error.
                if not err.response['Error']['Code'] == 'InvalidNetworkInterfaceID.NotFound':
                    raise
@resources.register('route-table')
class RouteTable(query.QueryResourceManager):
    """Resource manager for VPC route tables."""

    class resource_type(query.TypeInfo):
        service = 'ec2'
        arn_type = 'route-table'
        enum_spec = ('describe_route_tables', 'RouteTables', None)
        name = id = 'RouteTableId'
        filter_name = 'RouteTableIds'
        filter_type = 'list'
        id_prefix = "rtb-"
        cfn_type = config_type = "AWS::EC2::RouteTable"
@RouteTable.filter_registry.register('vpc')
class RouteTableVpcFilter(net_filters.VpcFilter):
    """Filter route tables by attributes of their VPC."""

    RelatedIdsExpression = "VpcId"
@RouteTable.filter_registry.register('subnet')
class SubnetRoute(net_filters.SubnetFilter):
    """Filter a route table by its associated subnet attributes."""

    RelatedIdsExpression = "Associations[].SubnetId"

    # Populated by get_related(); maps RouteTableId -> [SubnetId, ...],
    # including subnets implicitly associated through the vpc main table.
    # BUG FIX: this attribute was declared as `RelatedMapping`, but both
    # methods read `self.RelatedIdMapping`, raising AttributeError.
    RelatedIdMapping = None

    def get_related_ids(self, resources):
        if self.RelatedIdMapping is None:
            return super(SubnetRoute, self).get_related_ids(resources)
        return list(itertools.chain(
            *[self.RelatedIdMapping[r['RouteTableId']] for r in resources]))

    def get_related(self, resources):
        rt_subnet_map = {}
        main_tables = {}
        manager = self.get_resource_manager()
        for r in resources:
            rt_subnet_map[r['RouteTableId']] = []
            for a in r.get('Associations', ()):
                if 'SubnetId' in a:
                    rt_subnet_map[r['RouteTableId']].append(a['SubnetId'])
                elif a.get('Main'):
                    main_tables[r['VpcId']] = r['RouteTableId']
        # Any subnet not explicitly associated inherits its vpc's main table.
        explicit_subnet_ids = set(itertools.chain(*rt_subnet_map.values()))
        subnets = manager.resources()
        for s in subnets:
            if s['SubnetId'] in explicit_subnet_ids:
                continue
            if s['VpcId'] not in main_tables:
                continue
            rt_subnet_map.setdefault(
                main_tables[s['VpcId']], []).append(s['SubnetId'])
        related_subnets = set(itertools.chain(*rt_subnet_map.values()))
        self.RelatedIdMapping = rt_subnet_map
        return {s['SubnetId']: s for s in subnets
                if s['SubnetId'] in related_subnets}
@RouteTable.filter_registry.register('route')
class Route(ValueFilter):
    """Filter a route table by its routes' attributes."""

    schema = type_schema('route', rinherit=ValueFilter.schema)
    schema_alias = False

    def process(self, resources, event=None):
        results = []
        for r in resources:
            matched = [route for route in r['Routes'] if self.match(route)]
            if matched:
                # Annotate matching routes for downstream actions/reports.
                r.setdefault('c7n:matched-routes', []).extend(matched)
                results.append(r)
        return results
@resources.register('transit-gateway')
class TransitGateway(query.QueryResourceManager):
    """Resource manager for EC2 transit gateways."""

    class resource_type(query.TypeInfo):
        service = 'ec2'
        enum_spec = ('describe_transit_gateways', 'TransitGateways', None)
        name = id = 'TransitGatewayId'
        arn = "TransitGatewayArn"
        filter_name = 'TransitGatewayIds'
        filter_type = 'list'
        cfn_type = 'AWS::EC2::TransitGateway'
class TransitGatewayAttachmentQuery(query.ChildResourceQuery):
    """Child query that scopes attachment enumeration to a parent gateway."""

    def get_parent_parameters(self, params, parent_id, parent_key):
        # Copy both the dict and its filter list so a caller-supplied
        # 'Filters' list is not mutated through the shallow dict copy.
        merged_params = dict(params)
        filters = list(params.get('Filters') or [])
        filters.append({'Name': parent_key, 'Values': [parent_id]})
        merged_params['Filters'] = filters
        return merged_params
@query.sources.register('transit-attachment')
class TransitAttachmentSource(query.ChildDescribeSource):
    """Describe source using the gateway-scoped attachment query."""

    resource_query_factory = TransitGatewayAttachmentQuery
@resources.register('transit-attachment')
class TransitGatewayAttachment(query.ChildResourceManager):
    """Resource manager for transit gateway attachments (child of gateway)."""

    child_source = 'transit-attachment'

    class resource_type(query.TypeInfo):
        service = 'ec2'
        enum_spec = ('describe_transit_gateway_attachments', 'TransitGatewayAttachments', None)
        parent_spec = ('transit-gateway', 'transit-gateway-id', None)
        name = id = 'TransitGatewayAttachmentId'
        arn = False
        cfn_type = 'AWS::EC2::TransitGatewayAttachment'
@resources.register('peering-connection')
class PeeringConnection(query.QueryResourceManager):
    """Resource manager for VPC peering connections."""

    class resource_type(query.TypeInfo):
        service = 'ec2'
        arn_type = 'vpc-peering-connection'
        enum_spec = ('describe_vpc_peering_connections',
                     'VpcPeeringConnections', None)
        name = id = 'VpcPeeringConnectionId'
        filter_name = 'VpcPeeringConnectionIds'
        filter_type = 'list'
        id_prefix = "pcx-"
        cfn_type = config_type = "AWS::EC2::VPCPeeringConnection"
@PeeringConnection.filter_registry.register('cross-account')
class CrossAccountPeer(CrossAccountAccessFilter):
    """Flag peering connections where either side is an unknown account."""

    schema = type_schema(
        'cross-account',
        # white list accounts
        whitelist_from=resolver.ValuesFrom.schema,
        whitelist={'type': 'array', 'items': {'type': 'string'}})

    permissions = ('ec2:DescribeVpcPeeringConnections',)

    def process(self, resources, event=None):
        results = []
        accounts = self.get_accounts()
        # BUG FIX: this was a lazy `map(...)` iterator, which was exhausted
        # after the first resource so subsequent resources were never
        # checked; materialize the compiled expressions instead.
        owners = [jmespath.compile(expr) for expr in (
            'AccepterVpcInfo.OwnerId', 'RequesterVpcInfo.OwnerId')]
        for r in resources:
            for o_expr in owners:
                account_id = o_expr.search(r)
                if account_id and account_id not in accounts:
                    r.setdefault(
                        'c7n:CrossAccountViolations', []).append(account_id)
                    results.append(r)
        return results
@PeeringConnection.filter_registry.register('missing-route')
class MissingRoute(Filter):
    """Return peers which are missing a route in route tables.

    If the peering connection is between two vpcs in the same account,
    the connection is returned unless it is present in the route tables
    of each vpc.

    If the peering connection is between accounts, then the local vpc's
    route table is checked.
    """

    schema = type_schema('missing-route')
    permissions = ('ec2:DescribeRouteTables',)

    def process(self, resources, event=None):
        tables = self.manager.get_resource_manager(
            'route-table').resources()
        # Map peering connection id -> vpc ids that route to it.
        routed_vpcs = {}
        mid = 'VpcPeeringConnectionId'
        for t in tables:
            for r in t.get('Routes', ()):
                if mid in r:
                    routed_vpcs.setdefault(r[mid], []).append(t['VpcId'])

        results = []
        for r in resources:
            if r[mid] not in routed_vpcs:
                results.append(r)
                continue
            for k in ('AccepterVpcInfo', 'RequesterVpcInfo'):
                # Only check vpcs local to this account and region.
                if r[k]['OwnerId'] != self.manager.config.account_id:
                    continue
                # BUG FIX: was `r['k']['Region']` (literal string key),
                # which raised KeyError whenever a region was present.
                if r[k].get('Region') and r[k]['Region'] != self.manager.config.region:
                    continue
                if r[k]['VpcId'] not in routed_vpcs[r['VpcPeeringConnectionId']]:
                    results.append(r)
                    break
        return results
@resources.register('network-acl')
class NetworkAcl(query.QueryResourceManager):
    """Resource manager for VPC network ACLs."""

    class resource_type(query.TypeInfo):
        service = 'ec2'
        arn_type = 'network-acl'
        enum_spec = ('describe_network_acls', 'NetworkAcls', None)
        name = id = 'NetworkAclId'
        filter_name = 'NetworkAclIds'
        filter_type = 'list'
        cfn_type = config_type = "AWS::EC2::NetworkAcl"
        id_prefix = "acl-"
@NetworkAcl.filter_registry.register('subnet')
class AclSubnetFilter(net_filters.SubnetFilter):
    """Filter network acls by the attributes of their attached subnets.

    :example:

    .. code-block:: yaml

            policies:
              - name: subnet-acl
                resource: network-acl
                filters:
                  - type: subnet
                    key: "tag:Location"
                    value: Public
    """

    RelatedIdsExpression = "Associations[].SubnetId"
@NetworkAcl.filter_registry.register('s3-cidr')
class AclAwsS3Cidrs(Filter):
    """Filter network acls by those that allow access to s3 cidrs.

    Defaults to filtering those nacls that do not allow s3 communication.

    :example:

        Find all nacls that do not allow communication with s3.

    .. code-block:: yaml

            policies:
              - name: s3-not-allowed-nacl
                resource: network-acl
                filters:
                  - s3-cidr
    """
    # TODO allow for port specification as range
    schema = type_schema(
        's3-cidr',
        egress={'type': 'boolean', 'default': True},
        ingress={'type': 'boolean', 'default': True},
        present={'type': 'boolean', 'default': False})

    permissions = ('ec2:DescribePrefixLists',)

    def process(self, resources, event=None):
        ec2 = local_session(self.manager.session_factory).client('ec2')
        cidrs = jmespath.search(
            "PrefixLists[].Cidrs[]", ec2.describe_prefix_lists())
        cidrs = [parse_cidr(cidr) for cidr in cidrs]
        results = []

        check_egress = self.data.get('egress', True)
        check_ingress = self.data.get('ingress', True)
        present = self.data.get('present', False)

        for r in resources:
            # matched: cidr -> None (no entry seen yet) / True (allowed)
            # / False (denied); only the first matching entry is recorded.
            matched = {cidr: None for cidr in cidrs}
            for entry in r['Entries']:
                if entry['Egress'] and not check_egress:
                    continue
                if not entry['Egress'] and not check_ingress:
                    continue
                entry_cidr = parse_cidr(entry['CidrBlock'])
                for c in matched:
                    if c in entry_cidr and matched[c] is None:
                        matched[c] = entry['RuleAction'] == 'allow'
            if present and all(matched.values()):
                results.append(r)
            elif not present and not all(matched.values()):
                results.append(r)
        return results
@resources.register('elastic-ip', aliases=('network-addr',))
class NetworkAddress(query.QueryResourceManager):
    """Resource manager for elastic IP addresses."""

    class resource_type(query.TypeInfo):
        service = 'ec2'
        arn_type = 'eip-allocation'
        enum_spec = ('describe_addresses', 'Addresses', None)
        name = 'PublicIp'
        id = 'AllocationId'
        filter_name = 'AllocationIds'
        filter_type = 'list'
        config_type = "AWS::EC2::EIP"
# Shield advanced protection filter/action also apply to elastic IPs.
NetworkAddress.filter_registry.register('shield-enabled', IsShieldProtected)
NetworkAddress.action_registry.register('set-shield', SetShieldProtection)
@NetworkAddress.action_registry.register('release')
class AddressRelease(BaseAction):
    """Action to release elastic IP address(es)

    Use the force option to cause any attached elastic IPs to
    also be released.  Otherwise, only unattached elastic IPs
    will be released.

    :example:

    .. code-block:: yaml

            policies:
              - name: release-network-addr
                resource: network-addr
                filters:
                  - AllocationId: ...
                actions:
                  - type: release
                    force: True
    """

    schema = type_schema('release', force={'type': 'boolean'})
    permissions = ('ec2:ReleaseAddress', 'ec2:DisassociateAddress',)

    def process_attached(self, client, associated_addrs):
        """Disassociate attached addresses, returning those still to release."""
        for aa in list(associated_addrs):
            try:
                client.disassociate_address(AssociationId=aa['AssociationId'])
            except ClientError as e:
                # If its already been disassociated ignore, else raise.
                # BUG FIX: both the error code and the dict key were
                # misspelled ('Assocation...'), so the already-gone case
                # raised KeyError instead of being handled gracefully.
                if not (e.response['Error']['Code'] == 'InvalidAssociationID.NotFound' and
                        aa['AssociationId'] in e.response['Error']['Message']):
                    raise e
                associated_addrs.remove(aa)
        return associated_addrs

    def process(self, network_addrs):
        client = local_session(self.manager.session_factory).client('ec2')
        force = self.data.get('force')
        assoc_addrs = [addr for addr in network_addrs if 'AssociationId' in addr]
        unassoc_addrs = [addr for addr in network_addrs if 'AssociationId' not in addr]

        if assoc_addrs and not force:
            self.log.warning(
                "Filtered %d attached eips of %d eips. Use 'force: true' to release them.",
                len(assoc_addrs), len(network_addrs))
        elif assoc_addrs and force:
            unassoc_addrs = itertools.chain(
                unassoc_addrs, self.process_attached(client, assoc_addrs))

        for r in unassoc_addrs:
            try:
                client.release_address(AllocationId=r['AllocationId'])
            except ClientError as e:
                # If its already been released, ignore, else raise.
                if e.response['Error']['Code'] != 'InvalidAllocationID.NotFound':
                    raise
@resources.register('customer-gateway')
class CustomerGateway(query.QueryResourceManager):
    """Resource manager for VPN customer gateways."""

    class resource_type(query.TypeInfo):
        service = 'ec2'
        arn_type = 'customer-gateway'
        enum_spec = ('describe_customer_gateways', 'CustomerGateways', None)
        id = 'CustomerGatewayId'
        filter_name = 'CustomerGatewayIds'
        filter_type = 'list'
        name = 'CustomerGatewayId'
        id_prefix = "cgw-"
        cfn_type = config_type = 'AWS::EC2::CustomerGateway'
@resources.register('internet-gateway')
class InternetGateway(query.QueryResourceManager):
    """Resource manager for VPC internet gateways."""

    class resource_type(query.TypeInfo):
        service = 'ec2'
        arn_type = 'internet-gateway'
        enum_spec = ('describe_internet_gateways', 'InternetGateways', None)
        name = id = 'InternetGatewayId'
        filter_name = 'InternetGatewayIds'
        filter_type = 'list'
        cfn_type = config_type = "AWS::EC2::InternetGateway"
        id_prefix = "igw-"
@InternetGateway.action_registry.register('delete')
class DeleteInternetGateway(BaseAction):
    """Action to delete Internet Gateway

    :example:

    .. code-block:: yaml

            policies:
              - name: delete-internet-gateway
                resource: internet-gateway
                actions:
                  - type: delete
    """

    schema = type_schema('delete')
    permissions = ('ec2:DeleteInternetGateway',)

    def process(self, resources):
        client = local_session(self.manager.session_factory).client('ec2')
        for r in resources:
            try:
                client.delete_internet_gateway(InternetGatewayId=r['InternetGatewayId'])
            except ClientError as err:
                # Already-deleted gateways are not an error.
                if not err.response['Error']['Code'] == 'InvalidInternetGatewayId.NotFound':
                    raise
@resources.register('nat-gateway')
class NATGateway(query.QueryResourceManager):
    """Resource manager for VPC NAT gateways."""

    class resource_type(query.TypeInfo):
        service = 'ec2'
        arn_type = 'nat-gateway'
        enum_spec = ('describe_nat_gateways', 'NatGateways', None)
        name = id = 'NatGatewayId'
        filter_name = 'NatGatewayIds'
        filter_type = 'list'
        date = 'CreateTime'
        dimension = 'NatGatewayId'
        metrics_namespace = 'AWS/NATGateway'
        id_prefix = "nat-"
        cfn_type = config_type = 'AWS::EC2::NatGateway'
@NATGateway.action_registry.register('delete')
class DeleteNATGateway(BaseAction):
    """Delete the filtered NAT gateways."""

    schema = type_schema('delete')
    permissions = ('ec2:DeleteNatGateway',)

    def process(self, resources):
        client = local_session(self.manager.session_factory).client('ec2')
        for r in resources:
            client.delete_nat_gateway(NatGatewayId=r['NatGatewayId'])
@resources.register('vpn-connection')
class VPNConnection(query.QueryResourceManager):
    """Resource manager for site-to-site VPN connections."""

    class resource_type(query.TypeInfo):
        service = 'ec2'
        arn_type = 'vpc-connection'
        enum_spec = ('describe_vpn_connections', 'VpnConnections', None)
        name = id = 'VpnConnectionId'
        filter_name = 'VpnConnectionIds'
        filter_type = 'list'
        cfn_type = config_type = 'AWS::EC2::VPNConnection'
        id_prefix = "vpn-"
@resources.register('vpn-gateway')
class VPNGateway(query.QueryResourceManager):
    """Resource manager for virtual private gateways."""

    class resource_type(query.TypeInfo):
        service = 'ec2'
        arn_type = 'vpc-gateway'
        enum_spec = ('describe_vpn_gateways', 'VpnGateways', None)
        name = id = 'VpnGatewayId'
        filter_name = 'VpnGatewayIds'
        filter_type = 'list'
        cfn_type = config_type = 'AWS::EC2::VPNGateway'
        id_prefix = "vgw-"
@resources.register('vpc-endpoint')
class VpcEndpoint(query.QueryResourceManager):
    """Resource manager for VPC endpoints."""

    class resource_type(query.TypeInfo):
        service = 'ec2'
        arn_type = 'vpc-endpoint'
        enum_spec = ('describe_vpc_endpoints', 'VpcEndpoints', None)
        name = id = 'VpcEndpointId'
        date = 'CreationTimestamp'
        filter_name = 'VpcEndpointIds'
        filter_type = 'list'
        id_prefix = "vpce-"
        universal_taggable = object()
        cfn_type = config_type = "AWS::EC2::VPCEndpoint"
@VpcEndpoint.filter_registry.register('cross-account')
class EndpointCrossAccountFilter(CrossAccountAccessFilter):
    """Flag endpoints whose policy document grants access to foreign accounts."""

    policy_attribute = 'PolicyDocument'
    annotation_key = 'c7n:CrossAccountViolations'
    permissions = ('ec2:DescribeVpcEndpoints',)
@VpcEndpoint.filter_registry.register('security-group')
class EndpointSecurityGroupFilter(net_filters.SecurityGroupFilter):
    """Filter vpc endpoints by attributes of their security groups."""

    RelatedIdsExpression = "Groups[].GroupId"
@VpcEndpoint.filter_registry.register('subnet')
class EndpointSubnetFilter(net_filters.SubnetFilter):
    """Filter vpc endpoints by attributes of their subnets."""

    RelatedIdsExpression = "SubnetIds[]"
@VpcEndpoint.filter_registry.register('vpc')
class EndpointVpcFilter(net_filters.VpcFilter):
    """Filter vpc endpoints by attributes of their VPC."""

    RelatedIdsExpression = "VpcId"
@resources.register('key-pair')
class KeyPair(query.QueryResourceManager):
    """Resource manager for EC2 key pairs."""

    class resource_type(query.TypeInfo):
        service = 'ec2'
        arn_type = 'key-pair'
        enum_spec = ('describe_key_pairs', 'KeyPairs', None)
        name = id = 'KeyName'
        filter_name = 'KeyNames'
@KeyPair.filter_registry.register('unused')
class UnusedKeyPairs(Filter):
    """Filter for used or unused keys.

    The default is unused but can be changed by using the state property.

    :example:

    .. code-block:: yaml

        policies:
          - name: unused-key-pairs
            resource: aws.key-pair
            filters:
              - unused
          - name: used-key-pairs
            resource: aws.key-pair
            filters:
              - type: unused
                state: false
    """
    annotation_key = 'c7n:unused_keys'
    permissions = ('ec2:DescribeKeyPairs',)
    schema = type_schema('unused',
        state={'type': 'boolean'})

    def process(self, resources, event=None):
        # A key is "used" when any ec2 instance references its KeyName.
        instances = self.manager.get_resource_manager('ec2').resources()
        used = set(jmespath.search('[].KeyName', instances))
        if self.data.get('state', True):
            return [r for r in resources if r['KeyName'] not in used]
        return [r for r in resources if r['KeyName'] in used]
@KeyPair.action_registry.register('delete')
class DeleteUnusedKeyPairs(BaseAction):
    """Delete all ec2 keys that are not in use

    This should always be used with the unused filter
    and it will prevent you from using without it.

    :example:

    .. code-block:: yaml

        policies:
          - name: delete-unused-key-pairs
            resource: aws.key-pair
            filters:
              - unused
            actions:
              - delete
    """
    permissions = ('ec2:DeleteKeyPair',)
    schema = type_schema('delete')

    def validate(self):
        """Require the 'unused' filter so in-use keys cannot be deleted."""
        if not [f for f in self.manager.iter_filters() if isinstance(f, UnusedKeyPairs)]:
            raise PolicyValidationError(
                "delete should be used in conjunction with the unused filter on %s" % (
                    self.manager.data,))
        # BUG FIX (message grammar): previously read "You policy has
        # filtered used keys you should use this with unused keys".
        if [True for f in self.manager.iter_filters() if f.data.get('state') is False]:
            raise PolicyValidationError(
                "Your policy has filtered used keys; delete should only be used with unused keys %s" % (
                    self.manager.data,))
        return self

    def process(self, unused):
        client = local_session(self.manager.session_factory).client('ec2')
        for key in unused:
            client.delete_key_pair(KeyPairId=key['KeyPairId'])
@Vpc.action_registry.register('set-flow-log')
@Subnet.action_registry.register('set-flow-log')
@NetworkInterface.action_registry.register('set-flow-log')
class CreateFlowLogs(BaseAction):
    """Create flow logs for a network resource

    :example:

    .. code-block:: yaml

            policies:
              - name: vpc-enable-flow-logs
                resource: vpc
                filters:
                  - type: flow-logs
                    enabled: false
                actions:
                  - type: set-flow-log
                    DeliverLogsPermissionArn: arn:iam:role
                    LogGroupName: /custodian/vpc/flowlogs/
    """
    permissions = ('ec2:CreateFlowLogs', 'logs:CreateLogGroup',)

    schema = {
        'type': 'object',
        'additionalProperties': False,
        'properties': {
            'type': {'enum': ['set-flow-log']},
            # state true (default) creates flow logs; false deletes them.
            'state': {'type': 'boolean'},
            'DeliverLogsPermissionArn': {'type': 'string'},
            'LogGroupName': {'type': 'string'},
            'LogDestination': {'type': 'string'},
            'LogFormat': {'type': 'string'},
            'MaxAggregationInterval': {'type': 'integer'},
            'LogDestinationType': {'enum': ['s3', 'cloud-watch-logs']},
            'TrafficType': {
                'type': 'string',
                'enum': ['ACCEPT', 'REJECT', 'ALL']
            }
        }
    }

    # c7n resource arn_type -> CreateFlowLogs ResourceType value.
    RESOURCE_ALIAS = {
        'vpc': 'VPC',
        'subnet': 'Subnet',
        'eni': 'NetworkInterface'
    }

    # Per destination-type parameter constraints, enforced in validate().
    SchemaValidation = {
        's3': {
            'required': ['LogDestination'],
            'absent': ['LogGroupName', 'DeliverLogsPermissionArn']
        },
        'cloud-watch-logs': {
            'required': ['DeliverLogsPermissionArn'],
            'one-of': ['LogGroupName', 'LogDestination'],
        }
    }

    def validate(self):
        """Check destination-type specific parameter requirements."""
        self.state = self.data.get('state', True)
        if not self.state:
            return
        destination_type = self.data.get(
            'LogDestinationType', 'cloud-watch-logs')
        dvalidation = self.SchemaValidation[destination_type]
        for r in dvalidation.get('required', ()):
            if not self.data.get(r):
                raise PolicyValidationError(
                    'Required %s missing for destination-type:%s' % (
                        r, destination_type))
        for r in dvalidation.get('absent', ()):
            if r in self.data:
                raise PolicyValidationError(
                    '%s is prohibited for destination-type:%s' % (
                        r, destination_type))
        if ('one-of' in dvalidation and
                sum([1 for k in dvalidation['one-of'] if k in self.data]) != 1):
            raise PolicyValidationError(
                "Destination:%s Exactly one of %s required" % (
                    destination_type, ", ".join(dvalidation['one-of'])))
        return self

    def delete_flow_logs(self, client, rids):
        """Delete all flow logs attached to the given resource ids."""
        flow_logs = client.describe_flow_logs(
            Filters=[{'Name': 'resource-id', 'Values': rids}])['FlowLogs']
        try:
            results = client.delete_flow_logs(
                FlowLogIds=[f['FlowLogId'] for f in flow_logs])
            for r in results['Unsuccessful']:
                # BUG FIX: the format string had three placeholders but
                # only two arguments, which broke the logging call.
                self.log.exception(
                    'Exception: delete flow-log for %s: %s',
                    r['ResourceId'], r['Error']['Message'])
        except ClientError as e:
            if e.response['Error']['Code'] == 'InvalidParameterValue':
                self.log.exception(
                    'delete flow-log: %s', e.response['Error']['Message'])
            else:
                raise

    def process(self, resources):
        client = local_session(self.manager.session_factory).client('ec2')
        params = dict(self.data)
        params.pop('type')
        # 'state' is policy metadata, not an api parameter.
        params.pop('state', None)

        model = self.manager.get_model()
        params['ResourceIds'] = [r[model.id] for r in resources]

        if not self.state:
            self.delete_flow_logs(client, params['ResourceIds'])
            return

        params['ResourceType'] = self.RESOURCE_ALIAS[model.arn_type]
        params['TrafficType'] = self.data.get('TrafficType', 'ALL').upper()
        params['MaxAggregationInterval'] = self.data.get(
            'MaxAggregationInterval', 600)
        if self.data.get('LogDestinationType', 'cloud-watch-logs') == 'cloud-watch-logs':
            # Guard: LogGroupName may be absent when LogDestination was
            # supplied instead (validate enforces exactly one of them).
            if self.data.get('LogGroupName'):
                self.process_log_group(self.data['LogGroupName'])
        try:
            results = client.create_flow_logs(**params)
            for r in results['Unsuccessful']:
                self.log.exception(
                    'Exception: create flow-log for %s: %s',
                    r['ResourceId'], r['Error']['Message'])
        except ClientError as e:
            if e.response['Error']['Code'] == 'FlowLogAlreadyExists':
                self.log.exception(
                    'Exception: create flow-log: %s',
                    e.response['Error']['Message'])
            else:
                raise

    def process_log_group(self, logroup):
        """Create the cloudwatch log group if it does not already exist."""
        client = local_session(self.manager.session_factory).client('logs')
        try:
            client.create_log_group(logGroupName=logroup)
        except client.exceptions.ResourceAlreadyExistsException:
            pass
| StarcoderdataPython |
1767845 | <reponame>cmhc/cs
#coding:utf8
'''
clean style
===========
简介:清理代码中的样式,但是不清理标签
###功能和用途###
用作抓取网页中含有大量的无用标记,本程序能够有效的清理,但是请注意,程序使用正则表达式
性能的消耗可能会不小,请在自己的本机上跑,避免在服务器上运行
'''
import re
# Strip inline attribute/style noise from scraped HTML while keeping tags.
# `ignore` is a regex alternation of tag names whose attributes are kept
# (e.g. "img"); `deltags` is an alternation of tag names removed entirely.
def clean(html, ignore="img", deltags="a|span"):
    # Pass 1: drop attributes from every tag (except ignored ones),
    # e.g. '<div class="x">' -> '<div>'.
    if ignore == '':
        exp = r"<(?P<tag>\w*)\s[^>]*>"
    else:
        exp = r"<(?P<tag>(?!" + ignore + r")\w*)\s[^>]*>"
    sub = r"<\g<tag>>"
    html = re.sub(exp, sub, html)
    # Pass 2: remove now-empty element pairs such as '<b> </b>'.
    # BUG FIX: the closing pattern previously required a stray character
    # before '>' ('[^>]>'), so it never matched a plain '</tag>'.
    exp = r"<(?P<tag>\w*)>\s*</(?P=tag)\s*>"
    html = re.sub(exp, '', html)
    # Pass 3: drop script/style elements together with their contents
    # (matches the attribute-less form, which pass 1 produces).
    exp = r"<(?P<tag>script|style)>[^<]*</(?P=tag)>"
    html = re.sub(exp, '', html)
    # Pass 4: strip the unwanted tags themselves, keeping their text.
    if deltags != '':
        exp = r"</?(?:" + deltags + r")?>"
        html = re.sub(exp, '', html)
    # Collapse all newlines.
    html = html.replace("\n", "")
    return html
'''
进行测试
'''
#print clean("<strong sdfsd>he</strong><img src='' /><span>这是span标签</span><br /><a href=''>这是a标签</a><html sdfs></html>","","")
| StarcoderdataPython |
1633762 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ProDy: A Python Package for Protein Dynamics Analysis
#
# Copyright (C) 2010-2012 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
"""This module contains unit tests for :mod:`~prody.ensemble`."""
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2010-2012 <NAME>'
from os.path import join
from prody.tests import TestCase
from numpy.testing import assert_equal, assert_allclose
from prody import DCDFile, writeDCD, parseDCD
from prody.tests import TEMPDIR
from prody.tests.test_ensemble import ALLATOMS, ENSEMBLE, RTOL, ATOL, DCD
class TestDCDFile(TestCase):
    """Round-trip tests for DCD trajectory writing and parsing."""

    def setUp(self):
        # Write into the test temp dir so runs do not pollute the cwd.
        self.dcd = join(TEMPDIR, 'temp.dcd')

    def testWriteDCD(self):
        dcd = writeDCD(self.dcd, ALLATOMS)
        self.assertEqual(dcd, self.dcd, 'failed to write DCD file')

    def testParseDCD(self):
        e = parseDCD(writeDCD(self.dcd, ALLATOMS))
        assert_equal(e._getCoordsets(), DCD._getCoordsets(),
                     err_msg='failed to parse DCD file correctly')

    def testWrite(self):
        dcd = DCDFile(self.dcd, 'w')
        dcd.write(ENSEMBLE.getCoordsets())
        dcd.close()
        e = parseDCD(self.dcd)
        assert_allclose(e._getCoordsets(), ENSEMBLE._getCoordsets(),
                        rtol=RTOL, atol=ATOL,
                        err_msg='failed to parse DCD file correctly')

    def testWriteModeAppend(self):
        dcd = DCDFile(writeDCD(self.dcd, ENSEMBLE), 'a')
        dcd.write(ENSEMBLE.getCoordsets())
        dcd.close()
        e = parseDCD(self.dcd)
        n_csets = len(ENSEMBLE)
        coordsets = e._getCoordsets()
        # NOTE(review): comparing coordsets to itself is a tautology;
        # presumably this was meant to verify the appended frame count —
        # confirm against the upstream ProDy test suite.
        assert_equal(coordsets, coordsets,
                     'failed to parse DCD file correctly')
        assert_allclose(coordsets[:n_csets], ENSEMBLE._getCoordsets(),
                        rtol=RTOL, atol=ATOL,
                        err_msg='failed to parse DCD file correctly')
| StarcoderdataPython |
1759498 | <reponame>zinebabercha/zineb-abercha<filename>1.Chapter-Python/presentation/ch01/1.age1.py
# Copyright 2013, <NAME>
#
# Developed for use with the book:
#
# Data Structures and Algorithms in Python
# <NAME>, <NAME>, and <NAME>
# <NAME>, 2013
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
def main():
    """Prompt repeatedly until the user enters a positive integer age."""
    age = -1  # an initially invalid choice
    while age <= 0:
        try:
            age = int(input('Enter your age in years: '))
            if age <= 0:
                print('Your age must be positive')
        except (ValueError, EOFError):
            # Non-numeric input or closed stdin: report and re-prompt.
            print('Invalid response')


if __name__ == '__main__':
    main()
| StarcoderdataPython |
4831355 | <filename>fonty/lib/__init__.py
'''fonty.lib'''
| StarcoderdataPython |
1651353 | <reponame>portfolioplus/pysymbolscanner<gh_stars>1-10
import wikipedia as wp
import wptools
from pysymbolscanner.infobox import Infobox
from pysymbolscanner.const import (
blocklist_search,
most_common_endings,
remove_most_common_endings,
)
from pysymbolscanner.utils import get_wiki_page_title_and_links, get_wiki_url
def _get_infobox_of_page(name, check_item, lang):
    """Fetch and sanity-check the wikipedia infobox for *name*.

    Returns the infobox dict with lower-cased keys, or None when the page
    cannot be fetched or does not plausibly describe *check_item*.
    """
    try:
        # The spanish wikipedia uses 'Ficha' as its infobox template name.
        if lang == 'es':
            opt = {
                'boxterm': 'Ficha',
                'skip': ['imageinfo'],
                'silent': True,
                'lang': lang,
            }
        else:
            opt = {'skip': ['imageinfo'], 'silent': True, 'lang': lang}
        page = wptools.page(name, **opt).get_parse()
        # Guard against wrong search hits: unless the titles already match,
        # the searched name must occur repeatedly in the page wikitext.
        search_str = str(page.data.get('wikitext', '')).lower()
        check_item_search = remove_most_common_endings(check_item).lower()
        name_search = remove_most_common_endings(name).lower()
        ctx_name = max(
            search_str.count(check_item_search),
            search_str.count(name_search),
        )
        if name_search != check_item_search and ctx_name < 5:
            return None
        infobox = page.data['infobox']
        if infobox:
            # Normalize keys for the language-agnostic field checks.
            infobox = {k.lower(): v for k, v in infobox.items()}
        return infobox
    except LookupError:
        return None
def _is_infobox(infobox):
if infobox is None:
return False
infobox_items = [
'nam',
'effectif',
'date de création',
'siège (pays)',
'name',
'foundation',
'hq_location_country',
'unternehmen',
'gründung_verein',
'location',
'industry',
'num_employees',
'traded_as',
'isin',
'gründungsdatum',
'mitarbeiterzahl',
'nombre',
'empleados',
'sede',
'sitz',
]
ctx = sum(map(lambda x: 1 if x in infobox else 0, infobox_items))
if ctx > 1:
return True
return False
def _is_in_infobox(infobox, search):
    """Return True when most words of *search* occur in the infobox values."""
    # Known data fix-up: wikipedia uses the 'Rusagro' spelling.
    search = search.replace('Rosagro', 'Rusagro')
    search_items = search.split() if search.split() else [search]
    # Ignore generic company-name endings (AG, Inc, ...).
    values = [item for item in search_items if item not in most_common_endings]
    if not values:
        # Robustness: every token was a generic ending; previously this
        # fell through to a ZeroDivisionError below.
        return False
    ctx = 0
    for value in values:
        if any(value.lower() in v.lower() for v in infobox.values()):
            ctx += 1
    return ctx / len(values) > 0.5
def get_wiki_infobox(page_search, lang_codes=('en', 'de', 'es', 'fr')):
    """Search wikipedia in several languages for a plausible company infobox.

    Returns an ``(infobox_dict, lang)`` tuple for the first hit that passes
    the _is_infobox heuristic, or None when nothing plausible is found.
    """
    for lang in lang_codes:
        wp.set_lang(lang)
        # BUG FIX: the previous lazy `filter(...)` object was always truthy,
        # so the empty-results check never triggered; materialize the list.
        search = [
            item for item in wp.search(page_search, results=3)
            if item not in blocklist_search
        ]
        if not search:
            continue
        for item in search:
            infobox = _get_infobox_of_page(item, page_search, lang)
            if _is_infobox(infobox):
                return infobox, lang
    return None
def get_infobox(page_search, lang_codes=('en', 'de', 'es', 'fr')):
    """Return a parsed :class:`Infobox` for *page_search*, or None.

    Wraps :func:`get_wiki_infobox` and converts its raw dict result into
    the project's ``Infobox`` type. Uses an immutable tuple default instead
    of the previous shared mutable list default.
    """
    infobox = get_wiki_infobox(page_search, lang_codes)
    if infobox is None or infobox[1] is None:
        return None
    parsed_infobox = Infobox.from_wiki_infobox(*infobox)
    # Fall back to the search term when the infobox carries no name, and
    # always record the search term as a known alias.
    if not parsed_infobox.name:
        parsed_infobox.name = page_search
    parsed_infobox.names.append(page_search)
    return parsed_infobox
def get_merged_infobox(page_search, link, link_lang, lang_codes=None):
    """Collect and merge infobox data from every language edition.

    Starting from an optional wiki *link* in *link_lang*, resolves the page
    title in each language of *lang_codes* via interlanguage links, fetches
    the infobox of every resolved page, and merges them into one result.

    Returns the merged infobox, or None when nothing was found.
    """
    if lang_codes is None:
        lang_codes = ['en', 'de', 'es', 'fr']
    result = None
    titles_by_lang = {code: None for code in lang_codes}
    # Resolve the wikipage title for every language edition reachable from
    # the given link.
    if link and link_lang:
        wiki_url = get_wiki_url(link_lang, link.replace('/wiki/', ''))
        title, interlang_links = get_wiki_page_title_and_links(
            wiki_url, lang_codes
        )
        titles_by_lang[link_lang] = title
        for other_link, other_lang in interlang_links:
            title, _ = get_wiki_page_title_and_links(
                other_link, lang_codes
            )
            titles_by_lang[other_lang] = title
    # Fetch the infobox of each resolved page and fold it into the result.
    for code in lang_codes:
        title = titles_by_lang[code]
        if not title:
            continue
        infobox = get_infobox(title, [code])
        if infobox is None:
            continue
        if result:
            result.update(infobox)
        else:
            result = infobox
    return result
| StarcoderdataPython |
187928 | """
File: hailstone.py
Name: <NAME>
-----------------------
This program should implement a console program that simulates
the execution of the Hailstone sequence, as defined by Douglas
Hofstadter. Output format should match what is shown in the sample
run in the Assignment 2 Handout.
"""
# Sentinel value: the Hailstone sequence terminates when it reaches 1.
EXIT = 1
def main():
    """
    Read an integer from the user and print each step of its Hailstone
    (Collatz) sequence until it reaches 1:
        (1) if n is odd:  n -> 3n + 1
        (2) if n is even: n -> n / 2
    Finally report how many steps were needed.
    """
    print('This program computes Hailstone sequences.')
    print('')
    # num = User's input
    n = int(input('Enter a number: '))
    # t = number of steps taken to reach 1
    t = 0
    # The previous version special-cased n == 1 with a duplicated final
    # print; the loop below covers it naturally (body never entered, t == 0).
    while n != EXIT:
        if n % 2 == 1:
            # n is odd -> 3n + 1
            nxt = 3 * n + 1
            print(str(n) + ' is odd, so I make 3n+1: ' + str(nxt))
        else:
            # n is even -> n / 2; floor division keeps n an int without the
            # float round-trip of int(n / 2)
            nxt = n // 2
            print(str(n) + ' is even, so I take half: ' + str(nxt))
        n = nxt
        t += 1
    # print result after the sequence reaches 1
    print('It took ' + str(t) + ' steps to reach 1.')
###### DO NOT EDIT CODE BELOW THIS LINE ######
# Standard script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
3226995 | <filename>str_analysis/convert_gangstr_spec_to_expansion_hunter_variant_catalog.py<gh_stars>1-10
"""This script converts a GangSTR repeat spec to an ExpansionHunter variant catalog. This simplifies the process of
switching from GangSTR to ExpansionHunter to genotype a set of loci previously genotyped using GangSTR.
"""
import argparse
import collections
import gzip
import json
from pprint import pformat
import re
import tqdm
def main():
    """Parse command-line arguments and run the spec conversion."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-o", "--output-file", help="json file output path")
    parser.add_argument("-v", "--verbose", action="store_true")
    parser.add_argument("gangstr_spec", help="path of the GangSTR repeat spec .bed file")
    args = parser.parse_args()

    if not args.output_file:
        # Default output: replace the .bed / .bed.gz suffix with
        # .variant_catalog.json next to the input file.
        stem = re.sub(".bed(.gz)?$", "", args.gangstr_spec)
        args.output_file = stem + ".variant_catalog.json"

    process_variant_catalog(args.gangstr_spec, args.output_file, verbose=args.verbose)
def process_variant_catalog(gangstr_spec_path, output_file_path, verbose=False):
    """Convert a GangSTR repeat-spec BED file to an ExpansionHunter variant catalog.

    Args:
        gangstr_spec_path: path to the GangSTR .bed(.gz) spec; tab-separated
            columns are chrom, start (1-based), end (1-based), period,
            repeat unit, and optionally off-target regions.
        output_file_path: path of the JSON(.gz) variant catalog to write.
        verbose: print a warning for every trimmed or duplicate locus.
    """
    print(f"Parsing {gangstr_spec_path}")
    json_records = []
    existing_locus_ids = set()
    counter = collections.defaultdict(int)
    with (gzip.open if gangstr_spec_path.endswith("gz") else open)(gangstr_spec_path, "rt") as f:
        for row in tqdm.tqdm(f, unit=" records"):
            fields = row.strip("\n").split("\t")
            chrom = fields[0]
            # convert the 1-based BED-style start to 0-based half-open coords
            start_0based = int(fields[1]) - 1
            end_1based = int(fields[2])
            repeat_unit = fields[4]
            if len(fields) > 5:
                off_target_regions = fields[5]
                # NOTE(review): this treats any 6th column longer than one
                # character as off-target regions — confirm the expected
                # placeholder value for "no off-targets" in GangSTR specs.
                if len(off_target_regions) > 1:
                    print(f"WARNING: found GangSTR spec with off-target regions. This script doesn't yet support "
                          f"transferring off-target regions to the variant catalog")
            counter["total input loci"] += 1
            # trim the interval so its length is a whole number of repeat units
            trim_bp = (end_1based - start_0based) % len(repeat_unit)
            if trim_bp != 0:
                counter["trimmed locus"] += 1
                if verbose:
                    print(f"WARNING: {chrom}:{start_0based}-{end_1based} interval has size {end_1based - start_0based} "
                          f"which is not a multiple of the repeat unit {repeat_unit} (size {len(repeat_unit)}). "
                          f"Changing it to {chrom}:{start_0based}-{end_1based - trim_bp}")
                end_1based -= trim_bp
            assert (end_1based - start_0based) % len(repeat_unit) == 0
            locus_id = f"{chrom}-{start_0based}-{end_1based}-{repeat_unit}"
            # skip loci already seen (same coords + unit)
            if locus_id in existing_locus_ids:
                counter["skipped duplicate"] += 1
                if verbose:
                    print(f"WARNING: skipping duplicate locus id: {locus_id}")
                continue
            existing_locus_ids.add(locus_id)
            json_records.append({
                "LocusId": locus_id,
                "ReferenceRegion": f"{chrom}:{start_0based}-{end_1based}",
                "LocusStructure": f"({repeat_unit})*",
                "VariantType": "Repeat",
            })
            # TODO add support for off-target regions
    with (gzip.open if output_file_path.endswith("gz") else open)(output_file_path, "wt") as f:
        json.dump(json_records, f, indent=4)
    print(f"Wrote out {output_file_path}")
    # summary of processed / trimmed / skipped loci
    print(pformat(dict(counter)))
# Run the conversion only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| StarcoderdataPython |
3328939 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
  'targets': [
    {
      # Static library with the generated C++ bindings for the app_shell
      # extensions API schemas listed in 'schema_files' below.
      'target_name': 'shell_api',
      'type': 'static_library',
      'sources': [
        '<@(schema_files)',
      ],
      # TODO(jschuh): http://crbug.com/167187 size_t -> int
      'msvs_disabled_warnings': [ 4267 ],
      # Shared gypi logic that runs the JSON schema compiler/bundler.
      'includes': [
        '../../../../build/json_schema_bundle_compile.gypi',
        '../../../../build/json_schema_compile.gypi',
      ],
      'variables': {
        'chromium_code': 1,
        'non_compiled_schema_files': [
        ],
        # TODO(thestig): Eliminate these on Android. See crbug.com/305852.
        'schema_files': [
          'shell.idl',
        ],
        'cc_dir': 'extensions/shell/common/api',
        'root_namespace': 'extensions::shell_api::%(namespace)s',
        'impl_dir': 'extensions/shell/browser/api',
      },
      'dependencies': [
        '<(DEPTH)/skia/skia.gyp:skia',
      ],
    },
  ],
}
| StarcoderdataPython |
4842304 | <filename>apps/logs/views.py
from collections import Counter
from functools import reduce
from pprint import pprint
from time import monotonic
from core.exceptions import BadRequestException
from core.filters import PkMultiValueFilterBackend
from core.logic.dates import date_filter_from_params, parse_month
from core.logic.serialization import parse_b64json
from core.models import REL_ORG_ADMIN, DataSource
from core.permissions import (
CanAccessOrganizationFromGETAttrs,
CanAccessOrganizationRelatedObjectPermission,
CanPostOrganizationDataPermission,
ManualDataUploadEnabledPermission,
OrganizationRequiredInDataForNonSuperusers,
OwnerLevelBasedPermissions,
SuperuserOrAdminPermission,
)
from core.prometheus import report_access_time_summary, report_access_total_counter
from core.validators import month_validator, pk_list_validator
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import BadRequest
from django.db.models import Count, Exists, OuterRef, Prefetch, Q
from django.db.transaction import atomic
from django.http import JsonResponse
from django.urls import reverse
from django.views import View
from organizations.logic.queries import organization_filter_from_org_id
from pandas import DataFrame
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.exceptions import PermissionDenied
from rest_framework.fields import CharField, ListField
from rest_framework.generics import get_object_or_404
from rest_framework.pagination import PageNumberPagination
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.serializers import DateField, IntegerField, Serializer
from rest_framework.status import HTTP_200_OK, HTTP_201_CREATED, HTTP_400_BAD_REQUEST
from rest_framework.views import APIView
from rest_framework.viewsets import ModelViewSet, ReadOnlyModelViewSet
from rest_pandas import PandasView
from scheduler.models import FetchIntention
from sushi.models import AttemptStatus, SushiCredentials, SushiFetchAttempt
from logs.logic.export import CSVExport
from logs.logic.queries import StatsComputer, extract_accesslog_attr_query_params
from logs.models import (
AccessLog,
Dimension,
DimensionText,
FlexibleReport,
ImportBatch,
InterestGroup,
ManualDataUpload,
MduState,
Metric,
ReportInterestMetric,
ReportType,
)
from logs.serializers import (
AccessLogSerializer,
DimensionSerializer,
DimensionTextSerializer,
FlexibleReportSerializer,
ImportBatchSerializer,
ImportBatchVerboseSerializer,
InterestGroupSerializer,
ManualDataUploadSerializer,
ManualDataUploadVerboseSerializer,
MetricSerializer,
ReportTypeInterestSerializer,
ReportTypeSerializer,
)
from . import filters
from .logic.reporting.slicer import FlexibleDataSlicer, SlicerConfigError, SlicerConfigErrorCode
from .tasks import export_raw_data_task
class StandardResultsSetPagination(PageNumberPagination):
    """Default pagination: 100 per page, client-adjustable up to 5000."""

    page_size = 100
    # clients may override the page size via ?page_size=...
    page_size_query_param = 'page_size'
    max_page_size = 5000
class Counter5DataView(APIView):
    """Return chart-ready usage data for one report type.

    The ``format`` GET param selects the output: default is JSON enriched
    with dimension and metric metadata; ``csv``/``xlsx`` return a bare
    tabular attachment.
    """

    # permission_classes = [IsAuthenticated &
    #                       (SuperuserOrAdminPermission | CanAccessOrganizationFromGETAttrs)
    #                       ]

    def get(self, request, report_type_id):
        report_type = get_object_or_404(ReportType, pk=report_type_id)
        computer = StatsComputer()
        start = monotonic()
        # special attribute signaling that this view is used on dashboard and thus we
        # want to cache the data for extra speed using recache
        dashboard_view = 'dashboard' in request.GET
        data = computer.get_data(report_type, request.GET, request.user, recache=dashboard_view)
        # record Prometheus access metrics for this report type
        label_attrs = dict(view_type='chart_data_raw', report_type=computer.used_report_type.pk)
        report_access_total_counter.labels(**label_attrs).inc()
        report_access_time_summary.labels(**label_attrs).observe(monotonic() - start)
        data_format = request.GET.get('format')
        if data_format in ('csv', 'xlsx'):
            # for the bare result, we do not add any extra information, just output the list
            data = DataFrame(data)
            new_keys = [computer.io_prim_dim_name]
            if computer.io_sec_dim_name:
                new_keys.append(computer.io_sec_dim_name)
            # we set the queried dimensions as index so that the default integer index is not
            # added to the result
            data.set_index(new_keys, drop=True, inplace=True)
            return Response(
                data,
                headers={'Content-Disposition': f'attachment; filename="export.{data_format}"'},
            )
        # prepare the data to return
        reply = {'data': data}
        if computer.prim_dim_obj:
            reply[computer.prim_dim_name] = DimensionSerializer(computer.prim_dim_obj).data
        if computer.sec_dim_obj:
            reply[computer.sec_dim_name] = DimensionSerializer(computer.sec_dim_obj).data
        reply['reported_metrics'] = MetricSerializer(
            computer.reported_metrics.values(), many=True
        ).data
        return Response(reply)
class ReportTypeViewSet(ReadOnlyModelViewSet):
    """Read-only API for non-materialized report types."""

    serializer_class = ReportTypeSerializer
    queryset = ReportType.objects.filter(materialization_spec__isnull=True)
    filter_backends = [PkMultiValueFilterBackend]

    def get_queryset(self):
        # ?nonzero-only restricts the list to report types that actually have
        # data (an import batch exists), plus the special 'interest' report.
        if 'nonzero-only' in self.request.query_params:
            return self.queryset.filter(
                Q(Exists(ImportBatch.objects.filter(report_type_id=OuterRef('pk'))))
                | Q(short_name='interest')
            ).prefetch_related('controlled_metrics')
        return self.queryset
class MetricViewSet(ReadOnlyModelViewSet):
    """Read-only API listing all metrics; filterable by ?pks=1,2,3."""

    serializer_class = MetricSerializer
    queryset = Metric.objects.all()
    filter_backends = [PkMultiValueFilterBackend]
class ReportInterestMetricViewSet(ReadOnlyModelViewSet):
    """Read-only API exposing report types together with their interest
    metric mappings and the number of platforms using each report."""

    serializer_class = ReportTypeInterestSerializer
    queryset = (
        ReportType.objects.filter(materialization_spec__isnull=True)
        .exclude(short_name='interest')
        # how many distinct platforms use each report type for interest
        .annotate(used_by_platforms=Count('platforminterestreport__platform', distinct=True))
        # prefetch the whole interest-metric mapping to avoid N+1 queries
        .prefetch_related(
            "interest_metrics",
            Prefetch(
                "reportinterestmetric_set",
                queryset=ReportInterestMetric.objects.select_related(
                    "metric", "target_metric", "interest_group"
                ),
            ),
            "controlled_metrics",
        )
    )
class DimensionTextViewSet(ReadOnlyModelViewSet):
    """Read-only API for dimension text values (id -> text remap table)."""

    serializer_class = DimensionTextSerializer
    queryset = DimensionText.objects.all()
    pagination_class = StandardResultsSetPagination
    filter_backends = [PkMultiValueFilterBackend]

    @property
    def paginator(self):
        if 'pks' in self.request.query_params:
            # if 'pks' are explicitly given, do not paginate and return all
            return None
        return super().paginator

    def post(self, request):
        """
        To get around possible limits in query string length, we also provide a POST interface
        for getting data for a list of IDs.
        It only works if the 'pks' attribute is given and does not use pagination.
        """
        pks = request.data.get('pks', [])
        dts = DimensionText.objects.filter(pk__in=pks)
        # we do not paginate when using post
        return Response(self.get_serializer(dts, many=True).data)
class RawDataExportView(PandasView):
    """Synchronous export of raw access logs (capped at export_size_limit)."""

    serializer_class = AccessLogSerializer
    # related objects fetched eagerly for every access log row
    implicit_dims = ['platform', 'metric', 'organization', 'target', 'report_type', 'import_batch']
    export_size_limit = 100_000  # limit the number of records in output to this number

    def get_queryset(self):
        query_params = self.extract_query_filter_params(self.request)
        data = AccessLog.objects.filter(**query_params).select_related(*self.implicit_dims)[
            : self.export_size_limit
        ]
        # one-shot remap tables so the per-row loop below does no queries
        text_id_to_text = {
            dt['id']: dt['text'] for dt in DimensionText.objects.all().values('id', 'text')
        }
        tr_to_dimensions = {rt.pk: rt.dimensions_sorted for rt in ReportType.objects.all()}
        for al in data:
            # resolve dim1..dimN ids into human readable values per report type
            al.mapped_dim_values_ = {}
            for i, dim in enumerate(tr_to_dimensions[al.report_type_id]):
                value = getattr(al, f'dim{i+1}')
                al.mapped_dim_values_[dim.short_name] = text_id_to_text.get(value, value)
            if al.target:
                al.mapped_dim_values_['isbn'] = al.target.isbn
                al.mapped_dim_values_['issn'] = al.target.issn
                al.mapped_dim_values_['eissn'] = al.target.eissn
        return data

    @classmethod
    def extract_query_filter_params(cls, request) -> dict:
        """Build AccessLog filter kwargs from date + dimension GET params."""
        query_params = date_filter_from_params(request.GET)
        query_params.update(
            extract_accesslog_attr_query_params(
                request.GET, dimensions=cls.implicit_dims, mdu_filter=True
            )
        )
        return query_params
class RawDataDelayedExportView(APIView):
    """Asynchronous (celery-based) export of raw access logs.

    GET returns the number of records that would be exported; POST starts
    the export task and returns URLs for polling progress and fetching the
    result.
    """

    permission_classes = [
        IsAuthenticated
        & (
            SuperuserOrAdminPermission
            | (OrganizationRequiredInDataForNonSuperusers & CanAccessOrganizationFromGETAttrs)
        )
    ]

    def get(self, request):
        query_params = self.extract_query_filter_params(request)
        exporter = CSVExport(query_params)
        return JsonResponse({'total_count': exporter.record_count})

    def post(self, request):
        query_params = self.extract_query_filter_params(request)
        exporter = CSVExport(query_params, zip_compress=True)
        # run the actual export in celery; the client polls progress_url
        export_raw_data_task.delay(
            query_params, exporter.filename_base, zip_compress=exporter.zip_compress
        )
        return JsonResponse(
            {
                'progress_url': reverse('raw_data_export_progress', args=(exporter.filename_base,)),
                'result_url': exporter.file_url,
            }
        )

    @classmethod
    def extract_query_filter_params(cls, request) -> dict:
        # we use celery with the params, so we need to make it serialization friendly
        # thus we convert the params accordingly using str_date and used_ids
        query_params = date_filter_from_params(request.GET, str_date=True)
        query_params.update(
            extract_accesslog_attr_query_params(
                request.GET, dimensions=CSVExport.implicit_dims, use_ids=True
            )
        )
        return query_params
class RawDataDelayedExportProgressView(View):
    """Report how many records a delayed raw-data export has written so far."""

    def get(self, request, handle):
        # Only handles produced by CSVExport (prefixed 'raw-data-') are
        # looked up in the cache; anything else yields a null count.
        if not (handle and handle.startswith('raw-data-')):
            return JsonResponse({'count': None})
        return JsonResponse({'count': cache.get(handle)})
class ImportBatchViewSet(ReadOnlyModelViewSet):
    """Read-only API for import batches plus bulk actions (lookup, purge,
    data-presence)."""

    serializer_class = ImportBatchSerializer
    queryset = ImportBatch.objects.all()
    # pagination_class = StandardResultsSetPagination
    filter_backends = [filters.AccessibleFilter, filters.UserFilter, filters.OrderByFilter]

    def get_queryset(self):
        qs = self.queryset
        if 'pk' in self.kwargs:
            # we only add accesslog_count if only one object was requested
            qs = qs.annotate(accesslog_count=Count('accesslog'))
        qs = qs.select_related('organization', 'platform', 'report_type')
        return qs

    def get_serializer_class(self):
        if 'pk' in self.kwargs:
            # for one result, we can use the verbose serializer
            return ImportBatchVerboseSerializer
        return super().get_serializer_class()

    class LookupSerializer(Serializer):
        # one lookup record: an (organization, platform, report_type) triple
        # plus the months of interest
        organization = IntegerField(required=True)
        platform = IntegerField(required=True)
        report_type = IntegerField(required=True)
        months = ListField(child=DateField(), allow_empty=False)

    @action(detail=False, methods=['post'])
    def lookup(self, request):
        """Based on a provided list of records
        [("organization", "platform", "report_type", "months")]
        return the corresponding import batches.
        """
        serializer = self.LookupSerializer(many=True, data=request.data)
        serializer.is_valid(raise_exception=True)
        # OR together one AND-filter per record; seeded with an always-empty Q
        fltr = Q(pk=None)  # always empty
        for record in serializer.data:
            fltr |= (
                Q(organization_id=record["organization"])
                & Q(platform_id=record["platform"])
                & Q(report_type=record["report_type"])
                & Q(date__in=record["months"])
            )
        qs = ImportBatch.objects.filter(fltr)
        # Only available organizations of the user
        qs = filters.AccessibleFilter().filter_queryset(request, qs, self)
        # Apply ordering
        qs = filters.OrderByFilter().filter_queryset(request, qs, self)
        # Optimizations
        qs = (
            qs.select_related(
                'user',
                'platform',
                'platform__source',
                'organization',
                'report_type',
                'sushifetchattempt',
            )
            .prefetch_related('mdu')
            .annotate(accesslog_count=Count('accesslog'))
        )
        return Response(ImportBatchVerboseSerializer(qs, many=True).data)

    class PurgeSerializer(Serializer):
        # primary keys of the import batches to delete
        batches = ListField(child=IntegerField(), allow_empty=False)

    @atomic
    @action(detail=False, methods=['post'], serializer_class=PurgeSerializer)
    def purge(self, request):
        """Remove all data and related structures of a given list of import batches.

        Note that if the id of a given import batch doesn't exist, it is not
        treated as an error - it might have been already deleted.
        """
        # aggregated deletion stats (model name -> deleted count)
        counter = Counter()
        serializer = self.PurgeSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        # only accessible batches
        batches = filters.AccessibleFilter().filter_queryset(
            request, ImportBatch.objects.filter(pk__in=serializer.data["batches"]), self
        )
        # remember related MDUs so we can clean up the ones left empty below
        mdus = list(
            ManualDataUpload.objects.filter(import_batches__in=batches).values_list('pk', flat=True)
        )
        # remove fetch intentions and fetch attempts
        to_delete = (
            FetchIntention.objects.filter(attempt__import_batch__in=batches)
            .values('credentials__pk', 'counter_report__pk', 'start_date')
            .distinct()
        )
        to_delete = [Q(**e) for e in to_delete]
        to_delete = reduce(lambda x, y: x | y, to_delete, Q())
        # an empty Q() is falsy, so this skips the deletes when nothing matched
        if to_delete:
            fis_to_delete = FetchIntention.objects.filter(to_delete)
            counter.update(
                SushiFetchAttempt.objects.filter(fetchintention__in=fis_to_delete).delete()[1]
            )
            counter.update(fis_to_delete.delete()[1])
        # remove import batches
        counter.update(batches.delete()[1])
        # remove empty manual data uploads
        counter.update(
            ManualDataUpload.objects.filter(pk__in=mdus, import_batches__isnull=True).delete()[1]
        )
        return Response(counter)

    class DataPresenceParamSerializer(Serializer):
        # 'YYYY-MM' month boundaries and a comma-separated credentials pk list
        start_date = CharField(validators=[month_validator], required=True)
        end_date = CharField(validators=[month_validator], required=True)
        credentials = CharField(validators=[pk_list_validator], required=True)

    @action(detail=False, methods=['get'], url_name='data-presence', url_path='data-presence')
    def data_presence(self, request):
        """
        Return a list of combinations of report_type, platform, organization and month for which
        there are some data.

        It requires a filter composed of `start_date`, `end_date` and `credentials` which is a
        comma separated list of credentials primary keys.

        The result is a list of dicts with `report_type_id`, `platform_id`, `organization_id`,
        `date` and `source`. `source` is either `sushi` for data comming from SUSHI or `manual`
        for manually uploaded data.

        Please note that the resulting list may contain data which do not belong to any of the
        credentials provided in `credentials` filter. This is because manually uploaded data
        do not have a direct link to credentials and it would be too costly to remove this extra
        data.
        """
        # Note:
        #
        # This endpoint uses fetch attempt data for sushi data and access logs for manually
        # uploaded data. We could simplify it by:
        # * splitting data from manually uploaded data into one-month import batches
        # * adding date to import batches
        # * creating empty import batches for 3030 when we decide there is no reason to retry
        #
        # After these changes, we could simply query import batches to get the data for this view.
        # TODO: FIX THIS FOR IMPORT BATCHES -
        #       we need to create empty import batches for 3030 for that
        param_serializer = self.DataPresenceParamSerializer(data=request.GET)
        param_serializer.is_valid(raise_exception=True)
        params = param_serializer.validated_data
        # prepare data from SUSHI - we use fetch attempts for that
        credentials_ids = [int(cid) for cid in params['credentials'].split(',')]
        # restrict to credentials of organizations the user may access
        credentials = SushiCredentials.objects.filter(
            pk__in=credentials_ids, organization__in=request.user.accessible_organizations()
        )
        qs = SushiFetchAttempt.objects.filter(
            start_date__gte=parse_month(params['start_date']),
            start_date__lte=parse_month(params['end_date']),
            credentials__in=credentials,
            status__in=[AttemptStatus.NO_DATA, AttemptStatus.SUCCESS],
        ).select_related('credentials', 'counter_report')
        # map (report_type, platform, organization, month) -> data source
        records = {
            tuple(rec): 'sushi'
            for rec in qs.values_list(
                'counter_report__report_type_id',
                'credentials__platform_id',
                'credentials__organization_id',
                'start_date',
            ).distinct()
        }
        # now manually uploaded data - we need to go by AccessLog, there is no other place with
        # date info
        qs = AccessLog.objects.filter(
            import_batch__mdu_link__isnull=False,
            date__gte=parse_month(params['start_date']),
            date__lte=parse_month(params['end_date']),
        )
        # NOTE(review): this local `filters` list shadows the module-level
        # `filters` import inside this method - consider renaming.
        filters = []
        # we do not add report type filter to each Q below because it seems to slow the query
        # down - instead we add a report_type filter later to allow the query to skip some
        # partitions
        for cred in credentials:
            filters.append(Q(organization_id=cred.organization_id, platform_id=cred.platform_id))
        if not filters:
            raise BadRequest(
                "The 'credentials' param must resolve to at least one set of SUSHI credentials."
            )
        rts = ReportType.objects.filter(
            counterreporttype__counterreportstocredentials__credentials__in=credentials
        )
        qs = qs.filter(reduce(lambda x, y: x | y, filters, Q())).filter(report_type_id__in=rts)
        qs = qs.values_list('report_type_id', 'platform_id', 'organization_id', 'date').distinct()
        # manual data overrides the 'sushi' source for matching combinations
        for rec in qs:
            records[tuple(rec)] = 'manual'
        return Response(
            {
                'report_type_id': rt,
                'platform_id': plat,
                'organization_id': org,
                'date': date,
                'source': source,
            }
            for (rt, plat, org, date), source in records.items()
        )
class ManualDataUploadViewSet(ModelViewSet):
    """CRUD API for manual data uploads plus the preflight / import actions."""

    serializer_class = ManualDataUploadSerializer
    queryset = ManualDataUpload.objects.all()
    permission_classes = [
        IsAuthenticated
        & ManualDataUploadEnabledPermission
        & (
            (SuperuserOrAdminPermission & OwnerLevelBasedPermissions)
            | (
                OwnerLevelBasedPermissions
                & CanPostOrganizationDataPermission
                & CanAccessOrganizationRelatedObjectPermission
                & OrganizationRequiredInDataForNonSuperusers
            )
        )
    ]
    # the extra actions do not require organization in the request data
    extra_actions_permission_classes = [
        IsAuthenticated
        & ManualDataUploadEnabledPermission
        & (
            (SuperuserOrAdminPermission & OwnerLevelBasedPermissions)
            | (
                OwnerLevelBasedPermissions
                & CanPostOrganizationDataPermission
                & CanAccessOrganizationRelatedObjectPermission
            )
        )
    ]

    @action(methods=['POST'], detail=True, url_path='preflight')
    def preflight(self, request, pk):
        """Trigger (or re-trigger) the preflight computation for this upload."""
        mdu = get_object_or_404(ManualDataUpload.objects.all(), pk=pk)
        if mdu.state == MduState.INITIAL:
            # should already be planned;
            # just start it in celery right now
            mdu.plan_preflight()
            return Response({"msg": "generating preflight"})
        elif mdu.state in (MduState.PREFLIGHT, MduState.PREFAILED):
            # regenerate preflight
            if mdu.regenerate_preflight():
                return Response({"msg": "regenerating preflight"})
            else:
                return Response(
                    {"error": "preflight-trigger-failed"}, status=status.HTTP_400_BAD_REQUEST
                )
        # NOTE(review): the error code reads "can-generate-preflight" but is
        # returned when the preflight *cannot* be generated - confirm whether
        # clients rely on this exact string before fixing it.
        return Response(
            {"error": "can-generate-preflight", "state": mdu.state},
            status=status.HTTP_400_BAD_REQUEST,
        )

    @action(methods=['POST'], detail=True, url_path='import-data')
    def import_data(self, request, pk):
        """Start the actual data import, or report why it cannot start."""
        mdu = get_object_or_404(ManualDataUpload.objects.all(), pk=pk)  # type: ManualDataUpload
        if mdu.state == MduState.IMPORTED:
            # already imported - report the existing data instead of redoing it
            stats = {
                'existing logs': AccessLog.objects.filter(
                    import_batch_id__in=mdu.import_batches.all()
                ).count()
            }
            return Response(
                {
                    'stats': stats,
                    'import_batches': ImportBatchSerializer(
                        mdu.import_batches.all(), many=True
                    ).data,
                }
            )
        elif mdu.clashing_months:
            # NOTE(review): when clashing_months is truthy but
            # clashing_batches() is empty, this elif body does nothing and
            # the chain falls through to the final "can-not-import" error
            # even if mdu.can_import is True - confirm this is intended.
            if clashing_ibs := mdu.clashing_batches():
                clashing = ImportBatchVerboseSerializer(clashing_ibs, many=True).data
                return Response(
                    {"error": "data-conflict", "clashing_import_batches": clashing},
                    status=status.HTTP_409_CONFLICT,
                )
        elif mdu.state == MduState.IMPORTING:
            return Response({"msg": "already importing"})
        elif mdu.can_import:
            mdu.plan_import(request.user)
            return Response({"msg": "import started"})
        return Response({"error": "can-not-import"}, status=status.HTTP_400_BAD_REQUEST)

    def get_permissions(self):
        # extra actions (preflight, import-data) use the relaxed permission set
        if self.action in {_action.__name__ for _action in self.get_extra_actions()}:
            return [permission() for permission in self.extra_actions_permission_classes]
        else:
            return super().get_permissions()
class OrganizationManualDataUploadViewSet(ReadOnlyModelViewSet):
    """
    This version of the ManualDataUploadViewSet is filtered by organization and offers
    a verbose output but is read-only. For a less verbose, read-write access, there
    is the 'manual-data-upload' api view that is directly in the API root.
    """

    serializer_class = ManualDataUploadVerboseSerializer
    queryset = ManualDataUpload.objects.all()
    permission_classes = [
        IsAuthenticated
        & ManualDataUploadEnabledPermission
        & (
            (SuperuserOrAdminPermission & OwnerLevelBasedPermissions)
            | (OwnerLevelBasedPermissions & CanAccessOrganizationRelatedObjectPermission)
        )
    ]

    def get_queryset(self):
        org_filter = organization_filter_from_org_id(
            self.kwargs.get('organization_pk'), self.request.user
        )
        qs = (
            ManualDataUpload.objects.filter(**org_filter)
            .select_related('organization', 'platform', 'report_type', 'user')
            .prefetch_related('import_batches', 'import_batches__user')
        )
        # add access level stuff
        org_to_level = {}  # this is used to cache user access level for the same organization
        for mdu in qs:  # type: ManualDataUpload
            if mdu.organization_id not in org_to_level:
                org_to_level[mdu.organization_id] = self.request.user.organization_relationship(
                    mdu.organization_id
                )
            user_org_level = org_to_level[mdu.organization_id]
            # editable only when the user's org level is at least the owner level
            mdu.can_edit = user_org_level >= mdu.owner_level
        return qs
class CustomDimensionsViewSet(ModelViewSet):
    """Dimensions visible to one organization: its private ones plus the
    globally defined (source-less) dimensions."""

    queryset = Dimension.objects.all().order_by('pk')
    serializer_class = DimensionSerializer

    def get_queryset(self):
        # 404 when the user may not access the organization at all
        organization = get_object_or_404(
            self.request.user.accessible_organizations(), pk=self.kwargs.get('organization_pk')
        )
        try:
            source = organization.private_data_source
        except DataSource.DoesNotExist:
            # no private source - only global dimensions apply
            return Dimension.objects.filter(source__isnull=True)
        return source.dimension_set.all().order_by('pk') | Dimension.objects.filter(
            source__isnull=True
        )
class OrganizationReportTypesViewSet(ModelViewSet):
    """Report types visible to one organization: its private ones plus the
    globally defined (source-less) report types."""

    queryset = ReportType.objects.all()
    serializer_class = ReportTypeSerializer

    def get_queryset(self):
        # 404 when the user may not access the organization at all
        organization = get_object_or_404(
            self.request.user.accessible_organizations(), pk=self.kwargs.get('organization_pk')
        )
        try:
            source = organization.private_data_source
        except DataSource.DoesNotExist:
            # no private source - only global report types apply
            return ReportType.objects.filter(source__isnull=True)
        return source.reporttype_set.all() | ReportType.objects.filter(source__isnull=True)
class InterestGroupViewSet(ReadOnlyModelViewSet):
    """Read-only API listing all interest groups."""

    queryset = InterestGroup.objects.all()
    serializer_class = InterestGroupSerializer
class FlexibleSlicerBaseView(APIView):
    """Shared base for views built on top of FlexibleDataSlicer."""

    def create_slicer(self, request):
        """Build a slicer from the request query params.

        Restricts the slicer to the user's accessible organizations and
        converts slicer configuration problems into a 400 response.
        """
        try:
            slicer = FlexibleDataSlicer.create_from_params(request.query_params)
            slicer.add_extra_organization_filter(request.user.accessible_organizations())
            if settings.DEBUG:
                # dump the effective slicer config during development
                pprint(slicer.config())
            slicer.check_params()
            return slicer
        except SlicerConfigError as e:
            # BadRequestException is translated to HTTP 400 by the handler
            raise BadRequestException(
                {'error': {'message': str(e), 'code': e.code, 'details': e.details}},
            )
class FlexibleSlicerView(FlexibleSlicerBaseView):
    """Return paginated data for an ad-hoc ("flexible") report slicer.

    When the slicer splits output into parts, the ``part`` GET param
    (base64-encoded JSON) selects which part to return.
    """

    def get(self, request):
        slicer = self.create_slicer(request)
        try:
            # 'part' is only meaningful when the slicer splits its output
            part = request.query_params.get('part') if slicer.split_by else None
            if part:
                part = parse_b64json(part)
            data = slicer.get_data(part=part, lang=request.user.language)
        except SlicerConfigError as e:
            return Response(
                {'error': {'message': str(e), 'code': e.code, 'details': e.details}},
                status=HTTP_400_BAD_REQUEST,
            )
        pagination = StandardResultsSetPagination()
        page = pagination.paginate_queryset(data, request)
        return pagination.get_paginated_response(page)
class FlexibleSlicerPossibleValuesView(FlexibleSlicerBaseView):
    """List the possible values of one dimension under the current slicer
    filters (used to populate filter widgets in the UI)."""

    def get(self, request):
        dimension = request.query_params.get('dimension')
        if not dimension:
            return Response(
                {
                    'error': {
                        'message': 'the "dimension" param is required',
                        'code': SlicerConfigErrorCode.E105,
                    }
                },
                status=HTTP_400_BAD_REQUEST,
            )
        slicer = self.create_slicer(request)
        # optional text search within the values
        q = request.query_params.get('q')
        pks = None
        pks_value = request.query_params.get('pks')
        if pks_value:
            try:
                pks = list(map(int, pks_value.split(',')))
            except ValueError as e:
                # Malformed 'pks' is a client error; previously this response
                # went out with HTTP 200 unlike every other error path here.
                return Response(
                    {'error': {'message': str(e)}}, status=HTTP_400_BAD_REQUEST
                )
        return Response(
            slicer.get_possible_dimension_values(
                dimension, ignore_self=True, text_filter=q, pks=pks
            )
        )
class FlexibleSlicerSplitParts(FlexibleSlicerBaseView):
    """List the values by which a slicer's output would be split into parts."""

    # safety cap on how many part values are returned in one response
    MAX_COUNT = 1000

    def get(self, request):
        slicer = self.create_slicer(request)
        qs = slicer.get_parts_queryset()
        cropped = False
        count = 0
        if qs:
            count = qs.count()
            if count > self.MAX_COUNT:
                # return only the first MAX_COUNT values but report the full count
                qs = qs[: self.MAX_COUNT]
                cropped = True
        return Response({'count': count, "values": qs or [], "cropped": cropped})
class FlexibleReportViewSet(ModelViewSet):
queryset = FlexibleReport.objects.none()
serializer_class = FlexibleReportSerializer
def get_queryset(self):
return FlexibleReport.objects.filter(
Q(owner=self.request.user) # owned by user
| Q(owner__isnull=True, owner_organization__isnull=True) # completely public
| Q(
owner_organization__in=self.request.user.accessible_organizations()
) # assigned to owner's organization
)
def _preprocess_config(self, request):
if 'config' not in request.data:
return None
slicer = FlexibleDataSlicer.create_from_params(request.data.get('config'))
return FlexibleReport.serialize_slicer_config(slicer.config())
def _get_basic_data(self, request):
owner = request.user.pk if 'owner' not in request.data else request.data.get('owner')
return {
'owner': owner,
'owner_organization': (request.data.get('owner_organization')),
'name': request.data.get('name'),
}
def _check_write_permissions(self, request, owner, owner_organization):
# only superuser can set other user as owner
if not (request.user.is_superuser or request.user.is_from_master_organization):
if owner not in (None, request.user.pk):
raise PermissionDenied(f'Not allowed to set owner {owner}')
if owner_organization:
rel = request.user.organization_relationship(owner_organization)
if rel < REL_ORG_ADMIN:
raise PermissionDenied(
f'Not allowed to set owner_organization {owner_organization}'
)
if not owner and not owner_organization:
# this should be consortial access level
if not (request.user.is_superuser or request.user.is_from_master_organization):
raise PermissionDenied(f'Not allowed to create consortial level report')
def create(self, request, *args, **kwargs):
config = self._preprocess_config(request)
if config is None:
return Response(
{'error': 'Missing "config" parameter for the report'}, status=HTTP_400_BAD_REQUEST
)
data = {**self._get_basic_data(request), 'report_config': config}
self._check_write_permissions(request, data['owner'], data['owner_organization'])
serializer = self.get_serializer(data=data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=HTTP_201_CREATED, headers=headers)
    def _check_update_permissions(self, request, obj: FlexibleReport, delete=False):
        """Raise PermissionDenied unless `request.user` may modify `obj`.

        Two stages: first the generic edit right implied by the report's
        current access level, then (unless deleting) whether the user could
        create a report with the ownership the update would produce.
        """
        user = request.user
        # generic permission to edit based on current access_level
        if obj.access_level == FlexibleReport.Level.PRIVATE:
            # only owner or superuser may edit
            if not (user == obj.owner or user.is_superuser or user.is_from_master_organization):
                raise PermissionDenied(f'Not allowed to change private report')
        elif obj.access_level == FlexibleReport.Level.ORGANIZATION:
            # only admin of owner_organization or superuser may edit
            if not (user.is_superuser or user.is_from_master_organization):
                rel = request.user.organization_relationship(obj.owner_organization_id)
                if rel < REL_ORG_ADMIN:
                    raise PermissionDenied(f'Not allowed to change organization report')
        else:
            # only superuser may edit consortium level reports
            if not (user.is_superuser or user.is_from_master_organization):
                raise PermissionDenied(f'Not allowed to change consortial report')
        if not delete:
            # now more specific permissions about who can change access level
            # we deduce what the owner and owner_organization would be after the update takes place
            # and check if the current user is allowed to create such a report
            # (an explicitly sent key -- even None -- overrides the stored value)
            owner = request.data.get('owner') if 'owner' in request.data else obj.owner_id
            owner_organization = (
                request.data.get('owner_organization')
                if 'owner_organization' in request.data
                else obj.owner_organization_id
            )
            self._check_write_permissions(request, owner, owner_organization)
def update(self, request, *args, **kwargs):
"""
Permissions for this view should be:
* private reports (owner != None)
- only owner may see and change
- only if the owner is org admin or superuser he may raise the access level to
organization or consortium
* organization reports (owner_organization != None)
- only admin of organization or superuser may change
- only admin of organization or superuser may change accesslevel
* consortial reports (owner == None and owner_organization == None)
- only superuser may change
- only superuser may change accesslevel
"""
config = self._preprocess_config(request)
report = self.get_object()
self._check_update_permissions(request, report)
data = {**request.data}
if config:
data['report_config'] = config
serializer = self.get_serializer(report, data=data, partial=kwargs.get('partial'))
serializer.is_valid(raise_exception=True)
self.perform_update(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=HTTP_200_OK, headers=headers)
    def destroy(self, request, *args, **kwargs):
        """Delete the report after verifying the user may modify it."""
        # delete=True skips the ownership-reassignment checks that only
        # matter when the report is being changed, not removed
        self._check_update_permissions(request, self.get_object(), delete=True)
        return super().destroy(request, *args, **kwargs)
| StarcoderdataPython |
4824189 | <reponame>JonathanGailliez/azure-sdk-for-python<filename>azure-keyvault/azure/keyvault/v7_0/models/key_properties_py3.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class KeyProperties(Model):
    """Properties of the key pair backing a certificate.

    :param exportable: Indicates if the private key can be exported.
    :type exportable: bool
    :param key_type: The type of key pair to be used for the certificate.
     Possible values include: 'EC', 'EC-HSM', 'RSA', 'RSA-HSM', 'oct'
    :type key_type: str or ~azure.keyvault.v7_0.models.JsonWebKeyType
    :param key_size: The key size in bits. For example: 2048, 3072, or 4096
     for RSA.
    :type key_size: int
    :param reuse_key: Indicates if the same key pair will be used on
     certificate renewal.
    :type reuse_key: bool
    :param curve: Elliptic curve name. For valid values, see
     JsonWebKeyCurveName. Possible values include: 'P-256', 'P-384', 'P-521',
     'P-256K'
    :type curve: str or ~azure.keyvault.v7_0.models.JsonWebKeyCurveName
    """

    # Maps Python attribute names to their wire (JSON) keys and msrest types.
    _attribute_map = {
        'exportable': {'key': 'exportable', 'type': 'bool'},
        'key_type': {'key': 'kty', 'type': 'str'},
        'key_size': {'key': 'key_size', 'type': 'int'},
        'reuse_key': {'key': 'reuse_key', 'type': 'bool'},
        'curve': {'key': 'crv', 'type': 'str'},
    }

    def __init__(self, *, exportable: bool=None, key_type=None, key_size: int=None, reuse_key: bool=None, curve=None, **kwargs) -> None:
        super().__init__(**kwargs)
        # Plain attribute storage is all that is needed here; serialization
        # is driven entirely by _attribute_map.
        self.curve = curve
        self.reuse_key = reuse_key
        self.key_size = key_size
        self.key_type = key_type
        self.exportable = exportable
| StarcoderdataPython |
3233703 | <reponame>rodrigomelo9/uvm-python
#//----------------------------------------------------------------------
#// Copyright 2007-2010 Mentor Graphics Corporation
#// Copyright 2007-2010 Cadence Design Systems, Inc.
#// Copyright 2010 Synopsys, Inc.
#// Copyright 2019 <NAME>
#// All Rights Reserved Worldwide
#//
#// Licensed under the Apache License, Version 2.0 (the
#// "License"); you may not use this file except in
#// compliance with the License. You may obtain a copy of
#// the License at
#//
#// http://www.apache.org/licenses/LICENSE-2.0
#//
#// Unless required by applicable law or agreed to in
#// writing, software distributed under the License is
#// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
#// CONDITIONS OF ANY KIND, either express or implied. See
#// the License for the specific language governing
#// permissions and limitations under the License.
#//----------------------------------------------------------------------
from uvm.seq import UVMSequence
from uvm.macros.uvm_object_defines import uvm_object_utils
from uvm.macros.uvm_message_defines import uvm_info
from uvm.macros.uvm_sequence_defines import uvm_do_with
from uvm.base import sv, UVM_HIGH
from ubus_transfer import ubus_transfer, READ, WRITE
#//------------------------------------------------------------------------------
#//
#// SEQUENCE: ubus_base_sequence
#//
#//------------------------------------------------------------------------------
#// This sequence raises/drops objections in the pre/post_body so that root
#// sequences raise objections but subsequences do not.
class ubus_base_sequence(UVMSequence):
    """Common base for ubus sequences.

    Enables automatic phase objections (so root sequences raise/drop
    objections while subsequences do not) and pre-creates the request and
    response transfer items used by subclasses.
    """

    def __init__(self, name="ubus_base_seq"):
        super().__init__(name)
        self.req = ubus_transfer()
        self.rsp = ubus_transfer()
        # root sequences manage phase objections automatically
        self.set_automatic_phase_objection(1)
#//------------------------------------------------------------------------------
#//
#// SEQUENCE: read_byte
#//
#//------------------------------------------------------------------------------
class read_byte_seq(ubus_base_sequence):
    """Read a single byte from the bus at a randomised start address.

    Python port of the commented-out SV ``read_byte_seq`` below; the
    response item is stored in ``self.rsp`` after the transfer completes.
    """

    def __init__(self, name="read_byte_seq"):
        ubus_base_sequence.__init__(self, name)
        self.start_addr = 0
        # NOTE(review): the SV reference declares `bit [15:0] start_addr`, but
        # this randomises over a 32-bit range (write_byte_seq uses 1 << 16) --
        # confirm which address width is intended.
        self.rand('start_addr', range((1 << 32) - 1))
        self.transmit_delay = 0
        # constraint transmit_del_ct { (transmit_del <= 10); }

    async def body(self):
        # Pre-populate the request item; `uvm_do_with` below randomises it
        # subject to the constraint that addr equals start_addr.
        self.req.data = [1234]
        self.req.addr = self.start_addr
        self.req.read_write = READ
        self.req.size = 1
        # presumably 1000 is past any byte position, i.e. "no error injected"
        # -- TODO confirm against ubus_transfer semantics
        self.req.error_pos = 1000
        self.req.transmit_delay = self.transmit_delay
        await uvm_do_with(self, self.req, lambda addr: addr == self.start_addr)
        #    { req.addr == start_addr
        #      req.read_write == READ
        #      req.size == 1
        #      req.error_pos == 1000
        #      req.transmit_delay == transmit_del; } )
        # get_response() appends the matching response item to the list
        rsp = []
        await self.get_response(rsp)
        self.rsp = rsp[0]
        # NOTE(review): the format string mixes SV-style %s with Python-style
        # {} placeholders -- confirm sv.sformatf supports both.
        uvm_info(self.get_name(),
            sv.sformatf("%s read : addr = `x{}, data[0] = `x{}",
                self.get_sequence_path(), self.rsp.addr, self.rsp.data[0]),
            UVM_HIGH)


uvm_object_utils(read_byte_seq)
#//------------------------------------------------------------------------------
#//
#// SEQUENCE: read_half_word_seq
#//
#//------------------------------------------------------------------------------
#
#class read_half_word_seq extends ubus_base_sequence
#
# function new(string name="read_half_word_seq")
# super.new(name)
# endfunction
#
# `uvm_object_utils(read_half_word_seq)
#
# rand bit [15:0] start_addr
# rand int unsigned transmit_del = 0
# constraint transmit_del_ct { (transmit_del <= 10); }
#
# virtual task body()
# `uvm_do_with(req,
# { req.addr == start_addr
# req.read_write == READ
# req.size == 2
# req.error_pos == 1000
# req.transmit_delay == transmit_del; } )
# get_response(rsp)
# `uvm_info(get_type_name(),
# $sformatf("%s read : addr = `x%0h, data[0] = `x%0h, data[1] = `x%0h",
# get_sequence_path(), rsp.addr, rsp.data[0], rsp.data[1]), UVM_HIGH)
# endtask
#
#endclass : read_half_word_seq
#
#
#//------------------------------------------------------------------------------
#//
#// SEQUENCE: read_word_seq
#//
#//------------------------------------------------------------------------------
#
#class read_word_seq extends ubus_base_sequence
#
# function new(string name="read_word_seq")
# super.new(name)
# endfunction
#
# `uvm_object_utils(read_word_seq)
#
# rand bit [15:0] start_addr
# rand int unsigned transmit_del = 0
# constraint transmit_del_ct { (transmit_del <= 10); }
#
# virtual task body()
# `uvm_do_with(req,
# { req.addr == start_addr
# req.read_write == READ
# req.size == 4
# req.error_pos == 1000
# req.transmit_delay == transmit_del; } )
# get_response(rsp)
# `uvm_info(get_type_name(),
# $sformatf("%s read : addr = `x%0h, data[0] = `x%0h, \
# data[1] = `x%0h, data[2] = `x%0h, data[3] = `x%0h",
# get_sequence_path(), rsp.addr, rsp.data[0], rsp.data[1],
# rsp.data[2], rsp.data[3]), UVM_HIGH)
# endtask
#
#endclass : read_word_seq
#
#
#//------------------------------------------------------------------------------
#//
#// SEQUENCE: read_double_word_seq
#//
#//------------------------------------------------------------------------------
#
#class read_double_word_seq extends ubus_base_sequence
#
# function new(string name="read_double_word_seq")
# super.new(name)
# endfunction
#
# `uvm_object_utils(read_double_word_seq)
#
# rand bit [15:0] start_addr
# rand int unsigned transmit_del = 0
# constraint transmit_del_ct { (transmit_del <= 10); }
#
# virtual task body()
# `uvm_do_with(req,
# { req.addr == start_addr
# req.read_write == READ
# req.size == 8
# req.error_pos == 1000
# req.transmit_delay == transmit_del; } )
# get_response(rsp)
# `uvm_info(get_type_name(),
# $sformatf("%s read : addr = `x%0h, data[0] = `x%0h, \
# data[1] = `x%0h, data[2] = `x%0h, data[3] = `x%0h, data[4] = `x%0h, \
# data[5] = `x%0h, data[6] = `x%0h, data[7] = `x%0h",
# get_sequence_path(), rsp.addr, rsp.data[0], rsp.data[1], rsp.data[2],
# rsp.data[3], rsp.data[4], rsp.data[5], rsp.data[6], rsp.data[7]),
# UVM_HIGH)
# endtask
#
#endclass : read_double_word_seq
#
#
#//------------------------------------------------------------------------------
#//
#// SEQUENCE: write_byte_seq
#//
#//------------------------------------------------------------------------------
class write_byte_seq(ubus_base_sequence):
    """Write a single byte to the bus at a randomised start address."""

    # Holds the most recently written data byte, shared across instances.
    # NOTE(review): presumably read back elsewhere (test/scoreboard) --
    # confirm the consumer before relying on it.
    last_data = None

    def __init__(self, name="write_byte_seq"):
        ubus_base_sequence.__init__(self, name)
        self.start_addr = 0
        self.rand('start_addr', range(1 << 16))
        self.data0 = 0
        self.transmit_delay = 0
        # constraint transmit_del_ct { (transmit_del <= 10); }

    async def body(self):
        # Build a fresh request item (unlike read_byte_seq, which reuses
        # self.req) and constrain the randomisation to our addr/data.
        req = ubus_transfer()
        req.data.append(self.data0)
        req.addr = self.start_addr
        req.size = 1
        # presumably 1000 is past any byte position, i.e. "no error injected"
        req.error_pos = 1000
        req.read_write = WRITE
        req.transmit_delay = self.transmit_delay
        await uvm_do_with(self, req, lambda addr: addr == self.start_addr,
            lambda read_write: read_write == WRITE,
            lambda data: data[0] == self.data0)
        #    { req.addr == start_addr
        #      req.read_write == WRITE
        #      req.size == 1
        #      req.data[0] == data0
        #      req.error_pos == 1000
        #      req.transmit_delay == transmit_del; } )
        uvm_info(self.get_type_name(),
            sv.sformatf("%s wrote : addr = `x%0h, data[0] = `x%0h",
                self.get_sequence_path(), req.addr, req.data[0]), UVM_HIGH)
        # record the byte we just wrote for later inspection
        write_byte_seq.last_data = req.data[0]


uvm_object_utils(write_byte_seq)
#//------------------------------------------------------------------------------
#//
#// SEQUENCE: write_half_word_seq
#//
#//------------------------------------------------------------------------------
#
#class write_half_word_seq extends ubus_base_sequence
#
# function new(string name="write_half_word_seq")
# super.new(name)
# endfunction
#
# `uvm_object_utils(write_half_word_seq)
#
# rand bit [15:0] start_addr
# rand bit [7:0] data0
# rand bit [7:0] data1
# rand int unsigned transmit_del = 0
# constraint transmit_del_ct { transmit_del <= 10; }
#
# virtual task body()
# `uvm_do_with(req,
# { req.addr == start_addr;
# req.read_write == WRITE
# req.size == 2;
# req.data[0] == data0; req.data[1] == data1
# req.error_pos == 1000;
# req.transmit_delay == transmit_del; } )
# `uvm_info(get_type_name(),
# $sformatf("%s wrote : addr = `x%0h, data[0] = `x%0h, data[1] = `x%0h",
# get_sequence_path(), req.addr, req.data[0], req.data[1]), UVM_HIGH)
# endtask
#
#endclass : write_half_word_seq
#
#
#//------------------------------------------------------------------------------
#//
#// SEQUENCE: write_word_seq
#//
#//------------------------------------------------------------------------------
#
#class write_word_seq extends ubus_base_sequence
#
# function new(string name="write_word_seq")
# super.new(name)
# endfunction
#
# `uvm_object_utils(write_word_seq)
#
# rand bit [15:0] start_addr
# rand bit [7:0] data0; rand bit [7:0] data1
# rand bit [7:0] data2; rand bit [7:0] data3
# rand int unsigned transmit_del = 0
# constraint transmit_del_ct { (transmit_del <= 10); }
#
# virtual task body()
# `uvm_do_with(req,
# { req.addr == start_addr
# req.read_write == WRITE
# req.size == 4
# req.data[0] == data0; req.data[1] == data1
# req.data[2] == data2; req.data[3] == data3
# req.error_pos == 1000
# req.transmit_delay == transmit_del; } )
# `uvm_info(get_type_name(),
# $sformatf("%s wrote : addr = `x%0h, data[0] = `x%0h, \
# data[1] = `x%0h, data[2] = `x%0h, data[3] = `x%0h",
# get_sequence_path(), req.addr, req.data[0],
# req.data[1], req.data[2], req.data[3]),
# UVM_HIGH)
# endtask
#
#endclass : write_word_seq
#
#
#//------------------------------------------------------------------------------
#//
#// SEQUENCE: write_double_word_seq
#//
#//------------------------------------------------------------------------------
#
#class write_double_word_seq extends ubus_base_sequence
#
# function new(string name="write_double_word_seq")
# super.new(name)
# endfunction
#
# `uvm_object_utils(write_double_word_seq)
#
# rand bit [15:0] start_addr
# rand bit [7:0] data0; rand bit [7:0] data1
# rand bit [7:0] data2; rand bit [7:0] data3
# rand bit [7:0] data4; rand bit [7:0] data5
# rand bit [7:0] data6; rand bit [7:0] data7
# rand int unsigned transmit_del = 0
# constraint transmit_del_ct { (transmit_del <= 10); }
#
# virtual task body()
# `uvm_do_with(req,
# { req.addr == start_addr
# req.read_write == WRITE
# req.size == 8
# req.data[0] == data0; req.data[1] == data1
# req.data[2] == data2; req.data[3] == data3
# req.data[4] == data4; req.data[5] == data5
# req.data[6] == data6; req.data[7] == data7
# req.error_pos == 1000
# req.transmit_delay == transmit_del; } )
# `uvm_info(get_type_name(),
# $sformatf("Writing %s : addr = `x%0h, data[0] = `x%0h, \
# data[1] = `x%0h, data[2] = `x%0h, data[3] = `x%0h, data[4] = `x%0h, \
# data[5] = `x%0h, data[6] = `x%0h, data[7] = `x%0h",
# get_sequence_path(), req.addr, req.data[0], req.data[1], req.data[2],
# req.data[3], req.data[4], req.data[5], req.data[6], req.data[7]),
# UVM_HIGH)
# endtask
#
#endclass : write_double_word_seq
#
#
| StarcoderdataPython |
3317504 | <reponame>carmatthews/VideoIndexer<gh_stars>0
# Get a list of all videos in your account in video indexer - returns the VideoId you need for other operations
#List Videos API: https://api-portal.videoindexer.ai/docs/services/Operations/operations/List-Videos?
import requests

##### CONFIGURE YOUR ACCOUNTS & ACCESS HERE
# Configure access to Video Indexer
video_indexer_account_id = 'REPLACE' # See account settings in Video Indexer Portal: https://www.videoindexer.ai/settings/account
video_indexer_region = 'REPLACE' # At the top of the Video Indexer Portal (https://www.videoindexer.ai), you should see the region (may be trial, WestUS etc.)
video_indexer_key = 'REPLACE' # See Profile in Video Indexer Developer Portal: https://api-portal.videoindexer.ai/developer
####### END CONFIGURATION SECTION

# Seconds to wait for each HTTP call; without a timeout, `requests` can hang
# indefinitely on a stalled connection.
REQUEST_TIMEOUT = 30

# Get authorization token for Video Indexer API
auth_uri = 'https://api.videoindexer.ai/auth/{}/Accounts/{}/AccessToken'.format(video_indexer_region,video_indexer_account_id)
auth_params = {'allowEdit':'true'}
auth_header = {'Ocp-Apim-Subscription-Key': video_indexer_key}
auth_response = requests.get(auth_uri, headers=auth_header, params=auth_params, timeout=REQUEST_TIMEOUT)
# Fail fast on a bad key/account instead of silently using an error body as
# the access token for the next request.
auth_response.raise_for_status()
# The endpoint returns the token as a JSON-quoted string; strip the quotes.
auth_token = auth_response.text.replace('"','')

print('Getting list...')

# Get list of videos available on Video Indexer API
listvideo_uri = 'https://api.videoindexer.ai/{}/Accounts/{}/Videos'.format(video_indexer_region,video_indexer_account_id)
listvideo_headers = { 'x-ms-client-request-id': '', }
listvideo_params = {
    'accessToken':auth_token
}

try:
    response = requests.get(listvideo_uri, headers=listvideo_headers, params=listvideo_params, timeout=REQUEST_TIMEOUT)
    response_body = response.json()

    # Consider any status other than 2xx an error
    if not response.status_code // 100 == 2:
        print("Error: {} {}".format(response.status_code, response_body))
    else:
        #DEBUG print(response_body)
        for result in response_body["results"]:
            print("Video id: {} is for file {} and is {}.".format(result.get('id'), result.get('name'), result.get('state')))

except requests.exceptions.RequestException as e:
    # A serious problem happened, like an SSLError or InvalidURL
    print("Error: {}".format(e))
| StarcoderdataPython |
3335097 | import copy
import datetime
from kardboard.tests.core import KardboardTestCase
class CardBlockTests(KardboardTestCase):
    """Tests for the /card/<key>/block/ view when the card starts unblocked."""

    def setUp(self):
        super(CardBlockTests, self).setUp()
        self.card = self.make_card()
        self.card.save()
        # minimal valid POST payload for the block form
        self.required_data = {
            'reason': 'You gotta lock that down',
            'blocked_at': '06/11/1911',
        }
        self.config['TICKET_HELPER'] = \
            'kardboard.tickethelpers.TestTicketHelper'

    def tearDown(self):
        # NOTE(review): unlike setUp, this does not call super().tearDown();
        # CardCRUDTests in this file does -- confirm whether base-class
        # cleanup is required here too.
        self.card.delete()

    def _get_target_url(self, card=None):
        # defaults to the card created in setUp
        if not card:
            card = self.card.key
        return '/card/%s/block/' % (card, )

    def _get_target_class(self):
        return self._get_card_class()

    def test_blocking(self):
        """GET renders the block form for the card."""
        res = self.app.get(self._get_target_url())
        self.assertEqual(200, res.status_code)
        self.assertIn('<form', res.data)
        self.assertIn(self.card.key, res.data)
        self.assertIn(self.card.title, res.data)

    def test_blocking_post(self):
        """POSTing a reason blocks the card and records a blocker."""
        self.assertEqual(False, self.card.blocked)
        res = self.app.post(self._get_target_url(),
            data=self.required_data)
        self.assertEqual(302, res.status_code)

        self.card.reload()
        self.assertEqual(True, self.card.blocked)
        self.assertEqual(1, len(self.card.blockers))
        self.assertEqual(True, self.card.blocked_ever)

    def test_blocking_not_found(self):
        """Blocking an unknown card key returns 404."""
        url = self._get_target_url("CMS-404")
        res = self.app.get(url)
        self.assertEqual(404, res.status_code)

    def test_blocking_cancel(self):
        """Cancelling the form leaves the card unblocked."""
        self.assertEqual(False, self.card.blocked)
        res = self.app.post(self._get_target_url(),
            data={'cancel': "Cancel"})
        self.assertEqual(302, res.status_code)

        self.card.reload()
        self.assertEqual(False, self.card.blocked)
class CardUnblockTests(KardboardTestCase):
    """Tests for the /card/<key>/block/ view when the card starts blocked."""

    def setUp(self):
        super(CardUnblockTests, self).setUp()
        self.blocked_at = datetime.datetime(
            2011, 6, 12)
        self.card = self.make_card()
        self.card.save()
        # block the card up front so every test starts from a blocked state
        self.card.block("Foo", self.blocked_at)
        self.card.save()
        # minimal valid POST payload for the unblock form
        self.required_data = {
            'unblocked_at': '06/13/2011',
        }
        self.config['TICKET_HELPER'] = \
            'kardboard.tickethelpers.TestTicketHelper'

    def tearDown(self):
        # NOTE(review): does not call super().tearDown() -- confirm whether
        # base-class cleanup is required here.
        self.card.delete()

    def _get_target_url(self, card=None):
        # same endpoint as blocking; the view toggles based on card state
        if not card:
            card = self.card.key
        return '/card/%s/block/' % (card, )

    def _get_target_class(self):
        return self._get_card_class()

    def test_unblocking(self):
        """GET renders the unblock form for the blocked card."""
        res = self.app.get(self._get_target_url())
        self.assertEqual(200, res.status_code)
        self.assertIn('<form', res.data)
        self.assertIn(self.card.key, res.data)
        self.assertIn(self.card.title, res.data)

    def test_unblocking_post(self):
        """POSTing an unblock date clears blocked but keeps the history."""
        self.assertEqual(True, self.card.blocked)
        res = self.app.post(self._get_target_url(),
            data=self.required_data)
        self.assertEqual(302, res.status_code)

        self.card.reload()
        self.assertEqual(False, self.card.blocked)
        self.assertEqual(True, self.card.blocked_ever)
        self.assertEqual(1, len(self.card.blockers))

    def test_unblocking_cancel(self):
        """Cancelling the form leaves the card blocked."""
        self.assertEqual(True, self.card.blocked)
        res = self.app.post(self._get_target_url(),
            data={'cancel': "Cancel"})
        self.assertEqual(302, res.status_code)

        self.card.reload()
        self.assertEqual(True, self.card.blocked)
class CardCRUDTests(KardboardTestCase):
    """CRUD view tests for cards: add, edit and delete, behind auth.

    ``self.assert_`` calls were replaced with ``assertTrue``: the ``assert_``
    alias has been deprecated since Python 2.7/3.2 and was removed in
    Python 3.12, where these tests would raise AttributeError.
    """

    def setUp(self):
        super(CardCRUDTests, self).setUp()
        # minimal valid POST payload for the card add/edit forms
        self.required_data = {
            'key': u'<KEY>',
            'title': u'You gotta lock that down',
            'backlog_date': u"06/11/1911",
            'state': u'Todo',
            'team': u'Team 1',
        }
        self.config['TICKET_HELPER'] = \
            'kardboard.tickethelpers.TestTicketHelper'
        # these views require an authenticated session
        self.flask_app.config['TICKET_AUTH'] = True
        self.login()

    def tearDown(self):
        super(CardCRUDTests, self).tearDown()
        self.flask_app.config['TICKET_AUTH'] = False
        self.logout()

    def _get_target_url(self):
        return '/card/add/'

    def _get_target_class(self):
        return self._get_card_class()

    def test_add_card(self):
        """POSTing valid data creates exactly one card and redirects."""
        klass = self._get_target_class()

        res = self.app.get(self._get_target_url())
        self.assertEqual(200, res.status_code)
        self.assertIn('<form', res.data)

        res = self.app.post(self._get_target_url(),
            data=self.required_data)

        self.assertEqual(302, res.status_code)
        self.assertEqual(1, klass.objects.count())

        k = klass.objects.get(key=self.required_data['key'])
        self.assertTrue(k.id)

    def test_add_card_with_qs_params(self):
        """A ?key= query parameter pre-fills the add form."""
        key = "<KEY>"
        url = "%s?key=%s" % (self._get_target_url(), key)
        res = self.app.get(url)
        self.assertEqual(200, res.status_code)
        self.assertIn('<form', res.data)
        self.assertIn('value="%s"' % (key, ), res.data)

    def test_add_card_with_no_title(self):
        """Without a title, the ticket helper supplies one."""
        klass = self._get_target_class()

        data = copy.copy(self.required_data)
        del data['title']

        res = self.app.post(self._get_target_url(),
            data=data)

        self.assertEqual(302, res.status_code)
        self.assertEqual(1, klass.objects.count())

        # This should work because we mocked TestHelper
        # in setUp
        k = klass.objects.get(key=self.required_data['key'])
        self.assertTrue(k.id)
        self.assertEqual(k.title, "Dummy Title from Dummy Ticket System")

    def test_add_duplicate_card(self):
        """Posting a key that already exists re-renders the form (200)."""
        klass = self._get_target_class()
        card = klass(**self.required_data)
        card.backlog_date = datetime.datetime.now()
        card.save()

        res = self.app.get(self._get_target_url())
        self.assertEqual(200, res.status_code)
        self.assertIn('<form', res.data)

        res = self.app.post(self._get_target_url(),
            data=self.required_data)
        self.assertEqual(200, res.status_code)

    def test_card_with_and_without_priority(self):
        """Submitting an empty priority clears a previously set one."""
        klass = self._get_target_class()
        card = klass(**self.required_data)
        card.backlog_date = datetime.datetime.now()
        card.priority = 2
        card.save()

        self.required_data['priority'] = u""
        target_url = "/card/%s/edit/" % (card.key, )
        self.app.post(target_url,
            data=self.required_data)
        k = klass.objects.get(key=self.required_data['key'])
        self.assertTrue(k.id)
        self.assertEqual(k.priority, None)

    def test_edit_card(self):
        """Editing redirects and persists the posted backlog date."""
        klass = self._get_target_class()
        card = klass(**self.required_data)
        card.backlog_date = datetime.datetime.now()
        card.save()

        target_url = "/card/%s/edit/" % (card.key, )
        res = self.app.get(target_url)
        self.assertEqual(200, res.status_code)
        self.assertIn(card.key, res.data)
        self.assertIn(card.title, res.data)

        res = self.app.post(target_url,
            data=self.required_data)

        k = klass.objects.get(key=self.required_data['key'])
        self.assertTrue(k.id)
        self.assertEqual(302, res.status_code)
        self.assertEqual(1, klass.objects.count())

        self.assertEqual(6, k.backlog_date.month)
        self.assertEqual(11, k.backlog_date.day)
        self.assertEqual(1911, k.backlog_date.year)

    def test_edit_card_and_redirects(self):
        """A ?next= parameter is echoed in the form action and honoured."""
        klass = self._get_target_class()
        card = klass(**self.required_data)
        card.backlog_date = datetime.datetime.now()
        card.save()

        # ?next= is the URL-encoded /card/<key>/ detail page
        target_url = "/card/%s/edit/?next=%%2Fcard%%2F%s%%2F" % (card.key, card.key)
        res = self.app.get(target_url)

        action_url = 'action="%s"' % target_url
        self.assertIn(action_url, res.data)

        res = self.app.post(target_url,
            data=self.required_data)
        k = klass.objects.get(key=self.required_data['key'])
        self.assertEqual(302, res.status_code)
        self.assertEqual('http://localhost/card/%s/' % k.key, res.location)

    def test_delete_card(self):
        """Delete view offers Cancel/Delete; Cancel keeps the card."""
        klass = self._get_target_class()
        card = klass(**self.required_data)
        card.backlog_date = datetime.datetime.now()
        card.save()

        target_url = "/card/%s/delete/" % (card.key, )
        res = self.app.get(target_url)
        self.assertEqual(200, res.status_code)
        self.assertIn('value="Cancel"', res.data)
        self.assertIn('value="Delete"', res.data)
        self.assertTrue(klass.objects.get(key=card.key))

        res = self.app.post(target_url, data={'cancel': 'Cancel'})
        self.assertEqual(302, res.status_code)
        self.assertTrue(klass.objects.get(key=card.key))

        res = self.app.post(target_url, data={'delete': 'Delete'})
        self.assertEqual(302, res.status_code)

    def login(self):
        login_data = {'username': 'username', 'password': 'password'}
        self.app.post('/login/', data=login_data)

    def logout(self):
        self.app.get('/logout/')
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.