id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
11315400 | import cv2
import os
def start_capture(name):
    """Capture face crops from the default webcam into ./data/<name>.

    Shows a live preview with detection overlays and saves one cropped face
    image per frame in which a face was found.  Stops when 'q' or Esc is
    pressed, or after ~310 images.

    :param name: subject name; used for the output directory and file names
    :return: number of images written
    """
    path = "./data/" + name
    num_of_images = 0
    detector = cv2.CascadeClassifier("./data/haarcascade_frontalface_default.xml")
    # exist_ok avoids the race-prone try/bare-except around makedirs
    os.makedirs(path, exist_ok=True)
    vid = cv2.VideoCapture(0)
    try:
        while True:
            ret, img = vid.read()
            if not ret:
                # Camera read failed; stop instead of crashing in cvtColor.
                break
            new_img = None
            grayimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = detector.detectMultiScale(image=grayimg, scaleFactor=1.1, minNeighbors=5)
            for x, y, w, h in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 0), 2)
                cv2.putText(img, "Face Detected", (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255))
                cv2.putText(img, str(num_of_images) + " images captured", (x, y + h + 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255))
                new_img = img[y:y + h, x:x + w]
            cv2.imshow("FaceDetection", img)
            key = cv2.waitKey(1) & 0xFF
            # Only write when a face crop was actually produced this frame
            # (the original blindly tried imwrite and swallowed all errors).
            if new_img is not None:
                cv2.imwrite("{}/{}{}.jpg".format(path, num_of_images, name), new_img)
                num_of_images += 1
            if key == ord("q") or key == 27 or num_of_images > 310:
                break
    finally:
        # Release the camera and close the preview even on error.
        vid.release()
        cv2.destroyAllWindows()
    return num_of_images
| StarcoderdataPython |
9661148 | <gh_stars>0
'''
Created on July 17, 2014
@author: ckd27546 (Based upon LpdFemGuiLiveViewWindow.py)
'''
from data_containers import LpdImageContainer
from lpd.fem.client import LpdFemClient
from PyQt4 import QtCore, QtGui
from utilities import AsyncExecutionThread
import sys, os, time, datetime
import numpy as np
import h5py
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
class LpdFemGuiAsicWindow(QtGui.QDialog):
    """Dialog that plots captured data from an LPD ASIC module.

    Shows three vertically stacked images: the raw captured frame, a
    black/white map of faulty pixels, and the faulty-pixel map after a
    threshold is applied.  Other threads push new data in via the
    pyqtSignal members below; the matching *Update slots store the values
    and windowUpdate() redraws and saves everything.
    """

    # Row indices of the three stacked subplots.
    REALIMAGE = 0
    FAULTYIMAGE = 1
    THRESHIMAGE = 2

    # Signals used by worker threads to deliver data into the GUI thread.
    trainSignal = QtCore.pyqtSignal(object)
    imageSignal = QtCore.pyqtSignal(object)
    moduleSignal = QtCore.pyqtSignal(object)
    timeStampSignal = QtCore.pyqtSignal(object)
    logPathSignal = QtCore.pyqtSignal(str)
    # (actualImage, faultyImage, thresholdImage, moduleDescription,
    #  moduleNumber, thresholdLevel, miscDescription)
    dataSignal = QtCore.pyqtSignal(object, object, object, str, int, int, str)

    # Global matplotlib font size for every plot in this window.
    matplotlib.rcParams.update({'font.size': 8})

    def __init__(self, app_main): #, parent=None):
        """Build the three-plot figure, toolbar and signal wiring.

        :param app_main: application object providing mainWindow and asic_tester
        """
        QtGui.QDialog.__init__(self) #, parent)
        self.app_main = app_main
        self.messageSignal = self.app_main.mainWindow.testTab.messageSignal
        # Fixed sensor geometry: 32 rows x 128 columns (8 ASICs of 16 cols).
        self.nrows = 32
        self.ncols = 128
        self.moduleString = "LHS"
        self.moduleNumber = self.app_main.asic_tester.LHS_MODULE
        self.setWindowTitle('Plotting data from Asic Module')
        self.plotFrame =QtGui.QWidget()
        # Create the mpl Figure and FigCanvas objects.
        # 5x4 inches, 100 dots-per-inch
        #
        self.dpi = 100
        self.fig = Figure((8.0, 6.0), dpi=self.dpi)
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setParent(self.plotFrame)
        self.imageSize = self.nrows * self.ncols
        #self.axes = self.fig.add_subplot(111)
        self.axes = []
        self.img = []
        # Obtain default colour map before creating plots
        defaultColourMap = cm.get_cmap()
        numberPlots = 3
        for idx in range(numberPlots):
            axesObject = self.fig.add_subplot(numberPlots, 1, idx+1)
            self.axes.extend([axesObject])
            # Disable up the X and Y ticks
            self.axes[idx].set_xticks([])
            self.axes[idx].set_yticks([])
            # Create an empty plot
            self.data = np.zeros((self.nrows, self.ncols), dtype=np.uint16)
            # Define colour range, colourmap according to plot number:
            # 0 = raw 12-bit data, 1 = faulty map, 2 = thresholded (binary) map.
            if idx == 0:
                vMax = 4095
                cMap = defaultColourMap
                cTicks = [0, 511, 1023, 1535, 2047, 2559, 3071, 3583, 4095]
            elif idx == 1:
                maximum = 2000
                vMax = maximum
                cMap = 'binary'
                cTicks = [0, int(maximum/4), int(maximum/2), int(3*maximum/4), maximum]
            else:
                maximum = 1
                vMax = maximum
                cMap = 'binary'
                cTicks = [0, maximum]
            imgObject = self.axes[idx].imshow(self.data, cmap=cMap, interpolation='nearest', vmin=0, vmax=vMax)
            self.img.extend([imgObject])
            # Create and show a colourbar
            axc, kw = matplotlib.colorbar.make_axes(self.axes[idx], orientation='vertical')
            cb = matplotlib.colorbar.Colorbar(axc, self.img[idx], orientation='vertical')
            cb.set_ticks(ticks=cTicks, update_ticks=True)
            self.img[idx].colorbar = cb
            # Add vertical lines to differentiate between the ASICs
            for i in range(16, self.ncols, 16):
                self.axes[idx].vlines(i-0.5, 0, self.nrows-1, color='b', linestyles='solid')
        self.canvas.draw()
        # Create the navigation toolbar, tied to the canvas
        #
        self.mplToolbar = NavigationToolbar(self.canvas, self.plotFrame)
        vbox = QtGui.QVBoxLayout()
        vbox.addWidget(self.mplToolbar)
        vbox.addWidget(self.canvas)
        self.setLayout(vbox)
        self.setWindowTitle("Analysis Window")
        self.setModal(False)
        # Connect the data update signal
        self.trainSignal.connect(self.trainUpdate)
        self.imageSignal.connect(self.imageUpdate)
        self.moduleSignal.connect(self.moduleUpdate)
        self.timeStampSignal.connect(self.timeStampUpdate)
        self.logPathSignal.connect(self.logPathUpdate)
        self.dataSignal.connect(self.windowUpdate)

    def closeEvent(self, event):
        """Accept window close without any special teardown."""
        event.accept()

    def trainUpdate(self, train):
        """Slot: record the current train number."""
        self.train = train

    def imageUpdate(self, image):
        """Slot: record the current image number."""
        self.image = image

    def moduleUpdate(self, module):
        """Slot: record the current module value."""
        self.module = module

    def timeStampUpdate(self, timeStamp):
        """Slot: record the capture timestamp (seconds since the epoch)."""
        self.timeStamp = timeStamp

    def logPathUpdate(self, logPath):
        """Slot: record the directory prefix used for saved figures/HDF5 files."""
        self.logPath = str(logPath)

    def setModuleType(self, moduleNumber):
        ''' Helper function: map a numeric module id to its "LHS"/"RHS" label. '''
        self.moduleNumber = moduleNumber
        if moduleNumber == self.app_main.asic_tester.LHS_MODULE: self.moduleString = "LHS"
        elif moduleNumber == self.app_main.asic_tester.RHS_MODULE: self.moduleString = "RHS"
        else:
            # Unknown module id: report the error but leave the previous label.
            self.msgPrint("Error setting module type: Unrecognised module number: %d" % moduleNumber, bError=True)

    def windowUpdate(self, lpdActualImage, lpdFaultyImage, lpdThresholdImage, moduleDescription, moduleNumber, thresholdLevel, miscDescription):
        """Slot: redraw all three plots from a new capture, then save data and figure."""
        #print >> sys.stderr, "moduleDescription ", moduleDescription, " \nmoduleNumber", moduleNumber, "\nmiscDescription", miscDescription
        # Convert module number into the string e.g. "RHS"
        self.setModuleType(moduleNumber)
        # Save module description, e.g. "00135"
        self.moduleDescription = moduleDescription
        # Plot the captured image
        self.img[LpdFemGuiAsicWindow.REALIMAGE].set_data(lpdActualImage)
        dateStr = time.strftime('%d/%m/%y %H:%M:%S', time.localtime(self.timeStamp))
        self.titleText = 'Train %d Image %d %sModule %s: %s' % (self.train, self.image, miscDescription+"\n", self.moduleDescription+self.moduleString, dateStr)
        self.axes[LpdFemGuiAsicWindow.REALIMAGE].set_title(self.titleText)
        # Plot the black/white image (which shows which pixel(s) are dead)
        self.img[LpdFemGuiAsicWindow.FAULTYIMAGE].set_data(lpdFaultyImage)
        dateStr = time.strftime('%d/%m/%y %H:%M:%S', time.localtime(self.timeStamp))
        self.titleText = 'Module %s: Faulty pixel(s)' % (self.moduleDescription+self.moduleString)
        self.axes[LpdFemGuiAsicWindow.FAULTYIMAGE].set_title(self.titleText)
        # Plot the black/white image of dead pixels AFTER THRESHOLD APPLIED
        self.img[LpdFemGuiAsicWindow.THRESHIMAGE].set_data(lpdThresholdImage)
        dateStr = time.strftime('%d/%m/%y %H:%M:%S', time.localtime(self.timeStamp))
        self.titleText = 'Faulty pixel(s) above threshold: %s' % (thresholdLevel)
        self.axes[LpdFemGuiAsicWindow.THRESHIMAGE].set_title(self.titleText)
        self.canvas.draw()
        # Save image to hdf5 file (but not the black/white image)
        self.savePlot(lpdActualImage, moduleDescription+self.moduleString)
        # Save plotted figure to file
        try:
            fname = self.logPath + "savedFig_%s_%s" % (moduleDescription+self.moduleString, time.strftime("%H%M%S"))
            #print >> sys.stderr, "Fig: %s" % (fname + ".png")
        except Exception as e:
            self.msgPrint("windowUpdate() Exception: %s" % e, bError=True)
        # NOTE(review): if the try above failed, fname is unbound here and
        # savefig raises NameError -- confirm intended error handling.
        self.fig.savefig(fname)

    def savePlot(self, lpdImage, fullModuleName):
        """Write one captured image plus metadata datasets to a fresh HDF5 file."""
        try:
            fileName = self.logPath + "lpdData_%s_%s.hdf5" % (fullModuleName, time.strftime("%H%M%S"))
            #print >> sys.stderr, "HDF5: %s" % (fileName)
        except Exception as e:
            # NOTE(review): on failure fileName is unbound and the open below
            # raises NameError -- confirm intended error handling.
            self.msgPrint("savePlot() Exception: %s" % e, bError=True)
        try:
            self.hdf_file = h5py.File(fileName, 'w')
        except IOError as e:
            self.msgPrint("Failed to open HDF file with error: %s" % e, bError=True)
            raise(e)
        self.nrows = 32
        self.ncols = 128
        self.images_written = 0
        currentImage = 0
        # Create group structure
        self.lpd_group = self.hdf_file.create_group('lpd')
        self.meta_group = self.lpd_group.create_group('metadata')
        self.data_group = self.lpd_group.create_group('data')
        # Create data group entries (extensible along the first axis)
        self.image_ds = self.data_group.create_dataset('image', (1, self.nrows, self.ncols), 'uint16', chunks=(1, self.nrows, self.ncols),
                                                       maxshape=(None,self.nrows, self.ncols))
        self.time_stamp_ds = self.data_group.create_dataset('timeStamp', (1,), 'float64', maxshape=(None,))
        self.train_number_ds = self.data_group.create_dataset('trainNumber', (1,), 'uint32', maxshape=(None,))
        self.image_number_ds = self.data_group.create_dataset('imageNumber', (1,), 'uint32', maxshape=(None,))
        # Write data and info to file
        self.image_ds.resize((self.images_written+1, self.nrows, self.ncols))
        self.image_ds[self.images_written,...] = lpdImage
        self.train_number_ds.resize((self.images_written+1, ))
        self.train_number_ds[self.images_written] = 0
        self.image_number_ds.resize((self.images_written+1, ))
        self.image_number_ds[self.images_written] = currentImage
        # Close the file
        self.hdf_file.close()

    def msgPrint(self, message, bError=False):
        ''' Send message to LpdFemGuiMainTestTab to be displayed there '''
        self.messageSignal.emit(message, bError)
| StarcoderdataPython |
3356452 | """Place holder for future adapter to allow remote access via ssh tunnel.
See Podman go bindings for more details.
"""
from typing import Any, Mapping, Optional, Union
from urllib.parse import urlparse
from requests.adapters import HTTPAdapter
from requests.packages.urllib3 import HTTPConnectionPool # pylint: disable=import-error
from requests.packages.urllib3.connection import HTTPConnection # pylint: disable=import-error
from requests.packages.urllib3.util import Timeout # pylint: disable=import-error
class SSHConnection(HTTPConnection):
    """Placeholder HTTPConnection that will route traffic over an ssh tunnel."""

    def __init__(
        self,
        host: str,
        timeout: Optional[Union[float, Timeout]] = None,
    ):
        """Not yet implemented; mirrors the final constructor signature."""
        # Arguments are intentionally unused until the tunnel exists.
        _ = (host, timeout)
        raise NotImplementedError

    def connect(self):
        """Would open the tunnel socket; not yet implemented."""
        raise NotImplementedError

    def __del__(self):
        """Would tear the tunnel down; not yet implemented."""
        raise NotImplementedError
class SSHConnectionPool(HTTPConnectionPool):
    """Specialization of urllib3 HTTPConnectionPool for ssh tunnels."""

    # pylint: disable=too-few-public-methods

    def __init__(
        self,
        host: str,
        timeout: Optional[Union[float, Timeout]] = None,
    ) -> None:
        """Initialize the pool for *host* with an optional timeout.

        Bug fix: the original discarded host/timeout and never initialized the
        base pool, so _new_conn() failed with AttributeError on self.host.
        """
        if isinstance(timeout, float):
            timeout = Timeout.from_float(timeout)
        super().__init__(host, timeout=timeout)

    def _new_conn(self) -> SSHConnection:
        """Create a new ssh-tunnelled connection for this pool."""
        return SSHConnection(self.host, self.timeout)
class SSHAdapter(HTTPAdapter):
    """Specialization of requests transport adapter for ssh tunnels."""

    # Abstract methods (get_connection) are specialized and pylint cannot walk hierarchy.
    # pylint: disable=arguments-differ
    # pylint: disable=too-few-public-methods

    def __init__(self, *args, **kwargs):
        """Pop the optional ``timeout`` keyword and delegate the rest upstream."""
        self.timeout = kwargs.pop("timeout", None)
        super().__init__(*args, **kwargs)

    def get_connection(self, host, proxies: Optional[Mapping[str, Any]] = None) -> SSHConnectionPool:
        """Returns ssh tunneled connection to Podman service.

        Raises ValueError when a proxy is configured for the target scheme,
        since ssh tunnels cannot be proxied.
        """
        # Guard against proxies=None (the declared default): the original
        # called len(None) and raised TypeError.
        if proxies:
            uri = urlparse(host)
            if uri.scheme in proxies:
                raise ValueError(f"{self.__class__.__name__} does not support proxies.")
        return SSHConnectionPool(host, timeout=self.timeout)
| StarcoderdataPython |
5030061 | <gh_stars>1-10
import os
import json
import torch
import torch.nn.functional as F
import numpy as np
import matplotlib
from scipy.io import wavfile
from matplotlib import pyplot as plt
matplotlib.use("Agg")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def to_device(data, device):
    """Move the numpy fields of a batch tuple onto *device* as torch tensors.

    Supports the 9-element training batch and the 6-element inference batch;
    any other length falls through and returns None.
    """
    if len(data) == 9:
        (ids, raw_texts, speakers, texts, src_lens, max_src_len,
         mels, mel_lens, max_mel_len) = data
        return (
            ids,
            raw_texts,
            torch.from_numpy(speakers).long().to(device),
            torch.from_numpy(texts).long().to(device),
            torch.from_numpy(src_lens).to(device),
            max_src_len,
            torch.from_numpy(mels).float().to(device),
            torch.from_numpy(mel_lens).to(device),
            max_mel_len,
        )
    if len(data) == 6:
        (ids, raw_texts, speakers, texts, src_lens, max_src_len) = data
        return (
            ids,
            raw_texts,
            torch.from_numpy(speakers).long().to(device),
            torch.from_numpy(texts).long().to(device),
            torch.from_numpy(src_lens).to(device),
            max_src_len,
        )
def log(logger, step=None, losses=None, fig=None, audio=None, sampling_rate=22050, tag=""):
    """Write loss scalars, a figure and/or an audio clip to a TensorBoard-style logger."""
    if losses is not None:
        # One scalar per tracked loss component, all logged at the same step.
        names = ("total_loss", "mel_loss", "duration_loss", "kl_loss", "kl_beta")
        for idx, name in enumerate(names):
            logger.add_scalar("Loss/" + name, losses[idx], step)
    if fig is not None:
        logger.add_figure(tag, fig)
    if audio is not None:
        # Peak-normalize so the logged clip uses the full amplitude range.
        logger.add_audio(
            tag,
            audio / max(abs(audio)),
            sample_rate=sampling_rate,
        )
def get_mask_from_lengths(lengths, max_len=None):
    """Build a boolean padding mask from a batch of sequence lengths.

    Args:
        lengths: 1-D int tensor of shape (batch,).
        max_len: pad length; defaults to max(lengths).

    Returns:
        Bool tensor of shape (batch, max_len) where True marks padding
        positions (index >= length).
    """
    batch_size = lengths.shape[0]
    if max_len is None:
        max_len = torch.max(lengths).item()
    # Create ids on lengths.device instead of a module-level global `device`,
    # so the mask lands on the same device as its input.
    ids = torch.arange(0, max_len, device=lengths.device).unsqueeze(0).expand(batch_size, -1)
    mask = ids >= lengths.unsqueeze(1).expand(-1, max_len)
    return mask
def expand(values, durations):
    """Repeat each value d times (negative durations count as zero) and return an array."""
    repeated = [v for v, d in zip(values, durations) for _ in range(max(0, int(d)))]
    return np.array(repeated)
def synth_one_sample(targets, predictions, vocoder, model_config, preprocess_config):
    """Synthesize one training sample for logging.

    Builds a 4-panel figure (predicted mel, ground-truth mel, residual
    alignment, W) and, when a vocoder is given, vocodes both the ground-truth
    and predicted mels to waveforms.

    Returns (fig, wav_reconstruction, wav_prediction, basename).
    """
    basename = targets[0][0]
    # Trim each mel to its true (unpadded) length before plotting.
    mel_len_target = targets[7][0].item()
    mel_len_prediction = predictions[2][0].item()
    mel_target = targets[6][0, :mel_len_target].detach().transpose(0, 1)
    mel_prediction = predictions[0][-1][0, :mel_len_prediction].detach().transpose(0, 1) # Last Iter Mel
    attn = predictions[8][0].detach() # [seq_len, mel_len]
    W = predictions[9][0].transpose(-2, -1).detach() # [seq_len, mel_len]
    fig = plot_mel(
        [
            mel_prediction.cpu().numpy(),
            mel_target.cpu().numpy(),
            attn.cpu().numpy(),
            W.cpu().numpy()
        ],
        ["Synthetized Spectrogram", "Ground-Truth Spectrogram", "Residual Alignment", "W"],
    )
    if vocoder is not None:
        # Local import avoids a circular dependency with the model package.
        from .model import vocoder_infer
        wav_reconstruction = vocoder_infer(
            mel_target.unsqueeze(0),
            vocoder,
            model_config,
            preprocess_config,
        )[0]
        wav_prediction = vocoder_infer(
            mel_prediction.unsqueeze(0),
            vocoder,
            model_config,
            preprocess_config,
        )[0]
    else:
        # No vocoder configured: return the figure only.
        wav_reconstruction = wav_prediction = None
    return fig, wav_reconstruction, wav_prediction, basename
def synth_samples(targets, predictions, vocoder, model_config, preprocess_config, path):
    """Save a spectrogram figure and a vocoded waveform for every batch sample.

    Writes <basename>.png (spectrogram plot) and <basename>.wav under *path*.
    """
    basenames = targets[0]
    for i in range(len(predictions[0])):
        basename = basenames[i]
        src_len = predictions[4][i].item()  # currently unused
        mel_len = predictions[2][i].item()
        mel_prediction = predictions[0][-1][i, :mel_len].detach().transpose(0, 1) # Last Iter Mel
        # plot_mel treats the last two entries as alignment maps; None here
        # means "spectrogram only".
        fig = plot_mel(
            [
                mel_prediction.cpu().numpy(),
                None,
                None
            ],
            ["Synthetized Spectrogram"],
        )
        plt.savefig(os.path.join(path, "{}.png".format(basename)))
        plt.close()
    # Local import avoids a circular dependency with the model package.
    from .model import vocoder_infer
    mel_predictions = predictions[0][-1].transpose(1, 2)
    # Convert mel-frame lengths to waveform-sample lengths via the hop size.
    lengths = predictions[2] * preprocess_config["preprocessing"]["stft"]["hop_length"]
    wav_predictions = vocoder_infer(
        mel_predictions, vocoder, model_config, preprocess_config, lengths=lengths
    )
    sampling_rate = preprocess_config["preprocessing"]["audio"]["sampling_rate"]
    for wav, basename in zip(wav_predictions, basenames):
        wavfile.write(os.path.join(path, "{}.wav".format(basename)), sampling_rate, wav)
def plot_mel(data, titles):
    """Plot mel spectrograms, optionally followed by two alignment maps.

    Args:
        data: list of >= 3 arrays; the last two entries are alignment maps,
            or None to plot only the spectrograms.
        titles: one title per entry in *data* (or None).

    Returns:
        The matplotlib figure.
    """
    # Fixed: the message previously said ">= 2" while the check requires >= 3.
    assert len(data) >= 3, "data must contain at least 3 entries"
    if data[-2] is not None and data[-1] is not None:
        fig, axes = plt.subplots(len(data), 1, squeeze=False)
        if titles is None:
            titles = [None for i in range(len(data))]
        # Plot Mel Spectrogram
        plot_(axes, data[:-2], titles)
        # Plot the two alignment maps on the last two rows.
        xlims = [data[1].shape[1], data[0].shape[1]]
        for i in range(-2, 0):
            im = axes[i][0].imshow(data[i], origin='lower', aspect='auto')
            axes[i][0].set_xlabel('Decoder timestep')
            axes[i][0].set_ylabel('Encoder timestep')
            axes[i][0].set_xlim(0, xlims[i])
            axes[i][0].set_title(titles[i], fontsize="medium")
            axes[i][0].tick_params(labelsize="x-small")
            axes[i][0].set_anchor("W")
            fig.colorbar(im, ax=axes[i][0])
    else:
        data = data[:-2]
        fig, axes = plt.subplots(len(data), 1, squeeze=False)
        if titles is None:
            titles = [None for i in range(len(data))]
        # Plot Mel Spectrogram
        plot_(axes, data, titles)
    return fig
def plot_(axes, data, titles):
    """Draw each mel spectrogram in *data* onto the matching axes row."""
    for idx, mel in enumerate(data):
        ax = axes[idx][0]
        ax.imshow(mel, origin="lower")
        ax.set_aspect(2.5, adjustable="box")
        ax.set_ylim(0, mel.shape[0])
        ax.set_title(titles[idx], fontsize="medium")
        # Hide tick labels; the colour image carries the information.
        ax.tick_params(labelsize="x-small", left=False, labelleft=False)
        ax.set_anchor("W")
def pad_1D(inputs, PAD=0):
    """Right-pad a batch of 1-D arrays to equal length and stack them."""
    target = max(len(x) for x in inputs)

    def _pad(x):
        # Pad only on the right so existing values keep their positions.
        return np.pad(x, (0, target - x.shape[0]), mode="constant", constant_values=PAD)

    return np.stack([_pad(x) for x in inputs])
def pad_2D(inputs, maxlen=None):
    """Right-pad a batch of 2-D arrays along axis 0 and stack them.

    Raises ValueError if any input is longer than the requested maxlen.
    """
    target = maxlen if maxlen else max(np.shape(x)[0] for x in inputs)

    def _pad(x):
        rows = np.shape(x)[0]
        if rows > target:
            raise ValueError("not max_len")
        # Zero-pad extra rows only; columns are untouched.
        return np.pad(x, ((0, target - rows), (0, 0)), mode="constant", constant_values=0)

    return np.stack([_pad(x) for x in inputs])
def pad(input_ele, mel_max_length=None):
    """Right-pad a list of 1-D or 2-D tensors to a common first-dim length and stack."""
    if mel_max_length:
        target = mel_max_length
    else:
        target = max(t.size(0) for t in input_ele)
    padded = []
    for t in input_ele:
        gap = target - t.size(0)
        if len(t.shape) == 1:
            # (left, right) padding on the only axis.
            padded.append(F.pad(t, (0, gap), "constant", 0.0))
        elif len(t.shape) == 2:
            # Pad rows only; last-dim padding stays (0, 0).
            padded.append(F.pad(t, (0, 0, 0, gap), "constant", 0.0))
    return torch.stack(padded)
| StarcoderdataPython |
1926736 | <filename>test_dirganize.py<gh_stars>1-10
#!/usr/bin/env python
# pylint: skip-file
import logging
import os
import shutil
import click
import pytest
from dirganize import cli
def create_files(folder, *files):
    """Create empty files with the given names inside *folder*.

    Fixed: the original os.chdir()'d into the folder and only restored the
    working directory on success, so any open() failure left the process in
    the wrong directory.  Joining paths avoids chdir entirely.
    """
    for name in files:
        with open(os.path.join(folder, name), "w"):
            pass
def test_dirganize():
    """End-to-end test: dirganize sorts a sandbox of files into category folders."""
    sb = "sandbox"
    # Start from a clean slate; ignore_errors replaces the old bare try/except.
    shutil.rmtree(sb, ignore_errors=True)
    os.makedirs(sb)
    files = [
        "a.mp4",
        "b.mp3",
        "c.png",
        "d.py",
        ".dotfile",
        "what",
        "e.gif",
        ".dirganize.yml",
    ]
    create_files(sb, *files)
    # Custom category mapping picked up from the per-directory config file.
    with open(os.path.join(sb, ".dirganize.yml"), "w") as file:
        file.write("Animations: [gif]")
    # --version exits through click; make sure that Exit is raised.
    with pytest.raises(click.exceptions.Exit):
        cli.version_callback(value=True)
    cli.verbosity_callback(value=True)
    assert set(os.listdir(sb)) == set(files)
    cli.main(sb)  # cwd changes to sb
    assert set(os.listdir()) == set(
        [
            ".dirganize.yml",
            ".dotfile",
            "Images",
            "Others",
            "Videos",
            "Audios",
            "Texts",
            "Animations",
        ]
    )
    assert set(os.listdir("Others")) == set(["what"])
    assert set(os.listdir("Animations")) == set(["e.gif"])
    assert set(os.listdir("Audios")) == set(["b.mp3"])
# Allow running this test module directly, outside of pytest.
if __name__ == "__main__":
    test_dirganize()
| StarcoderdataPython |
51249 | #!/usr/bin/python3
import logging
from test_shared import initializeLogs, initializeUartPort, baseOperations
from lib.sim900.smshandler import SimGsmSmsHandler, SimSmsPduCompiler
def printScaPlusPdu(pdu, logger):
    """Log every SCA+PDU part of a compiled message (debug aid).

    Fixed: the loop variable no longer shadows the *pdu* parameter, and the
    function now returns True on success (it previously returned None on
    success but False on failure, making the result unusable).

    :param pdu: object whose compile() yields (sca, pdu) pairs or None
    :param logger: logger with an info() method
    :return: False when the PDU fails to compile, True otherwise
    """
    d = pdu.compile()
    if d is None:
        return False
    for (sca, part, ) in d:
        logger.info("sendSms(): sca + pdu = \"{0}\"".format(sca + part))
    return True
def sendSms(sms, pdu, logger):
    """Send one PDU-encoded SMS, logging its parts first.

    :return: True on success, False (after logging the error) on failure
    """
    # Dump every SCA+PDU chunk for debugging before transmission.
    printScaPlusPdu(pdu, logger)
    if sms.sendPduMessage(pdu, 1):
        return True
    logger.error("error sending SMS: {0}".format(sms.errorText))
    return False
def main():
    """
    Tests SMS sending: prompts for a phone number and message text, talks to
    a SIM900 module over UART, and sends the message as PDU SMS.
    :return: true if everything was OK, otherwise returns false
    """
    print("Please, enter phone number")
    phone_number = input()
    print("Please, enter sms text: ")
    sms_text = input()
    # logging levels
    CONSOLE_LOGGER_LEVEL = logging.INFO
    LOGGER_LEVEL = logging.INFO
    # UART device of the SIM900 module (Raspberry Pi serial port)
    COMPORT_NAME = "/dev/ttyAMA0"
    # Recipient number comes from the prompt above.
    TARGET_PHONE_NUMBER = phone_number
    # You can specify SMS center number, but it's not necessary. If you will not specify SMS center number, SIM900
    # module will get SMS center number from memory
    # SMS_CENTER_NUMBER = "+1 050 123 45 67"
    SMS_CENTER_NUMBER = ""
    # adding & initializing port object
    port = initializeUartPort(portName=COMPORT_NAME)
    # initializing logger
    (formatter, logger, consoleLogger,) = initializeLogs(LOGGER_LEVEL, CONSOLE_LOGGER_LEVEL)
    # making base operations (module detection, IMEI read); None means failure
    d = baseOperations(port, logger)
    if d is None:
        return False
    (gsm, imei) = d
    # creating object for SMS sending
    sms = SimGsmSmsHandler(port, logger)
    # ASCII
    logger.info("sending sms")
    pduHelper = SimSmsPduCompiler(
        SMS_CENTER_NUMBER,
        TARGET_PHONE_NUMBER,
        "{}\n{}".format(
            sms_text,
            "This is a computer do no reply!"
        )
    )
    if not sendSms(sms, pduHelper, logger):
        return False
    gsm.closePort()
    return True
# Script entry point: send the SMS, then report completion.
if __name__ == "__main__":
    main()
    print("DONE")
| StarcoderdataPython |
5054265 | # demo inspired from http://tour.golang.org/#67
from offset import makechan, select, go, run, maintask
def fibonacci(c, quit):
    """Emit Fibonacci numbers on channel *c* until *quit* delivers a value.

    Mirrors the Go select idiom from the Go tour: each iteration either
    completes a send of the current value on c, or returns when quit yields.
    """
    x, y = 0, 1
    while True:
        # select blocks until one of the two channel operations can proceed.
        ret = select(c.if_send(x), quit.if_recv())
        if ret == c.if_send(x):
            # The send of x completed; advance the sequence.
            x, y = y, x+y
        elif ret == quit.if_recv():
            print("quit")
            return
@maintask
def main():
    """Spawn a consumer that prints ten Fibonacci numbers, then signal quit."""
    c = makechan()
    quit = makechan()
    def f():
        # Receive ten values from the producer, then tell it to stop.
        for i in range(10):
            print(c.recv())
        quit.send(0)
    go(f)
    fibonacci(c, quit)
# Start the offset scheduler, which executes the @maintask-decorated main().
if __name__ == "__main__":
    run()
| StarcoderdataPython |
11365101 | import cv2
import numpy as np
def face_warp(img_src, img_dest, landmarks, landmarks_dest):
    """Warp the source face onto the destination image in two variants.

    Returns a dict with the hard-pasted warp (full-image triangulation) and
    the seamlessly blended warp (face-only triangulation).
    """
    warped = face_warping(img_src, img_dest, landmarks, landmarks_dest, True)
    blended = face_warping(img_src, img_dest, landmarks, landmarks_dest, False)
    return {
        "original_warp": warped["warp"],
        "dest_warp": blended["blend"],
    }
def get_landmark_index(landmarks_arr, point):
    """Return the row index of *point* in *landmarks_arr*.

    Raises ValueError when the point is not present (or the lookup fails).
    """
    try:
        matches = np.where((landmarks_arr == point).all(axis=1))[0]
        return matches[0]
    except Exception:
        raise ValueError(f"the point {point} is missing from the landmarks")
def face_warping(img_src_rgb, img_dest_rgb, landmarks, landmarks_dest, add_background_landmarks=False):
    """
    Warp the face from the source image onto the destination image by
    piecewise-affine warping of Delaunay triangles between the landmark sets.

    :param img_src_rgb: source RGB image (numpy array)
    :param img_dest_rgb: destination RGB image (numpy array)
    :param landmarks: landmark coordinates in the source image
    :param landmarks_dest: landmark coordinates in the destination image
    :param add_background_landmarks: If True, apply delaunay triangulation on whole image by adding 8 arbitrary border
    points. Otherwise, only do triangulation for the face
    :return: dict with "warp" (hard paste) and "blend" (seamless clone) RGB images
    """
    # OpenCV works in BGR; convert inputs once on entry and back on exit.
    img_src_arr = cv2.cvtColor(img_src_rgb, cv2.COLOR_RGB2BGR)
    mask = np.zeros(img_src_rgb.shape[:2], dtype=np.uint8)
    img_dest_arr = cv2.cvtColor(img_dest_rgb, cv2.COLOR_RGB2BGR)
    dest_height, dest_width, channels = img_dest_arr.shape
    # Accumulator for the warped triangles, assembled in destination space.
    img_dest_new_face = np.zeros((dest_height, dest_width, channels), np.uint8)
    src_height, src_width = img_src_rgb.shape[:2]
    if add_background_landmarks:
        landmarks = add_background_points(landmarks, src_height, src_width)
    convexhull_src = cv2.convexHull(landmarks)
    cv2.fillConvexPoly(mask, convexhull_src, 255)
    # Delaunay triangulation
    rect = cv2.boundingRect(convexhull_src)
    subdiv = cv2.Subdiv2D(rect)
    subdiv.insert([*landmarks])
    triangles = subdiv.getTriangleList()
    triangles = np.array(triangles, dtype=np.int32)
    # map the mesh's triangles vertices to the landmarks
    indexes_triangles = []
    for t in triangles:
        pt1 = (t[0], t[1])
        pt2 = (t[2], t[3])
        pt3 = (t[4], t[5])
        # get_landmark_index raises ValueError for unknown vertices, so these
        # are never None in practice; the guard below is defensive only.
        index_pt1 = get_landmark_index(landmarks, pt1)
        index_pt2 = get_landmark_index(landmarks, pt2)
        index_pt3 = get_landmark_index(landmarks, pt3)
        if index_pt1 is not None and index_pt2 is not None and index_pt3 is not None:
            triangle = [index_pt1, index_pt2, index_pt3]
            indexes_triangles.append(triangle)
    if add_background_landmarks:
        landmarks_dest = add_background_points(landmarks_dest, dest_height, dest_width)
    convexhull_dest = cv2.convexHull(landmarks_dest)
    # Warp each source triangle onto its destination counterpart.
    for triangle_index in indexes_triangles:
        tr1_pt1 = landmarks[triangle_index[0]]
        tr1_pt2 = landmarks[triangle_index[1]]
        tr1_pt3 = landmarks[triangle_index[2]]
        triangle1 = np.array([tr1_pt1, tr1_pt2, tr1_pt3], np.int32)
        rect1 = cv2.boundingRect(triangle1)
        (x, y, w, h) = rect1
        x, y, w, h, right, bottom = fix_bounding_rect(x, y, w, h, dest_width, dest_height)
        cropped_triangle = img_src_arr[y: bottom, x: right]
        cropped_tr1_mask = np.zeros((h, w), np.uint8)
        # Triangle vertices relative to the crop origin.
        points = np.array(
            [[tr1_pt1[0] - x, tr1_pt1[1] - y],
             [tr1_pt2[0] - x, tr1_pt2[1] - y],
             [tr1_pt3[0] - x, tr1_pt3[1] - y]], np.int32
        )
        cv2.fillConvexPoly(cropped_tr1_mask, points, 255)
        # Triangulation of second face
        tr2_pt1 = landmarks_dest[triangle_index[0]]
        tr2_pt2 = landmarks_dest[triangle_index[1]]
        tr2_pt3 = landmarks_dest[triangle_index[2]]
        triangle2 = np.array([tr2_pt1, tr2_pt2, tr2_pt3], np.int32)
        rect2 = cv2.boundingRect(triangle2)
        (x, y, w, h) = rect2
        x, y, w, h, right, bottom = fix_bounding_rect(x, y, w, h, dest_width, dest_height)
        cropped_tr2_mask = np.zeros((h, w), np.uint8)
        points2 = np.array(
            [[tr2_pt1[0] - x, tr2_pt1[1] - y],
             [tr2_pt2[0] - x, tr2_pt2[1] - y],
             [tr2_pt3[0] - x, tr2_pt3[1] - y]], np.int32
        )
        cv2.fillConvexPoly(cropped_tr2_mask, points2, 255)
        # Warp triangles
        points = np.float32(points)
        points2 = np.float32(points2)
        rotation_matrix = cv2.getAffineTransform(points, points2)
        try:
            warped_triangle = cv2.warpAffine(cropped_triangle, rotation_matrix, (w, h))
        except:
            # invalid shape, one of the dimension is 0, it can just be ignored
            # NOTE(review): bare except also hides non-shape cv2 errors here.
            continue
        warped_triangle = cv2.bitwise_and(warped_triangle, warped_triangle, mask=cropped_tr2_mask)
        # Reconstructing destination face: mask out already-filled pixels so
        # neighbouring triangles don't double-add along shared edges.
        img_dest_new_face_rect_area = img_dest_new_face[y: bottom, x: right]
        img_dest_new_face_rect_area_gray = cv2.cvtColor(img_dest_new_face_rect_area, cv2.COLOR_BGR2GRAY)
        _, mask_triangles_designed = cv2.threshold(img_dest_new_face_rect_area_gray, 1, 255, cv2.THRESH_BINARY_INV)
        warped_triangle = cv2.bitwise_and(warped_triangle, warped_triangle, mask=mask_triangles_designed)
        img_dest_new_face_rect_area = cv2.add(img_dest_new_face_rect_area, warped_triangle)
        img_dest_new_face[y: bottom, x: right] = img_dest_new_face_rect_area
    # Face swapped (putting 1st face into 2nd face)
    img_dest_face_mask = np.zeros(img_dest_rgb.shape[:2], dtype=np.uint8)
    img_dest_head_mask = cv2.fillConvexPoly(img_dest_face_mask, convexhull_dest, 255)
    img_dest_face_mask = cv2.bitwise_not(img_dest_head_mask)
    # Destination image with the face area blanked out, then add the new face.
    img_dest_head_noface = cv2.bitwise_and(img_dest_arr, img_dest_arr, mask=img_dest_face_mask)
    result = cv2.add(img_dest_head_noface, img_dest_new_face)
    (x, y, w, h) = cv2.boundingRect(convexhull_dest)
    x, y, w, h, right, bottom = fix_bounding_rect(x, y, w, h, dest_width, dest_height)
    center_face_dest = ((x + right) // 2, (y + bottom) // 2)
    try:
        # Poisson blending smooths the seam between pasted face and head.
        seamlessclone = cv2.seamlessClone(result, img_dest_arr, img_dest_head_mask, center_face_dest, cv2.NORMAL_CLONE)
    except Exception as e:
        raise e
    return {
        "warp": cv2.cvtColor(result, cv2.COLOR_BGR2RGB),
        "blend": cv2.cvtColor(seamlessclone, cv2.COLOR_BGR2RGB),
    }
def fix_bounding_rect(x, y, b_w, b_h, width, height):
    """Clamp a bounding rect to the image and return (x, y, w, h, right, bottom)."""
    # Clamp the far corner first, then the near corner, then derive the size.
    right = np.clip(x + b_w, 0, width)
    bottom = np.clip(y + b_h, 0, height)
    left = np.clip(x, 0, width)
    top = np.clip(y, 0, height)
    return left, top, right - left, bottom - top, right, bottom
def add_background_points(landmarks_arr, height, width):
    """Append 8 border anchors (corners + edge midpoints) to the landmark array."""
    w, h = width - 1, height - 1
    # Order matters: it must match the triangulation index bookkeeping.
    border = [
        [0, 0],
        [w, 0],
        [w // 2, 0],
        [0, h],
        [0, h // 2],
        [w // 2, h],
        [w, h],
        [w, h // 2],
    ]
    return np.append(landmarks_arr, border, axis=0)
| StarcoderdataPython |
268387 | <filename>main.py<gh_stars>1-10
# -*- coding:utf-8 -*-
import os.path as osp
import torch
import torch.nn.functional as F
import numpy as np
from utils import load_data
from model import CGNN
import argparse
def train(data, model, optimizer):
    """Run one optimization step using the training-mask nodes only."""
    model.train()
    optimizer.zero_grad()
    out = model(data)
    # NLL on the training split; the model is expected to emit log-probs.
    step_loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
    step_loss.backward()
    optimizer.step()
def val(data, model):
    """Evaluate accuracy on the train/val/test splits plus the validation loss.

    Returns [train_acc, val_acc, test_acc, val_loss] where the accuracies are
    floats and val_loss is a tensor.
    """
    model.eval()
    logits, accs = model(data), []
    for _, mask in data('train_mask', 'val_mask', 'test_mask'):
        pred = logits[mask].max(1)[1]
        acc = pred.eq(data.y[mask]).sum().item() / mask.sum().item()
        accs.append(acc)
    val_mask = data.val_mask
    # Reuse the logits computed above instead of running the model a second
    # time (the original recomputed model(data) just for the loss).
    accs.append(F.nll_loss(logits[val_mask], data.y[val_mask]))
    return accs
def main(args, d_input, d_output):
    """Run args.num_expriment training runs and report mean/std test accuracy.

    Each run trains up to args.epochs with early stopping on validation
    accuracy/loss; the test accuracy at the best-validation epoch is kept.
    """
    test_acc_list = []
    for i in range(args.num_expriment):
        data = load_data(args.data_path, args.dataset)
        # Resolve the model class by name (e.g. CGNN) from module globals.
        data, model = globals()[args.model].call(data, args, d_input, d_output)
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
        best_val_acc = test_acc = 0.0
        best_val_loss = np.inf
        wait_step = 0
        ##########################
        # Collected per-epoch for inspection; not used elsewhere here.
        val_loss_list = []
        tem_test_acc_list = []
        for epoch in range(0, args.epochs):
            train(data, model, optimizer)
            train_acc, val_acc, tmp_test_acc, val_loss = val(data, model)
            ##########################
            val_loss_list.append(val_loss.item())
            tem_test_acc_list.append(tmp_test_acc)
            # Progress in either val accuracy or val loss resets the patience.
            if val_acc >= best_val_acc or val_loss <= best_val_loss:
                if val_acc >= best_val_acc:
                    test_acc = tmp_test_acc
                    early_val_acc = val_acc
                    early_val_loss = val_loss
                best_val_acc = np.max((val_acc, best_val_acc))
                best_val_loss = np.min((val_loss, best_val_loss))
                wait_step = 0
            else:
                wait_step += 1
                if wait_step == args.early_stop:
                    print('Early stop! Min loss: ', best_val_loss, ', Max accuracy: ', best_val_acc)
                    # NOTE(review): early_val_acc/early_val_loss are only bound
                    # after val_acc improved at least once -- confirm this always
                    # happens before the patience runs out.
                    print('Early stop model validation loss: ', early_val_loss, ', accuracy: ', early_val_acc)
                    break
        log = 'Model_type: {}, Dateset_name: {}, Experiment: {:03d}, Test: {:.6f}'
        print(log.format(args.model_type, args.dataset, i + 1, test_acc))
        test_acc_list.append(test_acc * 100)
    log = 'Model_type: {}, Dateset_name: {}, Experiments: {:03d}, Mean: {:.6f}, std: {:.6f}\n'
    print(log.format(args.model_type, args.dataset, args.num_expriment, np.mean(test_acc_list),
                     np.std(test_acc_list)))
# Command-line entry point: parse arguments, look up the dataset's feature
# and class dimensions, and launch the experiments.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='CGNN')
    parser.add_argument('--data_path', type=str, help="Path of saved processed data files.", default='./data')
    parser.add_argument('--dataset', type=str, help="Name of the datasets", required=True)
    parser.add_argument('--NCTM', type=str, choices=['linear', 'exp'],
                        help="Type of Negative Curvature Transformation Module", required=True)
    parser.add_argument('--CNM', type=str, choices=['symmetry-norm', '1-hop-norm', '2-hop-norm'],
                        help="Type of Curvature Normalization Module", required=True)
    parser.add_argument('--d_hidden', type=int, help="Dimension of the hidden node features", default=64)
    parser.add_argument('--epochs', type=int, help="The maximum iterations of training", default=200)
    parser.add_argument('--num_expriment', type=int, help="The number of the repeating expriments", default=50)
    parser.add_argument('--early_stop', type=int, help="Early stop", default=20)
    parser.add_argument('--dropout', type=float, help="Dropout", default=0.5)
    parser.add_argument('--lr', type=float, help="Learning rate", default=0.005)
    parser.add_argument('--weight_decay', type=float, help="Weight decay", default=0.0005)
    args = parser.parse_args()
    # Input feature dimension and number of classes for each supported dataset.
    datasets_config = {
        'Cora': {'d_input': 1433,
                 'd_output': 7},
        'Citeseer': {'d_input': 3703,
                     'd_output': 6},
        'PubMed': {'d_input': 500,
                   'd_output': 3},
        'CS': {'d_input': 6805,
               'd_output': 15},
        'Physics': {'d_input': 8415,
                    'd_output': 5},
        'Computers': {'d_input': 767,
                      'd_output': 10},
        'Photo': {'d_input': 745,
                  'd_output': 8},
        'WikiCS': {'d_input': 300,
                   'd_output': 10},
    }
    args.model = 'CGNN'
    # Tag used in log lines, e.g. "CGNN_linear_symmetry-norm_0.5".
    args.model_type = 'CGNN_{}_{}_{}'.format(args.NCTM, args.CNM, args.dropout)
    d_input, d_output = datasets_config[args.dataset]['d_input'], datasets_config[args.dataset]['d_output']
    main(args, d_input, d_output)
| StarcoderdataPython |
1638140 | <filename>Pacote Download/Ex039_12_Alistamento_Militar.py
# Military enlistment check (enlistment age is 18 in Brazil):
# reads a birth year, then reports whether the user still has to enlist,
# must enlist now, or is past the enlistment age, plus how many years
# remain or have passed and the corresponding enlistment year.
# (Removed: an earlier commented-out draft of the same program and the
# redundant unused `import datetime`.)
from datetime import date

atual = date.today().year
nasc = int(input('Ano de nascimento: '))
idade = atual - nasc
print('Quem nasceu em {} tem {} anos em {}.'.format(nasc, idade, atual))
if idade == 18:
    # Exactly 18: enlistment is due right now.
    print('Você tem que se alistar imediatamente')
elif idade < 18:
    # Younger than 18: years remaining until enlistment.
    saldo = 18 - idade
    print('Ainda falta(m) {} ano(s) para o alistamento'.format(saldo))
    ano = atual + saldo
    print('Seu alistamento será em {}.'.format(ano))
elif idade > 18:
    # Older than 18: years since the enlistment deadline.
    saldo = idade - 18
    print('Você já deveria ter se alistado há {} ano(s).'.format(saldo))
    ano = atual - saldo
    print('Seu alistamento foi em {}.'.format(ano))
| StarcoderdataPython |
3281011 | <reponame>researchworking/ScalarEMLP<filename>experiments/scalars_nn.py
import torch.nn as nn
import torch
from torch.utils.data import TensorDataset
import numpy as np
import itertools
def comp_inner_products(x, stype, simplified=True):
    """Compute pairwise inner products between particles.

    Args:
        x: torch tensor of shape [N, n, dim] -- N datasets, n particles,
            each of dimension ``dim``.
        stype: "Euclidean" (standard dot product) or "Minkowski"
            (metric diag(1, -1, ..., -1), time-like first coordinate).
        simplified: if True, return only the upper-triangular entries of
            the Gram matrix (including the diagonal) flattened to
            [N, n*(n+1)/2]; otherwise return the full [N, n, n] matrix.

    Returns:
        Tensor of inner products; shape depends on ``simplified``.

    Raises:
        ValueError: if ``stype`` is not a recognised signature.
    """
    _, n, d = x.shape
    if stype == "Euclidean":
        scalars = torch.einsum('bix,bjx->bij', x, x)
    elif stype == "Minkowski":
        # Metric tensor diag(1, -1, ..., -1).
        G = torch.diag(-torch.ones(d))
        G[0, 0] = 1
        G = torch.einsum('bix,bxj->bij', x, G.unsqueeze(0))
        scalars = torch.einsum('bij,bkj->bik', G, x)
    else:
        # Previously an unknown stype fell through to an UnboundLocalError.
        raise ValueError("Unknown stype: {}".format(stype))
    if simplified:
        # BUG FIX: the original selected columns with
        # torch.nonzero(scalars[0]), i.e. it used the *values* of the first
        # batch element to decide which entries to keep, so any genuine
        # upper-triangular inner product that happened to be 0 in sample 0
        # was silently dropped for the whole batch.  Selecting the
        # upper-triangular indices explicitly is value-independent.
        rows, cols = torch.triu_indices(n, n, device=x.device)
        scalars = scalars[:, rows, cols]
    return scalars
def comp_outer_products(x):
    """Return all pairwise outer products of the particles in ``x``.

    For input of shape [N, n, dim] this computes x_i x_j^T for every
    particle pair (i, j) and flattens the result per dataset, giving a
    tensor of shape [N, n * n * dim * dim].
    """
    batch_size = x.shape[0]
    outer = torch.einsum('bik,bjl->bijkl', x, x)  # [N, n, n, dim, dim]
    return outer.view(batch_size, -1)
def dataset_transform(data):
    """Convert a raw symmetry dataset into (scalars, X, Y) tensors.

    Args:
        data: numpy-backed dataset with attributes ``data.X``, ``data.Y``
            and a ``symname`` of "O5invariant", "O3equivariant" or
            "Lorentz".

    Returns:
        dict with a torch ``TensorDataset`` of (scalars, X, Y) and the
        scalar feature dimension ``dim_scalars``.

    Raises:
        ValueError: for an unrecognised ``symname``.
    """
    X = torch.from_numpy(data.X)
    Y = torch.from_numpy(data.Y)
    if data.symname == "O5invariant":
        # Two 5-d vectors per sample; the invariant features are their
        # pairwise Euclidean inner products.
        X = X.reshape(-1,2,5)
        scalars = comp_inner_products(X, stype="Euclidean")
    elif data.symname == "O3equivariant":
        n=5 # five data points
        # First n columns are the masses, the rest are n 3-d positions.
        mi = X[:,:n]
        ri = X[:,n:].reshape(-1,n,3)
        x_outer = torch.einsum('bik,bjl->bijkl', ri, ri) #[N, n, n, dim, dim]
        x_inner = torch.einsum('bik,bjk->bij', ri, ri) #[N, n, n]
        # All unordered particle pairs (i <= j): 15 pairs for n = 5.
        index = np.array(list(itertools.combinations_with_replacement(np.arange(0,n), r=2)))
        N = len(X)
        # Equivariant basis: the 15 pairwise outer products plus identity.
        X = X.new_zeros(N, 16, 3, 3)
        X[:,:15,:,:] = x_outer[:,index[:,0],index[:,1],:,:]
        X[:,-1,:,:] = torch.eye(3).repeat(N,1,1)
        ri = x_inner[:,index[:,0], index[:,1]] # [N, 15]
        mi1 = mi[:,index[:,0]]
        mi2 = mi[:,index[:,1]]
        # Per-pair invariant features: (m_i, m_j, <r_i, r_j>).
        scalars = torch.stack((mi1,mi2,ri),dim=-1) # [N, 15, 3]
    elif data.symname == "Lorentz":
        # Four 4-vectors per sample; features are Minkowski inner products.
        X = X.reshape(-1,4,4)
        scalars = comp_inner_products(X, stype="Minkowski")
    else:
        raise ValueError("Wrong symname???")
    dim_scalars = scalars.shape[-1]
    return {'dataset': TensorDataset(scalars, X, Y), 'dim_scalars': dim_scalars}
class BasicMLP(nn.Module):
    """Plain fully-connected network with an optional output LayerNorm.

    Architecture: Linear(n_in, n_hidden) + ReLU, then ``n_layers`` pairs
    of Linear(n_hidden, n_hidden) + ReLU, a final Linear(n_hidden, n_out),
    and -- when ``layer_norm`` is True -- a LayerNorm over the output.
    """

    def __init__(
        self,
        n_in,
        n_out,
        n_hidden=100,
        n_layers=2,
        layer_norm=True
    ):
        super().__init__()
        stack = [nn.Linear(n_in, n_hidden), nn.ReLU()]
        for _ in range(n_layers):
            stack.extend((nn.Linear(n_hidden, n_hidden), nn.ReLU()))
        stack.append(nn.Linear(n_hidden, n_out))
        if layer_norm:
            stack.append(nn.LayerNorm(n_out))
        self.mlp = nn.Sequential(*stack)

    def forward(self, x):
        """Apply the MLP; the last dimension of ``x`` must equal n_in."""
        return self.mlp(x)
class EquivariancePermutationLayer(nn.Module):
    """Permutation-aware equivariant output layer for the O(3) task.

    Owns twelve small scalar-weight MLPs.  They weight (a) the identity
    matrix, (b) the same-particle outer products x_i x_i^T and (c) the
    cross-particle outer products x_i x_j^T, using permutation-invariant
    summaries of the invariant scalar features.
    """

    # (attribute name, input width) for every scalar-weight MLP.  Declared
    # as a single table instead of twelve copy-pasted constructor calls;
    # the order matches the original attribute-registration order so that
    # saved state_dicts remain compatible.
    _MLP_SPECS = (
        ('f_Mij', 4), ('f_Mii', 4),
        ('f_Mrij', 15), ('f_Mrii', 15),
        ('f_Mmij', 5), ('f_Mmii', 5),
        ('f_Mmimj', 1), ('f_Mmimi', 1),
        ('f_Iij', 2), ('f_Iii', 2),
        ('f_Imij', 1), ('f_Imii', 1),
    )

    def __init__(
        self,
        n_in,
        n_hidden,
        n_layers,
        layer_norm,
    ):
        # NOTE: replaced super(self.__class__, self) -- which recurses
        # infinitely under subclassing -- with the safe zero-arg form.
        super().__init__()
        for name, width in self._MLP_SPECS:
            setattr(self, name, BasicMLP(
                n_in=width,
                n_out=1,
                n_hidden=n_hidden,
                n_layers=n_layers,
                layer_norm=layer_norm,
            ))
        self.n_in = n_in

    def forward(self, x):
        """Produce one 3x3 output matrix (flattened to 9) per dataset.

        Args:
            x: tuple ``(scalars, basis)``.  ``scalars`` is [_, 15, 3]
               (per-pair features, presumably (m_i, m_j, <r_i, r_j>) as
               built by dataset_transform -- TODO confirm the first five
               pair slots are the same-particle pairs).  ``basis`` is
               [_, 16, 3, 3]: 15 outer products plus the identity.
        """
        scalars, x = x
        # --- identity-matrix coefficient --------------------------------
        inputIij = torch.cat(
            (
                scalars[:, 5:, -1].unsqueeze(-1),
                self.f_Imij(scalars[:, 5:, 0].unsqueeze(-1)) + self.f_Imij(scalars[:, 5:, 1].unsqueeze(-1))
            ),
            dim=-1
        )  # [_, 10, 2]
        inputIii = torch.cat(
            (
                scalars[:, :5, -1].unsqueeze(-1),
                self.f_Imii(scalars[:, :5, 0].unsqueeze(-1))
            ),
            dim=-1
        )  # [_, 5, 2]
        outI = torch.sum(self.f_Iij(inputIij), dim=1, keepdim=True)  # [_, 1, 1]
        outI += torch.sum(self.f_Iii(inputIii), dim=1, keepdim=True)  # [_, 1, 1]
        outI = outI * x[:, 15, :, :]  # broadcast over the identity -> [_, 3, 3]
        # --- outer-product coefficients (cross- and same-particle) ------
        inputMij = torch.cat(
            (
                scalars[:, 5:, -1].unsqueeze(-1),
                self.f_Mmimj(scalars[:, 5:, 0].unsqueeze(-1)) + self.f_Mmimj(scalars[:, 5:, 1].unsqueeze(-1)),
                torch.sum(self.f_Mrij(scalars[:, :, -1]), dim=-1, keepdim=True).repeat(1, 10).unsqueeze(-1),
                torch.sum(self.f_Mmij(scalars[:, :5, 0]), dim=-1, keepdim=True).repeat(1, 10).unsqueeze(-1),
            ),
            dim=-1
        )  # [_, 10, 4]
        inputMii = torch.cat(
            (
                scalars[:, :5, -1].unsqueeze(-1),
                self.f_Mmimi(scalars[:, :5, 0].unsqueeze(-1)),
                torch.sum(self.f_Mrii(scalars[:, :, -1]), dim=-1, keepdim=True).repeat(1, 5).unsqueeze(-1),
                torch.sum(self.f_Mmii(scalars[:, :5, 0]), dim=-1, keepdim=True).repeat(1, 5).unsqueeze(-1),
            ),
            dim=-1
        )  # [_, 5, 4]
        outMii = self.f_Mii(inputMii).squeeze(-1)  # [_, 5]
        outMii = torch.einsum('bi,bijk->bjk', outMii, x[:, :5, :, :])  # [_, 3, 3]
        outMij = self.f_Mij(inputMij).squeeze(-1)  # [_, 10]
        outMij = torch.einsum('bi,bijk->bjk', outMij, x[:, 5:15, :, :])  # [_, 3, 3]
        return (outI + outMij + outMii).view(-1, 9)  # [_, 9]
class EquivarianceLayer(nn.Module):
    """Equivariant output layer: one MLP produces 16 scalar weights that
    linearly combine the 16 basis matrices supplied alongside the scalars.
    """

    def __init__(
        self,
        n_in,
        n_hidden,
        n_layers,
        layer_norm,
    ):
        super(self.__class__, self).__init__()
        # Single MLP mapping the invariant features to 16 basis weights.
        self.f = BasicMLP(
            n_in=n_in,
            n_out=16,
            n_hidden=n_hidden,
            n_layers=n_layers,
            layer_norm=layer_norm,
        )
        self.n_in = n_in

    def forward(self, x):
        """Combine the basis matrices with learned weights.

        ``x`` is a tuple ``(scalars, basis)``; ``basis`` has shape
        [_, 16, 3, 3] and the result is the weighted sum flattened to
        [_, 9].
        """
        scalars, basis = x
        features = torch.cat((scalars[:, :, -1], scalars[:, :5, 0]), dim=-1)  # [_, 20]
        weights = self.f(features)[:, :, None, None]  # [_, 16, 1, 1]
        combined = torch.sum(weights * basis, dim=1)  # [_, 3, 3]
        return combined.view(-1, 9)  # [_, 9]
| StarcoderdataPython |
3294048 | <reponame>DploY707/AST_parser<gh_stars>1-10
import networkx as nx
import matplotlib.pyplot as plt
from core.utils import Color
from core.utils import set_string_colored
from core.parser import ConstData
from core.parser import stmtList
from core.parser import actionList
from core.parser import dataList
class ASTGraph():
    """A networkx view of a parsed AST (node list plus parent/child edges)."""

    def __init__(self, nodeList, edgeList, config = None):
        self.nodeList = nodeList  # AST nodes; each exposes .nodeInfo.type
        self.edgeList = edgeList  # edges carrying .pIndex/.cIndex into nodeList
        self.graph = None         # built lazily by graph_initialize()
        self.config = config     # GraphConfig; required when encode_flag=True

    def graph_initialize(self, encode_flag = False):
        """Build ``self.graph`` from the edge list.

        With ``encode_flag`` each node index is replaced by its
        type-encoded value (see cal_labeled_index) and graphs larger than
        ``config.MAX_NODE_COUNT`` are rejected.
        """
        if encode_flag and self.config.MAX_NODE_COUNT < len(self.nodeList):
            print(set_string_colored('Increase the graph configuration for MAX_NODE_COUNT', Color.RED.value))
            return
        self.graph = nx.Graph()
        # NOTE(review): the original branched on whether the child node was
        # a ConstData, but both branches added the *identical* edge, so the
        # check was dead code and has been removed.
        index_of = self.cal_labeled_index if encode_flag else (lambda i: i)
        for edge in self.edgeList:
            if edge.pIndex == -1:
                # Synthetic root.  NOTE(review): with encode_flag the
                # original also ran -1 through cal_labeled_index (reading
                # nodeList[-1]'s type); that behaviour is preserved here --
                # confirm it is intentional.
                parent_label = str(index_of(edge.pIndex)) + ':' + 'ROOT'
            else:
                parent_label = str(index_of(edge.pIndex)) + ':' + self.nodeList[edge.pIndex].nodeInfo.type
            child_label = str(index_of(edge.cIndex)) + ':' + self.nodeList[edge.cIndex].nodeInfo.type
            self.graph.add_edge(parent_label, child_label)

    def cal_labeled_index(self, index):
        """Encode ``index`` into a type-partitioned index space.

        Statement, action and data types are mapped to disjoint windows of
        size ``config.TYPE_WINDOW_SIZE`` (actions offset by 20 windows,
        data types by 40).  Returns None for an unknown node type,
        matching the original behaviour.
        """
        nodeType = self.nodeList[index].nodeInfo.type
        if nodeType in stmtList:
            return stmtList.index(nodeType) * self.config.TYPE_WINDOW_SIZE + index
        elif nodeType in actionList:
            return (actionList.index(nodeType) + 20) * self.config.TYPE_WINDOW_SIZE + index
        elif nodeType in dataList:
            return (dataList.index(nodeType) + 40) * self.config.TYPE_WINDOW_SIZE + index
        else:
            print(set_string_colored('UNKNOWN TYPE NODE...', Color.RED.value))

    def get_numeric_node_graph(self):
        # Unfinished in the original: only guards against an unbuilt graph.
        if self.graph is None:
            print(set_string_colored('Graph Should be initialized first...', Color.RED.value))

    def draw_graph(self):
        """Draw the graph with a graphviz 'dot' layout and print a summary."""
        nx.draw(self.graph, pos=nx.drawing.nx_agraph.graphviz_layout(self.graph, prog='dot'), with_labels=True)
        print(nx.info(self.graph))

    def save_graph_as_png(self, savePath):
        """Render the graph and write it to ``savePath`` as a PNG.

        BUG FIX: the original called ``plt.savefig('/')`` (an invalid
        path that always raises) and only saved to ``savePath`` *after*
        ``plt.show()``, by which point the figure had been consumed.
        Save first, then display.
        """
        plt.figure()
        self.draw_graph()
        plt.axis('off')
        plt.savefig(savePath, format="PNG")
        plt.show()
class GraphConfig():
    """Sizing parameters for encoded AST graphs.

    MAX_NODE_COUNT: largest node count a graph may have when encoding.
    TYPE_WINDOW_SIZE: index stride reserved per node type in
        ASTGraph.cal_labeled_index().
    """

    def __init__(self, MAX_NODE_COUNT, TYPE_WINDOW_SIZE):
        self.MAX_NODE_COUNT = MAX_NODE_COUNT
        self.TYPE_WINDOW_SIZE = TYPE_WINDOW_SIZE
200596 | <reponame>oferby/networking-bagpipe
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# encoding: utf-8
# Copyright 2014 Orange
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from networking_bagpipe.bagpipe_bgp.engine import exa
def prefix_to_packed_ip_mask(prefix):
    """Split an "address/length" prefix into (packed address, mask length)."""
    address, length = prefix.split("/")
    packed = exa.IP.pton(address)
    return (packed, int(length))
@exa.NLRI.register(exa.AFI.ipv4, exa.SAFI.mpls_vpn, force=True)
@exa.NLRI.register(exa.AFI.ipv6, exa.SAFI.mpls_vpn, force=True)
class IPVPN(exa.IPVPN):
    # two NLRIs with same RD and prefix, but different labels need to
    # be equal and have the same hash

    def __eq__(self, other):
        # Deliberately ignores labels (unlike the base class) so that
        # routes differing only in MPLS label compare equal.
        return self.rd == other.rd and self.cidr == other.cidr

    def __hash__(self):
        # Hash on the packed CIDR bytes; must stay consistent with __eq__
        # (equal (rd, cidr) pairs yield equal hashes -- assumes cidr
        # equality implies identical _packed bytes; TODO confirm in exa).
        return hash((self.rd, self.cidr._packed))
def IPVPNRouteFactory(afi, prefix, label, rd, nexthop):
    """Build an IPVPN NLRI for ``prefix`` (an "address/length" string)."""
    packed, mask_len = prefix_to_packed_ip_mask(prefix)
    safi = exa.SAFI(exa.SAFI.mpls_vpn)
    labels = exa.Labels([label], True)
    return IPVPN.new(afi, safi, packed, mask_len, labels, rd, nexthop)
| StarcoderdataPython |
6410037 | import os
from typing import Iterable, Mapping, Optional, Union
import gin
from kblocks.gin_utils.config import fix_bindings, fix_paths
from kblocks.gin_utils.path import enable_relative_includes, enable_variable_expansion
_GIN_SUMMARY = """
# --cwd={cwd}
# --incl_rel={incl_rel}
# --expand_vars={expand_vars}
# -------------------
# CONFIG FILES
{config_files}
# -------------------
# BINDINGS
{bindings}
"""
class GinSummary:
    """
    Class for summarizing gin configuration.

    Args:
        cwd: current working directory (defaults to ``os.getcwd()``).
        incl_rel: True indicates `enable_relative_includes` should be
            called.
        expand_vars: True indicates `enable_variable_expansion` should be
            called.
        config_files: path or paths to config files to be included.
        bindings: additional bindings to be set after files are parsed.
    """

    def __init__(
        self,
        cwd: Optional[str] = None,
        incl_rel: bool = True,
        expand_vars: bool = True,
        config_files: Union[str, Iterable[str]] = (),
        bindings: Union[str, Iterable[str]] = (),
    ):
        self.cwd = cwd if cwd is not None else os.getcwd()
        self.incl_rel = incl_rel
        self.expand_vars = expand_vars
        self.config_files = fix_paths(config_files)
        self.bindings = fix_bindings(bindings)

    def get_config(self):
        """Return a dictionary representation of this object."""
        return {
            "cwd": self.cwd,
            "incl_rel": self.incl_rel,
            "expand_vars": self.expand_vars,
            "config_files": self.config_files,
            "bindings": self.bindings,
        }

    @classmethod
    def from_config(cls, config: Mapping[str, Union[bool, str]]):
        """Inverse of `get_config`."""
        return cls(**config)

    def pretty_format(self):
        """Multi-line human readable string representation."""
        fields = self.get_config()
        file_list = fields["config_files"]
        assert isinstance(file_list, list)
        fields["config_files"] = "\n".join(file_list)
        return _GIN_SUMMARY.format(**fields)

    def enable_path_options(self):
        """Enable relative/expansion options depending on constructor values."""
        if self.incl_rel:
            enable_relative_includes()
        if self.expand_vars:
            enable_variable_expansion()

    def parse(self, finalize: bool = True):
        """Parse files/bindings provided in constructor."""
        gin.parse_config_files_and_bindings(
            self.config_files, self.bindings, finalize_config=finalize
        )
| StarcoderdataPython |
11237310 | <filename>tests/unit-tests/test_validations.py
import os
import imp
import sys
import testtools
from mock import patch
from cloudify.mocks import MockCloudifyContext
validate = imp.load_source(
'validate', os.path.join(
os.path.dirname(__file__),
'../../components/manager/scripts/validate.py'))
os_distro = ('distro', '1')
TEST_SERVICE_NAME = 'service'
class MockNodeProperties(dict):
    """Dict standing in for a cloudify node-properties object.

    The real properties object exposes get_all(); for this mock the
    mapping itself is the full property set.
    """

    def get_all(self):
        # The mock *is* the complete property mapping.
        return self
def _create_mock_context(install_node_props,
                         node_id='es_node',
                         service=TEST_SERVICE_NAME):
    """Wrap ``install_node_props`` in a MockCloudifyContext for the tests."""
    properties = MockNodeProperties(install_node_props)
    return MockCloudifyContext(
        node_id=node_id, node_name=service, properties=properties)
class TestValidations(testtools.TestCase):
    """Unit tests for the bootstrap validation script (validate.py).

    Two shared contexts are built at class-definition time: CTX with
    validations enabled and IGNORE_VALIDATIONS_CTX with them disabled.
    """

    node_properties = {
        'ignore_bootstrap_validations': False,
        'manager_resources_package': 'http://non-existing-domain.com/package',
        'minimum_required_total_physical_memory_in_mb': 3792,
        'minimum_required_available_disk_space_in_gb': 5
    }
    CTX = _create_mock_context(node_properties, node_id='node', service='test')

    # NOTE: mutates the shared dict before building the second context.
    node_properties.update({'ignore_bootstrap_validations': 'True'})
    IGNORE_VALIDATIONS_CTX = _create_mock_context(
        node_properties, node_id='_node', service='test')

    @patch('validate.ctx', CTX)
    @patch('validate._get_os_distro', return_value=('redhat', '7'))
    @patch('validate._get_host_total_memory', return_value=100000000)
    @patch('validate._get_available_host_disk_space', return_value=100)
    @patch('validate._validate_resources_package_url', return_value=None)
    @patch('validate._validate_openssl_version', return_value=None)
    def test_successful_validation(self, *_):
        # Everything mocked to healthy values: must not raise.
        validate.validate()

    @patch('validate.ctx', IGNORE_VALIDATIONS_CTX)
    @patch('validate._get_os_distro', return_value=os_distro)
    @patch('validate._get_host_total_memory', return_value=1)
    @patch('validate._get_available_host_disk_space', return_value=1)
    @patch('validate._validate_resources_package_url', return_value=None)
    @patch('validate._get_python_version', return_value=(8, 8))
    def test_failed_yet_ignored_validation(self, *_):
        # Every check fails, but ignore_bootstrap_validations suppresses them.
        validate.validate()

    @patch('validate.ctx', CTX)
    @patch('validate._get_os_distro', return_value=os_distro)
    @patch('validate._get_host_total_memory', return_value=1)
    @patch('validate._get_available_host_disk_space', return_value=1)
    def test_failed_validation(self, *_):
        # print self.CTX.get_all()
        # abort_operation is replaced so the aggregated error message
        # surfaces as a SystemExit we can inspect.
        validate.ctx.abort_operation = lambda message: sys.exit(message)
        ex = self.assertRaises(SystemExit, validate.validate)
        self.assertIn(
            validate._error('Cloudify Manager requires'),
            str(ex))
        self.assertIn(
            validate._error('The provided host does not have enough memory'),
            str(ex))
        self.assertIn(
            validate._error('The provided host does not have enough disk'),
            str(ex))
        self.assertIn(
            validate._error(
                "The Manager's Resources Package "
                "http://non-existing-domain.com/package"),
            str(ex))

    def test_fail_validate_resources_package_url(self):
        # NOTE(review): performs a real HTTP lookup against a
        # non-existent domain -- network-dependent.
        test_url = 'http://non-existent-domain.com/non-existent-file.tar.gz'
        error = validate._validate_resources_package_url(test_url)
        desired_error = (validate._error(
            "The Manager's Resources Package {0} is not accessible "
            "(HTTP Error: {1})".format(test_url, '404')))
        self.assertEqual(desired_error, error)

    @patch('validate.ctx', CTX)
    @patch('validate._get_os_distro', return_value=os_distro)
    def test_validate_supported_distros_ok(self, _):
        error = validate._validate_supported_distros(['distro'], ['1'])
        self.assertIsNone(error)

    @patch('validate.ctx', CTX)
    @patch('validate._get_os_distro', return_value=os_distro)
    def _test_fail_validate_supported_distros(self, _, distros, versions):
        # Helper (leading underscore: not collected as a test itself).
        current_distro, current_version = validate._get_os_distro()
        error = validate._validate_supported_distros(distros, versions)
        desired_error = 'Manager requires either '
        self.assertIn(desired_error, error)

    def test_fail_validate_supported_distros_bad_distro(self):
        self._test_fail_validate_supported_distros(['bla'], ['1'])

    def test_fail_validate_supported_distros_bad_version(self):
        self._test_fail_validate_supported_distros(['distro'], ['2'])

    def test_fail_validate_supported_distros_bad_version_and_distro(self):
        self._test_fail_validate_supported_distros(['bla'], ['2'])

    @patch('validate.ctx', CTX)
    @patch('validate._get_host_total_memory', return_value=1023)
    def test_fail_validate_physical_memory(self, _):
        # 1023 MB available vs. 1024 MB required: just under the limit.
        error = validate._validate_sufficient_memory(1024)
        desired_error = validate._error(
            'The provided host does not have enough memory')
        self.assertIn(desired_error, error)

    @patch('validate.ctx', CTX)
    @patch('validate._get_host_total_memory', return_value=1024)
    def test_validate_edgy_physical_memory(self, _):
        # Exactly at the limit: accepted.
        error = validate._validate_sufficient_memory(1024)
        self.assertIsNone(error)

    @patch('validate.ctx', CTX)
    def test_validate_physical_memory(self):
        error = validate._validate_sufficient_memory(1)
        self.assertIsNone(error)

    @patch('validate.ctx', CTX)
    @patch('validate._get_available_host_disk_space', return_value=1)
    def test_fail_validate_available_disk_space(self, _):
        error = validate._validate_sufficient_disk_space(2)
        desired_error = validate._error(
            'The provided host does not have enough disk space')
        self.assertIn(desired_error, error)

    @patch('validate.ctx', CTX)
    def test_validate_available_disk_space(self):
        error = validate._validate_sufficient_disk_space(1)
        self.assertIsNone(error)

    @patch('validate.ctx', CTX)
    @patch('validate._get_python_version', return_value=(2, 7))
    def test_validate_python_version(self, _):
        error = validate._validate_python_version(2, 7)
        self.assertIsNone(error)

    @patch('validate.ctx', CTX)
    @patch('validate._get_python_version', return_value=(2, 7))
    def test_fail_validate_unacceptable_python_major_version(self, _):
        error = validate._validate_python_version(2, 0)
        self.assertIn('You must be running Python', error)

    @patch('validate.ctx', CTX)
    @patch('validate._get_python_version', return_value=(2, 7))
    def test_fail_validate_unacceptable_python_minor_version(self, _):
        error = validate._validate_python_version(3, 7)
        self.assertIn('You must be running Python', error)

    def test_get_python_version(self):
        # Sanity check against the interpreter actually running the tests.
        major = sys.version_info[0]
        minor = sys.version_info[1]
        version = validate._get_python_version()
        self.assertEqual(version[0], major)
        self.assertEqual(version[1], minor)
| StarcoderdataPython |
92103 | # -*- coding: utf-8 -*-
"""
Blackbird statistics plugins.
This plugin get the items queue for "stats" and
put the items queue for "item".
"""
import blackbird
from blackbird.plugins import base
class ConcreteJob(base.JobBase):
    """Aggregates blackbird statistics and re-emits them as zabbix items.

    Items are drained from the "stats" queue, folded into ``self.stats``,
    and the totals are then pushed onto the "item" queue.
    """

    def __init__(self, options, queue=None, stats_queue=None, logger=None):
        super(ConcreteJob, self).__init__(options, queue, logger)
        # Counters (plus a couple of static values) reported every cycle.
        self.stats = {
            'blackbird.ping': 1,
            'blackbird.version': blackbird.__version__,
            'blackbird.queue.length': None,
            'blackbird.zabbix_sender.processed': 0,
            'blackbird.zabbix_sender.failed': 0,
            'blackbird.zabbix_sender.total': 0,
        }
        self.stats_queue = stats_queue

    def build_items(self):
        """
        get the items from STATS QUEUE
        calculate self.stats
        make new items from self.stats
        put the new items for ITEM QUEUE
        """
        # Drain everything currently on the stats queue into self.stats.
        while not self.stats_queue.empty():
            item = self.stats_queue.get()
            self.calculate(item)

        # BUG FIX: dict.iteritems() is Python-2 only and crashes on
        # Python 3; items() behaves correctly on both.
        for key, value in self.stats.items():
            if 'blackbird.queue.length' == key:
                # Queue length is sampled live rather than accumulated.
                value = self.queue.qsize()

            item = BlackbirdStatisticsItem(
                key=key,
                value=value,
                host=self.options['hostname']
            )

            if self.enqueue(item=item, queue=self.queue):
                self.logger.debug(
                    'Inserted {0} to the queue.'.format(item.data)
                )

    def calculate(self, item):
        """Fold one stats-queue item into the matching counter.

        Items without a 'key' entry, or with a key we do not track, are
        ignored.  NOTE(review): an item carrying a tracked 'key' but no
        'value' raises KeyError, matching the original behaviour.
        """
        if 'key' in item.data and item.data['key'] in self.stats:
            self.stats[item.data['key']] += item.data['value']
class BlackbirdStatisticsItem(base.ItemBase):
    """One zabbix item carrying a single blackbird statistic."""

    def __init__(self, key, value, host):
        super(BlackbirdStatisticsItem, self).__init__(key, value, host)
        self._data = dict()
        # Populate self._data from key/value/host via the base class.
        super(BlackbirdStatisticsItem, self)._generate()

    @property
    def data(self):
        # Read-only view of the generated item payload.
        return self._data
class Validator(base.ValidatorBase):
    """Builds the configobj validation spec for this plugin's section."""

    def __init__(self):
        self.__spec = None  # rebuilt on every access of ``spec``

    @property
    def spec(self):
        """Config section header plus the hostname option spec."""
        section_header = "[{0}]".format(__name__)
        hostname_line = "hostname = string(default={0})".format(
            self.detect_hostname())
        self.__spec = (section_header, hostname_line)
        return self.__spec
| StarcoderdataPython |
38853 | from nose.tools import eq_
import amo.tests
from addons.models import (Addon, attach_categories, attach_tags,
attach_translations)
from addons.search import extract
class TestExtract(amo.tests.TestCase):
    """Checks that addons.search.extract() mirrors the Addon attributes."""

    fixtures = ['base/users', 'base/addon_3615']

    def setUp(self):
        super(TestExtract, self).setUp()
        # Attributes extract() is expected to copy through verbatim.
        self.attrs = ('id', 'slug', 'created', 'last_updated',
                      'weekly_downloads', 'average_daily_users', 'status',
                      'type', 'hotness', 'is_disabled', 'premium_type')
        self.transforms = (attach_categories, attach_tags, attach_translations)

    def _extract(self):
        # Load addon 3615 with every queryset transform applied, then run
        # it through the search extractor.
        qs = Addon.objects.filter(id__in=[3615])
        for t in self.transforms:
            qs = qs.transform(t)
        self.addon = list(qs)[0]
        return extract(self.addon)

    def test_extract_attributes(self):
        extracted = self._extract()
        for attr in self.attrs:
            eq_(extracted[attr], getattr(self.addon, attr))
| StarcoderdataPython |
3299402 | <gh_stars>0
# --------------------------------------------------------------------- #
# Name: "Calculadora de IMC"
# Version: "1.0.0"
# Description: "Realiza o Cálculo de Índice de Massa Corporal (IMC)"
# Author: ThiCremonez
# Language: pt-br
# --------------------------------------------------------------------- #
from tkinter import *
janela = Tk()  # root application window ("janela" is Portuguese for window)
def indice_de_massa_corporal(peso, altura):
    """Return the body mass index (BMI): weight / height**2.

    ``peso`` is the weight in kilograms, ``altura`` the height in metres.
    """
    return peso / altura ** 2
def bt_click():
    """Read weight/height from the entry widgets, classify the BMI, and
    display the result in ``lb5``.

    Accepts both comma and dot as the decimal separator.
    """
    peso_txt = str(ed1.get())    # raw weight text from the entry widget
    altura_txt = str(ed2.get())  # raw height text from the entry widget
    peso = float(peso_txt.replace(',', '.'))      # weight in kg
    altura = float(altura_txt.replace(',', '.'))  # height in metres
    # Compute the BMI once instead of re-evaluating it in every branch
    # (the original called indice_de_massa_corporal() up to ten times).
    imc = indice_de_massa_corporal(peso, altura)
    if imc < 18.5:
        lb5["text"] = "Seu IMC é %1.2f \n Magreza \n Grau de obesidade = 0" % imc
    elif imc < 25:
        lb5["text"] = "Seu IMC é %1.2f \n Normal \n Grau de obesidade = 0" % imc
    elif imc < 30:
        lb5["text"] = "Seu IMC é %1.2f \n Sobrepeso \n Grau de obesidade = I" % imc
    elif imc < 40:
        lb5["text"] = "Seu IMC é %1.2f \n Obesidade \n Grau de obesidade = II" % imc
    else:
        lb5["text"] = "Seu IMC é %1.2f \n Obesidade Grave \n Grau de obesidade = III" % imc
# --- widget construction -------------------------------------------------
lb1 = Label(janela, text="----Cálculo de IMC----", bg="light grey")  # title bar
lb2 = Label(janela, text="Digite seu peso em 'kg':")                 # weight prompt
lb3 = Label(janela, text="Digite sua altura em 'metros':")           # height prompt
lb4 = Label(janela, text="Resultado: ")
lb5 = Label(janela, text="")  # filled in by bt_click() with the BMI result
ed1 = Entry(janela)  # weight input
ed2 = Entry(janela)  # height input
bt = Button(janela, text="Calcular", command=bt_click)
# --- grid layout ---------------------------------------------------------
lb1.grid(row=10, column=10, columnspan=20, sticky=W + E)
lb2.grid(row=20, column=10, sticky=E)
lb3.grid(row=30, column=10, sticky=E, pady=(0, 20))
lb4.grid(row=50, column=10, ipady=20, pady=(0, 40))
lb5.grid(row=50, column=20, pady=(0, 40))
ed1.grid(row=20, column=20, sticky=W)
ed2.grid(row=30, column=20, sticky=W, pady=(0, 20))
bt.grid(row=40, column=20, columnspan=10, sticky=W + E)
# --- window setup / main loop --------------------------------------------
janela.title("Cálculo de IMC")
janela.geometry("300x200+200+200")
janela.mainloop()
| StarcoderdataPython |
1933448 | from otree.api import Currency as c, currency_range, expect
from . import pages
from ._builtin import Bot
from .models import Constants
class PlayerBot(Bot):
    """Automated participant that submits a fixed demographics form."""

    def play_round(self):
        answers = {
            'age': 24,
            'gender': 0,
            'education': 3,
            'student': 1,
            'experiments': 2,
            'chosen_role': 1,
            'religion': 0,
        }
        yield pages.Demographics, answers
| StarcoderdataPython |
9753301 | <gh_stars>1-10
/usr/lib/python2.7/encodings/cp856.py | StarcoderdataPython |
11397050 | <reponame>Munene19/Galleryapp<gh_stars>0
# Generated by Django 3.1.3 on 2020-11-19 21:50
import cloudinary.models
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated by `makemigrations`: adjusts field definitions on
    # the pics app models (narrower CharFields, Cloudinary-backed image).

    dependencies = [
        ('pics', '0003_auto_20201116_0529'),
    ]

    operations = [
        migrations.AlterField(
            model_name='category',
            name='Image_category',
            field=models.CharField(max_length=30),
        ),
        migrations.AlterField(
            model_name='image',
            name='Image',
            field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='image'),
        ),
        migrations.AlterField(
            model_name='location',
            name='Image_location',
            # `default=timezone.now` is a one-off default captured by
            # makemigrations for existing rows; preserve_default resets it.
            field=models.CharField(default=django.utils.timezone.now, max_length=30),
            preserve_default=False,
        ),
    ]
| StarcoderdataPython |
1800762 | <filename>vision/google/cloud/vision_v1p2beta1/proto/geometry_pb2.py
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/vision_v1p2beta1/proto/geometry.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/vision_v1p2beta1/proto/geometry.proto",
package="google.cloud.vision.v1p2beta1",
syntax="proto3",
serialized_options=_b(
"\n!com.google.cloud.vision.v1p2beta1B\rGeometryProtoP\001ZCgoogle.golang.org/genproto/googleapis/cloud/vision/v1p2beta1;vision\370\001\001"
),
serialized_pb=_b(
'\n2google/cloud/vision_v1p2beta1/proto/geometry.proto\x12\x1dgoogle.cloud.vision.v1p2beta1"\x1e\n\x06Vertex\x12\t\n\x01x\x18\x01 \x01(\x05\x12\t\n\x01y\x18\x02 \x01(\x05"(\n\x10NormalizedVertex\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02"\x95\x01\n\x0c\x42oundingPoly\x12\x37\n\x08vertices\x18\x01 \x03(\x0b\x32%.google.cloud.vision.v1p2beta1.Vertex\x12L\n\x13normalized_vertices\x18\x02 \x03(\x0b\x32/.google.cloud.vision.v1p2beta1.NormalizedVertex"+\n\x08Position\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02\x12\t\n\x01z\x18\x03 \x01(\x02\x42|\n!com.google.cloud.vision.v1p2beta1B\rGeometryProtoP\x01ZCgoogle.golang.org/genproto/googleapis/cloud/vision/v1p2beta1;vision\xf8\x01\x01\x62\x06proto3'
),
)
_VERTEX = _descriptor.Descriptor(
name="Vertex",
full_name="google.cloud.vision.v1p2beta1.Vertex",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="x",
full_name="google.cloud.vision.v1p2beta1.Vertex.x",
index=0,
number=1,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="y",
full_name="google.cloud.vision.v1p2beta1.Vertex.y",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=85,
serialized_end=115,
)
_NORMALIZEDVERTEX = _descriptor.Descriptor(
name="NormalizedVertex",
full_name="google.cloud.vision.v1p2beta1.NormalizedVertex",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="x",
full_name="google.cloud.vision.v1p2beta1.NormalizedVertex.x",
index=0,
number=1,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="y",
full_name="google.cloud.vision.v1p2beta1.NormalizedVertex.y",
index=1,
number=2,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=117,
serialized_end=157,
)
_BOUNDINGPOLY = _descriptor.Descriptor(
name="BoundingPoly",
full_name="google.cloud.vision.v1p2beta1.BoundingPoly",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="vertices",
full_name="google.cloud.vision.v1p2beta1.BoundingPoly.vertices",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="normalized_vertices",
full_name="google.cloud.vision.v1p2beta1.BoundingPoly.normalized_vertices",
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=160,
serialized_end=309,
)
_POSITION = _descriptor.Descriptor(
name="Position",
full_name="google.cloud.vision.v1p2beta1.Position",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="x",
full_name="google.cloud.vision.v1p2beta1.Position.x",
index=0,
number=1,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="y",
full_name="google.cloud.vision.v1p2beta1.Position.y",
index=1,
number=2,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="z",
full_name="google.cloud.vision.v1p2beta1.Position.z",
index=2,
number=3,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=311,
serialized_end=354,
)
_BOUNDINGPOLY.fields_by_name["vertices"].message_type = _VERTEX
_BOUNDINGPOLY.fields_by_name["normalized_vertices"].message_type = _NORMALIZEDVERTEX
DESCRIPTOR.message_types_by_name["Vertex"] = _VERTEX
DESCRIPTOR.message_types_by_name["NormalizedVertex"] = _NORMALIZEDVERTEX
DESCRIPTOR.message_types_by_name["BoundingPoly"] = _BOUNDINGPOLY
DESCRIPTOR.message_types_by_name["Position"] = _POSITION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Vertex = _reflection.GeneratedProtocolMessageType(
"Vertex",
(_message.Message,),
dict(
DESCRIPTOR=_VERTEX,
__module__="google.cloud.vision_v1p2beta1.proto.geometry_pb2",
__doc__="""X coordinate.
Attributes:
y:
Y coordinate.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p2beta1.Vertex)
),
)
_sym_db.RegisterMessage(Vertex)
NormalizedVertex = _reflection.GeneratedProtocolMessageType(
"NormalizedVertex",
(_message.Message,),
dict(
DESCRIPTOR=_NORMALIZEDVERTEX,
__module__="google.cloud.vision_v1p2beta1.proto.geometry_pb2",
__doc__="""X coordinate.
Attributes:
y:
Y coordinate.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p2beta1.NormalizedVertex)
),
)
_sym_db.RegisterMessage(NormalizedVertex)
BoundingPoly = _reflection.GeneratedProtocolMessageType(
"BoundingPoly",
(_message.Message,),
dict(
DESCRIPTOR=_BOUNDINGPOLY,
__module__="google.cloud.vision_v1p2beta1.proto.geometry_pb2",
__doc__="""A bounding polygon for the detected image annotation.
Attributes:
vertices:
The bounding polygon vertices.
normalized_vertices:
The bounding polygon normalized vertices.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p2beta1.BoundingPoly)
),
)
_sym_db.RegisterMessage(BoundingPoly)
Position = _reflection.GeneratedProtocolMessageType(
"Position",
(_message.Message,),
dict(
DESCRIPTOR=_POSITION,
__module__="google.cloud.vision_v1p2beta1.proto.geometry_pb2",
__doc__="""A 3D position in the image, used primarily for Face detection landmarks.
A valid Position must have both x and y coordinates. The position
coordinates are in the same scale as the original image.
Attributes:
x:
X coordinate.
y:
Y coordinate.
z:
Z coordinate (or depth).
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p2beta1.Position)
),
)
_sym_db.RegisterMessage(Position)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| StarcoderdataPython |
5090136 | # -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class BaradData(AbstractModel):
    """A single metric series returned by Barad (the monitoring backend)."""

    def __init__(self):
        r"""
        :param MetricName: Metric name (connum: active TCP connections; new_conn: new TCP connections; inactive_conn: inactive connections; intraffic: inbound traffic; outtraffic: outbound traffic; alltraffic: inbound plus outbound traffic; inpkg: inbound packet rate; outpkg: outbound packet rate)
        :type MetricName: str
        :param Data: Array of metric values
        :type Data: list of float
        :param Count: Number of entries in the value array
        :type Count: int
        """
        self.MetricName = None
        self.Data = None
        self.Count = None

    def _deserialize(self, params):
        # Copy the known fields, then warn about any unrecognised keys.
        self.MetricName = params.get("MetricName")
        self.Data = params.get("Data")
        self.Count = params.get("Count")
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class BoundIpInfo(AbstractModel):
    """An IP bound to an Anti-DDoS package instance."""

    def __init__(self):
        r"""
        :param Ip: IP address
        :type Ip: str
        :param BizType: Bound product category: public (CVM/CLB), bm (bare metal), eni (ENI), vpngw (VPN gateway), natgw (NAT gateway), waf (WAF), fpc (finance), gaap (GAAP), other (hosted IP)
        :type BizType: str
        :param DeviceType: Sub-type within the category: cvm, lb, eni, vpngw, natgw, waf, fpc, gaap, other (hosted IP), eip (bare-metal elastic IP)
        :type DeviceType: str
        :param InstanceId: ID of the resource instance owning the IP; required when binding a new IP (e.g. eni-* for an ENI IP); use "none" for a hosted IP with no instance
        :type InstanceId: str
        :param IspCode: ISP code: 0 China Telecom, 1 China Unicom, 2 China Mobile, 5 BGP
        :type IspCode: int
        """
        self.Ip = None
        self.BizType = None
        self.DeviceType = None
        self.InstanceId = None
        self.IspCode = None

    def _deserialize(self, params):
        # Copy the known fields, then warn about any unrecognised keys.
        self.Ip = params.get("Ip")
        self.BizType = params.get("BizType")
        self.DeviceType = params.get("DeviceType")
        self.InstanceId = params.get("InstanceId")
        self.IspCode = params.get("IspCode")
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class CCAlarmThreshold(AbstractModel):
    """Alarm threshold for CC (connection flood) attacks."""

    def __init__(self):
        r"""
        :param AlarmThreshold: CC alarm threshold
        :type AlarmThreshold: int
        """
        self.AlarmThreshold = None

    def _deserialize(self, params):
        self.AlarmThreshold = params.get("AlarmThreshold")
        # Warn about keys present in the payload but unknown to this model.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class CCEventRecord(AbstractModel):
    """Record of a single CC attack event."""

    def __init__(self):
        r"""
        :param Business: Anti-DDoS product code (bgpip: Advanced IP; bgp: dedicated package; bgp-multip: shared package; net: Ultimate IP; basic: basic DDoS protection)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param Vip: Resource IP
        :type Vip: str
        :param StartTime: Attack start time
        :type StartTime: str
        :param EndTime: Attack end time
        :type EndTime: str
        :param ReqQps: Peak total request QPS
        :type ReqQps: int
        :param DropQps: Peak attack QPS
        :type DropQps: int
        :param AttackStatus: Attack status: 0 ongoing, 1 ended
        :type AttackStatus: int
        :param ResourceName: Resource name.  Note: may be null when unavailable.
        :type ResourceName: str
        :param DomainList: Domain list.  Note: may be null when unavailable.
        :type DomainList: str
        :param UriList: URI list.  Note: may be null when unavailable.
        :type UriList: str
        :param AttackipList: Attack source list.  Note: may be null when unavailable.
        :type AttackipList: str
        """
        self.Business = None
        self.Id = None
        self.Vip = None
        self.StartTime = None
        self.EndTime = None
        self.ReqQps = None
        self.DropQps = None
        self.AttackStatus = None
        self.ResourceName = None
        self.DomainList = None
        self.UriList = None
        self.AttackipList = None

    def _deserialize(self, params):
        # Copy the known fields, then warn about any unrecognised keys.
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.Vip = params.get("Vip")
        self.StartTime = params.get("StartTime")
        self.EndTime = params.get("EndTime")
        self.ReqQps = params.get("ReqQps")
        self.DropQps = params.get("DropQps")
        self.AttackStatus = params.get("AttackStatus")
        self.ResourceName = params.get("ResourceName")
        self.DomainList = params.get("DomainList")
        self.UriList = params.get("UriList")
        self.AttackipList = params.get("AttackipList")
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class CCFrequencyRule(AbstractModel):
    """Access-frequency control rule for CC protection."""

    def __init__(self):
        r"""
        :param CCFrequencyRuleId: ID of the CC access-frequency control rule
        :type CCFrequencyRuleId: str
        :param Uri: URI string; must start with /, e.g. /abc/a.php; at most 31 characters; when Uri is /, only prefix matching is allowed
        :type Uri: str
        :param UserAgent: User-Agent string, at most 80 characters
        :type UserAgent: str
        :param Cookie: Cookie string, at most 40 characters
        :type Cookie: str
        :param Mode: Matching mode: "include" (prefix match) or "equal" (exact match)
        :type Mode: str
        :param Period: Statistics period in seconds: 10, 30 or 60
        :type Period: int
        :param ReqNumber: Access count, 1-10000
        :type ReqNumber: int
        :param Act: Action to take: "alg" (human verification) or "drop" (block)
        :type Act: str
        :param ExeDuration: Action duration in seconds, 1-900
        :type ExeDuration: int
        """
        self.CCFrequencyRuleId = None
        self.Uri = None
        self.UserAgent = None
        self.Cookie = None
        self.Mode = None
        self.Period = None
        self.ReqNumber = None
        self.Act = None
        self.ExeDuration = None

    def _deserialize(self, params):
        # Copy the known fields, then warn about any unrecognised keys.
        self.CCFrequencyRuleId = params.get("CCFrequencyRuleId")
        self.Uri = params.get("Uri")
        self.UserAgent = params.get("UserAgent")
        self.Cookie = params.get("Cookie")
        self.Mode = params.get("Mode")
        self.Period = params.get("Period")
        self.ReqNumber = params.get("ReqNumber")
        self.Act = params.get("Act")
        self.ExeDuration = params.get("ExeDuration")
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class CCPolicy(AbstractModel):
    """Custom CC protection policy."""

    def __init__(self):
        r"""
        :param Name: Policy name
        :type Name: str
        :param Smode: Matching mode: matching (rule match) or speedlimit (rate limiting)
        :type Smode: str
        :param SetId: Policy ID
        :type SetId: str
        :param Frequency: Allowed requests per minute
        :type Frequency: int
        :param ExeMode: Enforcement mode: alg (CAPTCHA) or drop (block)
        :type ExeMode: str
        :param Switch: Whether the policy is active
        :type Switch: int
        :param CreateTime: Creation time
        :type CreateTime: str
        :param RuleList: Rule list
        :type RuleList: list of CCRule
        :param IpList: IP list; pass an empty array (not null) when there are no IPs
        :type IpList: list of str
        :param Protocol: CC protection protocol: http or https
        :type Protocol: str
        :param RuleId: Optional; forwarding-rule ID of the HTTPS CC-protected domain
        :type RuleId: str
        :param Domain: HTTPS CC-protected domain
        :type Domain: str
        """
        self.Name = None
        self.Smode = None
        self.SetId = None
        self.Frequency = None
        self.ExeMode = None
        self.Switch = None
        self.CreateTime = None
        self.RuleList = None
        self.IpList = None
        self.Protocol = None
        self.RuleId = None
        self.Domain = None

    def _deserialize(self, params):
        # Copy scalar fields, build nested CCRule objects, then warn about
        # any unrecognised keys.
        self.Name = params.get("Name")
        self.Smode = params.get("Smode")
        self.SetId = params.get("SetId")
        self.Frequency = params.get("Frequency")
        self.ExeMode = params.get("ExeMode")
        self.Switch = params.get("Switch")
        self.CreateTime = params.get("CreateTime")
        raw_rules = params.get("RuleList")
        if raw_rules is not None:
            self.RuleList = []
            for raw in raw_rules:
                rule = CCRule()
                rule._deserialize(raw)
                self.RuleList.append(rule)
        self.IpList = params.get("IpList")
        self.Protocol = params.get("Protocol")
        self.RuleId = params.get("RuleId")
        self.Domain = params.get("Domain")
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class CCRule(AbstractModel):
    """A single matching rule inside a custom CC policy."""

    def __init__(self):
        r"""
        :param Skey: Rule key: host, cgi, ua or referer
        :type Skey: str
        :param Operator: Rule condition: include, not_include or equal
        :type Operator: str
        :param Value: Rule value, shorter than 31 bytes
        :type Value: str
        """
        self.Skey = None
        self.Operator = None
        self.Value = None

    def _deserialize(self, params):
        # Copy the known fields, then warn about any unrecognised keys.
        self.Skey = params.get("Skey")
        self.Operator = params.get("Operator")
        self.Value = params.get("Value")
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class CCRuleConfig(AbstractModel):
    """Layer-7 custom CC rule."""

    def __init__(self):
        r"""
        :param Period: Statistics period in seconds: 10, 30 or 60
        :type Period: int
        :param ReqNumber: Access count, 1-10000
        :type ReqNumber: int
        :param Action: Action to take: "alg" (human verification) or "drop" (block)
        :type Action: str
        :param ExeDuration: Action duration in seconds, 1-900
        :type ExeDuration: int
        """
        self.Period = None
        self.ReqNumber = None
        self.Action = None
        self.ExeDuration = None

    def _deserialize(self, params):
        # Copy the known fields, then warn about any unrecognised keys.
        self.Period = params.get("Period")
        self.ReqNumber = params.get("ReqNumber")
        self.Action = params.get("Action")
        self.ExeDuration = params.get("ExeDuration")
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class CreateBasicDDoSAlarmThresholdRequest(AbstractModel):
    """Request body of CreateBasicDDoSAlarmThreshold."""

    def __init__(self):
        r"""
        :param Business: Anti-DDoS product code (basic: basic DDoS protection)
        :type Business: str
        :param Method: "get" reads the alarm threshold; "set" writes it
        :type Method: str
        :param AlarmType: Optional threshold type (1: inbound traffic, 2: scrubbed traffic); required when Method is "set"
        :type AlarmType: int
        :param AlarmThreshold: Optional threshold value; required when Method is "set"; 0 clears the configuration
        :type AlarmThreshold: int
        """
        self.Business = None
        self.Method = None
        self.AlarmType = None
        self.AlarmThreshold = None

    def _deserialize(self, params):
        # Copy the known fields, then warn about any unrecognised keys.
        self.Business = params.get("Business")
        self.Method = params.get("Method")
        self.AlarmType = params.get("AlarmType")
        self.AlarmThreshold = params.get("AlarmThreshold")
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class CreateBasicDDoSAlarmThresholdResponse(AbstractModel):
    """Response body of CreateBasicDDoSAlarmThreshold."""

    def __init__(self):
        r"""
        :param AlarmThreshold: Configured alarm threshold; greater than 0 when a threshold exists, 0 otherwise
        :type AlarmThreshold: int
        :param AlarmType: Threshold type (1: inbound traffic, 2: scrubbed traffic); meaningful when AlarmThreshold > 0
        :type AlarmType: int
        :param RequestId: Unique request ID returned with every request; provide it when reporting an issue
        :type RequestId: str
        """
        self.AlarmThreshold = None
        self.AlarmType = None
        self.RequestId = None

    def _deserialize(self, params):
        self.AlarmThreshold = params.get("AlarmThreshold")
        self.AlarmType = params.get("AlarmType")
        self.RequestId = params.get("RequestId")
class CreateBoundIPRequest(AbstractModel):
    """Request body of CreateBoundIP."""

    def __init__(self):
        r"""
        :param Business: Anti-DDoS product code (bgp: dedicated package; bgp-multip: shared package)
        :type Business: str
        :param Id: Resource instance ID
        :type Id: str
        :param BoundDevList: IPs to bind to the instance; at most one for a dedicated package; may be empty, but BoundDevList and UnBoundDevList cannot both be empty
        :type BoundDevList: list of BoundIpInfo
        :param UnBoundDevList: IPs to unbind from the instance; at most one for a dedicated package; may be empty, but BoundDevList and UnBoundDevList cannot both be empty
        :type UnBoundDevList: list of BoundIpInfo
        :param CopyPolicy: Deprecated; leave unset
        :type CopyPolicy: str
        """
        self.Business = None
        self.Id = None
        self.BoundDevList = None
        self.UnBoundDevList = None
        self.CopyPolicy = None

    def _deserialize(self, params):
        # Copy scalar fields, build nested BoundIpInfo objects, then warn
        # about any unrecognised keys.
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        raw_bound = params.get("BoundDevList")
        if raw_bound is not None:
            self.BoundDevList = []
            for raw in raw_bound:
                info = BoundIpInfo()
                info._deserialize(raw)
                self.BoundDevList.append(info)
        raw_unbound = params.get("UnBoundDevList")
        if raw_unbound is not None:
            self.UnBoundDevList = []
            for raw in raw_unbound:
                info = BoundIpInfo()
                info._deserialize(raw)
                self.UnBoundDevList.append(info)
        self.CopyPolicy = params.get("CopyPolicy")
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class CreateBoundIPResponse(AbstractModel):
    """Response body of CreateBoundIP."""

    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned with every request; provide it when reporting an issue
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_success = params.get("Success")
        if raw_success is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(raw_success)
        self.RequestId = params.get("RequestId")
class CreateCCFrequencyRulesRequest(AbstractModel):
    """Request body of CreateCCFrequencyRules."""

    def __init__(self):
        r"""
        :param Business: Anti-DDoS product code (bgpip: Advanced IP; net: Ultimate IP)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param RuleId: Layer-7 forwarding rule ID (obtainable via the layer-7 rule query API)
        :type RuleId: str
        :param Mode: Matching mode: "include" (prefix match) or "equal" (exact match)
        :type Mode: str
        :param Period: Statistics period in seconds: 10, 30 or 60
        :type Period: int
        :param ReqNumber: Access count, 1-10000
        :type ReqNumber: int
        :param Act: Action to take: "alg" (human verification) or "drop" (block)
        :type Act: str
        :param ExeDuration: Action duration in seconds, 1-900
        :type ExeDuration: int
        :param Uri: URI string; must start with /, e.g. /abc/a.php; at most 31 characters; when Uri is /, only prefix matching is allowed
        :type Uri: str
        :param UserAgent: User-Agent string, at most 80 characters
        :type UserAgent: str
        :param Cookie: Cookie string, at most 40 characters
        :type Cookie: str
        """
        self.Business = None
        self.Id = None
        self.RuleId = None
        self.Mode = None
        self.Period = None
        self.ReqNumber = None
        self.Act = None
        self.ExeDuration = None
        self.Uri = None
        self.UserAgent = None
        self.Cookie = None

    def _deserialize(self, params):
        # Copy the known fields, then warn about any unrecognised keys.
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.RuleId = params.get("RuleId")
        self.Mode = params.get("Mode")
        self.Period = params.get("Period")
        self.ReqNumber = params.get("ReqNumber")
        self.Act = params.get("Act")
        self.ExeDuration = params.get("ExeDuration")
        self.Uri = params.get("Uri")
        self.UserAgent = params.get("UserAgent")
        self.Cookie = params.get("Cookie")
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class CreateCCFrequencyRulesResponse(AbstractModel):
    """Response body of CreateCCFrequencyRules."""

    def __init__(self):
        r"""
        :param CCFrequencyRuleId: ID of the created CC access-frequency control rule
        :type CCFrequencyRuleId: str
        :param RequestId: Unique request ID returned with every request; provide it when reporting an issue
        :type RequestId: str
        """
        self.CCFrequencyRuleId = None
        self.RequestId = None

    def _deserialize(self, params):
        self.CCFrequencyRuleId = params.get("CCFrequencyRuleId")
        self.RequestId = params.get("RequestId")
class CreateCCSelfDefinePolicyRequest(AbstractModel):
    """Request body of CreateCCSelfDefinePolicy."""

    def __init__(self):
        r"""
        :param Business: Anti-DDoS product code (bgpip: Advanced IP; bgp: dedicated package; bgp-multip: shared package; net: Ultimate IP)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param Policy: CC policy description
        :type Policy: :class:`tencentcloud.dayu.v20180709.models.CCPolicy`
        """
        self.Business = None
        self.Id = None
        self.Policy = None

    def _deserialize(self, params):
        # Copy scalar fields, build the nested CCPolicy, then warn about any
        # unrecognised keys.
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        raw_policy = params.get("Policy")
        if raw_policy is not None:
            self.Policy = CCPolicy()
            self.Policy._deserialize(raw_policy)
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class CreateCCSelfDefinePolicyResponse(AbstractModel):
    """Response body of CreateCCSelfDefinePolicy."""

    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned with every request; provide it when reporting an issue
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_success = params.get("Success")
        if raw_success is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(raw_success)
        self.RequestId = params.get("RequestId")
class CreateDDoSPolicyCaseRequest(AbstractModel):
    """Request body of CreateDDoSPolicyCase."""

    def __init__(self):
        r"""
        :param Business: Anti-DDoS product code (bgpip: Advanced IP; bgp: dedicated package; bgp-multip: shared package; net: Ultimate IP)
        :type Business: str
        :param CaseName: Policy scenario name, fewer than 64 characters
        :type CaseName: str
        :param PlatformTypes: Development platforms: PC, MOBILE, TV, SERVER
        :type PlatformTypes: list of str
        :param AppType: Application category: WEB, GAME, APP, OTHER
        :type AppType: str
        :param AppProtocols: Application protocols: tcp, udp, icmp, all
        :type AppProtocols: list of str
        :param TcpSportStart: First TCP service port, in (0, 65535]
        :type TcpSportStart: str
        :param TcpSportEnd: Last TCP service port, in (0, 65535]; must be >= TcpSportStart
        :type TcpSportEnd: str
        :param UdpSportStart: First UDP service port, in (0, 65535]
        :type UdpSportStart: str
        :param UdpSportEnd: Last UDP service port, in (0, 65535); must be >= UdpSportStart
        :type UdpSportEnd: str
        :param HasAbroad: Whether there are overseas users: no / yes
        :type HasAbroad: str
        :param HasInitiateTcp: Whether outbound TCP requests are initiated: no / yes
        :type HasInitiateTcp: str
        :param HasInitiateUdp: Whether outbound UDP requests are initiated: no / yes
        :type HasInitiateUdp: str
        :param PeerTcpPort: Port used for outbound TCP requests, in (0, 65535]
        :type PeerTcpPort: str
        :param PeerUdpPort: Port used for outbound UDP requests, in (0, 65535]
        :type PeerUdpPort: str
        :param TcpFootprint: Fixed signature of the TCP payload, fewer than 512 characters
        :type TcpFootprint: str
        :param UdpFootprint: Fixed signature of the UDP payload, fewer than 512 characters
        :type UdpFootprint: str
        :param WebApiUrl: API URLs of the web service
        :type WebApiUrl: list of str
        :param MinTcpPackageLen: Minimum TCP packet length, in (0, 1500)
        :type MinTcpPackageLen: str
        :param MaxTcpPackageLen: Maximum TCP packet length, in (0, 1500); must be >= MinTcpPackageLen
        :type MaxTcpPackageLen: str
        :param MinUdpPackageLen: Minimum UDP packet length, in (0, 1500)
        :type MinUdpPackageLen: str
        :param MaxUdpPackageLen: Maximum UDP packet length, in (0, 1500); must be >= MinUdpPackageLen
        :type MaxUdpPackageLen: str
        :param HasVPN: Whether VPN traffic is present: no / yes
        :type HasVPN: str
        :param TcpPortList: TCP service ports; single ports and ranges, e.g. 80,443,700-800,53,1000-3000
        :type TcpPortList: str
        :param UdpPortList: UDP service ports; single ports and ranges, e.g. 80,443,700-800,53,1000-3000
        :type UdpPortList: str
        """
        self.Business = None
        self.CaseName = None
        self.PlatformTypes = None
        self.AppType = None
        self.AppProtocols = None
        self.TcpSportStart = None
        self.TcpSportEnd = None
        self.UdpSportStart = None
        self.UdpSportEnd = None
        self.HasAbroad = None
        self.HasInitiateTcp = None
        self.HasInitiateUdp = None
        self.PeerTcpPort = None
        self.PeerUdpPort = None
        self.TcpFootprint = None
        self.UdpFootprint = None
        self.WebApiUrl = None
        self.MinTcpPackageLen = None
        self.MaxTcpPackageLen = None
        self.MinUdpPackageLen = None
        self.MaxUdpPackageLen = None
        self.HasVPN = None
        self.TcpPortList = None
        self.UdpPortList = None

    def _deserialize(self, params):
        # Copy the known fields, then warn about any unrecognised keys.
        self.Business = params.get("Business")
        self.CaseName = params.get("CaseName")
        self.PlatformTypes = params.get("PlatformTypes")
        self.AppType = params.get("AppType")
        self.AppProtocols = params.get("AppProtocols")
        self.TcpSportStart = params.get("TcpSportStart")
        self.TcpSportEnd = params.get("TcpSportEnd")
        self.UdpSportStart = params.get("UdpSportStart")
        self.UdpSportEnd = params.get("UdpSportEnd")
        self.HasAbroad = params.get("HasAbroad")
        self.HasInitiateTcp = params.get("HasInitiateTcp")
        self.HasInitiateUdp = params.get("HasInitiateUdp")
        self.PeerTcpPort = params.get("PeerTcpPort")
        self.PeerUdpPort = params.get("PeerUdpPort")
        self.TcpFootprint = params.get("TcpFootprint")
        self.UdpFootprint = params.get("UdpFootprint")
        self.WebApiUrl = params.get("WebApiUrl")
        self.MinTcpPackageLen = params.get("MinTcpPackageLen")
        self.MaxTcpPackageLen = params.get("MaxTcpPackageLen")
        self.MinUdpPackageLen = params.get("MinUdpPackageLen")
        self.MaxUdpPackageLen = params.get("MaxUdpPackageLen")
        self.HasVPN = params.get("HasVPN")
        self.TcpPortList = params.get("TcpPortList")
        self.UdpPortList = params.get("UdpPortList")
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class CreateDDoSPolicyCaseResponse(AbstractModel):
    """Response body of CreateDDoSPolicyCase."""

    def __init__(self):
        r"""
        :param SceneId: Policy scenario ID
        :type SceneId: str
        :param RequestId: Unique request ID returned with every request; provide it when reporting an issue
        :type RequestId: str
        """
        self.SceneId = None
        self.RequestId = None

    def _deserialize(self, params):
        self.SceneId = params.get("SceneId")
        self.RequestId = params.get("RequestId")
class CreateDDoSPolicyRequest(AbstractModel):
    """Request body of CreateDDoSPolicy."""

    def __init__(self):
        r"""
        :param Business: Anti-DDoS product code (bgpip: Advanced IP; bgp: dedicated package; bgp-multip: shared package; net: Ultimate IP)
        :type Business: str
        :param DropOptions: Protocol blocking; required, and the array must hold exactly one element
        :type DropOptions: list of DDoSPolicyDropOption
        :param Name: Policy name
        :type Name: str
        :param PortLimits: Blocked ports; pass an empty array when none
        :type PortLimits: list of DDoSPolicyPortLimit
        :param IpAllowDenys: Source-IP black/white list; pass an empty array when none
        :type IpAllowDenys: list of IpBlackWhite
        :param PacketFilters: Packet filters; pass an empty array when none
        :type PacketFilters: list of DDoSPolicyPacketFilter
        :param WaterPrint: Watermark policies; pass an empty array when disabled; at most one entry
        :type WaterPrint: list of WaterPrintPolicy
        """
        self.Business = None
        self.DropOptions = None
        self.Name = None
        self.PortLimits = None
        self.IpAllowDenys = None
        self.PacketFilters = None
        self.WaterPrint = None

    def _deserialize(self, params):
        # Copy scalar fields, build the nested policy objects, then warn
        # about any unrecognised keys.
        self.Business = params.get("Business")
        raw_drops = params.get("DropOptions")
        if raw_drops is not None:
            self.DropOptions = []
            for raw in raw_drops:
                drop = DDoSPolicyDropOption()
                drop._deserialize(raw)
                self.DropOptions.append(drop)
        self.Name = params.get("Name")
        raw_limits = params.get("PortLimits")
        if raw_limits is not None:
            self.PortLimits = []
            for raw in raw_limits:
                limit = DDoSPolicyPortLimit()
                limit._deserialize(raw)
                self.PortLimits.append(limit)
        raw_acls = params.get("IpAllowDenys")
        if raw_acls is not None:
            self.IpAllowDenys = []
            for raw in raw_acls:
                acl = IpBlackWhite()
                acl._deserialize(raw)
                self.IpAllowDenys.append(acl)
        raw_filters = params.get("PacketFilters")
        if raw_filters is not None:
            self.PacketFilters = []
            for raw in raw_filters:
                pfilter = DDoSPolicyPacketFilter()
                pfilter._deserialize(raw)
                self.PacketFilters.append(pfilter)
        raw_prints = params.get("WaterPrint")
        if raw_prints is not None:
            self.WaterPrint = []
            for raw in raw_prints:
                wprint = WaterPrintPolicy()
                wprint._deserialize(raw)
                self.WaterPrint.append(wprint)
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class CreateDDoSPolicyResponse(AbstractModel):
    """Response body of CreateDDoSPolicy."""

    def __init__(self):
        r"""
        :param PolicyId: Policy ID
        :type PolicyId: str
        :param RequestId: Unique request ID returned with every request; provide it when reporting an issue
        :type RequestId: str
        """
        self.PolicyId = None
        self.RequestId = None

    def _deserialize(self, params):
        self.PolicyId = params.get("PolicyId")
        self.RequestId = params.get("RequestId")
class CreateInstanceNameRequest(AbstractModel):
    """Request body of CreateInstanceName."""

    def __init__(self):
        r"""
        :param Business: Anti-DDoS product code (bgpip: Advanced IP; bgp: dedicated package; bgp-multip: shared package; net: Ultimate IP)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param Name: Resource instance name, at most 32 characters
        :type Name: str
        """
        self.Business = None
        self.Id = None
        self.Name = None

    def _deserialize(self, params):
        # Copy the known fields, then warn about any unrecognised keys.
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.Name = params.get("Name")
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class CreateInstanceNameResponse(AbstractModel):
    """Response body of CreateInstanceName."""

    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned with every request; provide it when reporting an issue
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_success = params.get("Success")
        if raw_success is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(raw_success)
        self.RequestId = params.get("RequestId")
class CreateL4HealthConfigRequest(AbstractModel):
    """Request body of CreateL4HealthConfig."""

    def __init__(self):
        r"""
        :param Business: Anti-DDoS product code (bgpip: Advanced IP; net: Ultimate IP)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param HealthConfig: Array of layer-4 health-check configurations
        :type HealthConfig: list of L4HealthConfig
        """
        self.Business = None
        self.Id = None
        self.HealthConfig = None

    def _deserialize(self, params):
        # Copy scalar fields, build nested health-check objects, then warn
        # about any unrecognised keys.
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        raw_configs = params.get("HealthConfig")
        if raw_configs is not None:
            self.HealthConfig = []
            for raw in raw_configs:
                config = L4HealthConfig()
                config._deserialize(raw)
                self.HealthConfig.append(config)
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class CreateL4HealthConfigResponse(AbstractModel):
    """Response body of CreateL4HealthConfig."""

    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned with every request; provide it when reporting an issue
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_success = params.get("Success")
        if raw_success is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(raw_success)
        self.RequestId = params.get("RequestId")
class CreateL4RulesRequest(AbstractModel):
    """Request body of CreateL4Rules."""

    def __init__(self):
        r"""
        :param Business: Anti-DDoS product code (bgpip: Advanced IP; net: Ultimate IP)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param Rules: Rule list
        :type Rules: list of L4RuleEntry
        """
        self.Business = None
        self.Id = None
        self.Rules = None

    def _deserialize(self, params):
        # Copy scalar fields, build nested rule objects, then warn about any
        # unrecognised keys.
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        raw_rules = params.get("Rules")
        if raw_rules is not None:
            self.Rules = []
            for raw in raw_rules:
                rule = L4RuleEntry()
                rule._deserialize(raw)
                self.Rules.append(rule)
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class CreateL4RulesResponse(AbstractModel):
    """Response body of CreateL4Rules."""

    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned with every request; provide it when reporting an issue
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_success = params.get("Success")
        if raw_success is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(raw_success)
        self.RequestId = params.get("RequestId")
class CreateL7CCRuleRequest(AbstractModel):
    """Request body of CreateL7CCRule."""

    def __init__(self):
        r"""
        :param Business: Anti-DDoS product code (bgpip: Advanced IP; net: Ultimate IP)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param Method: Operation: query, add or del
        :type Method: str
        :param RuleId: Layer-7 forwarding rule ID, e.g. rule-0000001
        :type RuleId: str
        :param RuleConfig: Layer-7 custom CC rule parameters; optional for query; required for add/del, and the array must hold exactly one element
        :type RuleConfig: list of CCRuleConfig
        """
        self.Business = None
        self.Id = None
        self.Method = None
        self.RuleId = None
        self.RuleConfig = None

    def _deserialize(self, params):
        # Copy scalar fields, build nested rule configs, then warn about any
        # unrecognised keys.
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.Method = params.get("Method")
        self.RuleId = params.get("RuleId")
        raw_configs = params.get("RuleConfig")
        if raw_configs is not None:
            self.RuleConfig = []
            for raw in raw_configs:
                config = CCRuleConfig()
                config._deserialize(raw)
                self.RuleConfig.append(config)
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class CreateL7CCRuleResponse(AbstractModel):
    """Response body of CreateL7CCRule."""

    def __init__(self):
        r"""
        :param RuleConfig: Layer-7 custom CC rule parameters; empty array when no custom CC rule is enabled
        :type RuleConfig: list of CCRuleConfig
        :param RequestId: Unique request ID returned with every request; provide it when reporting an issue
        :type RequestId: str
        """
        self.RuleConfig = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_configs = params.get("RuleConfig")
        if raw_configs is not None:
            self.RuleConfig = []
            for raw in raw_configs:
                config = CCRuleConfig()
                config._deserialize(raw)
                self.RuleConfig.append(config)
        self.RequestId = params.get("RequestId")
class CreateL7HealthConfigRequest(AbstractModel):
    """Request body of CreateL7HealthConfig."""

    def __init__(self):
        r"""
        :param Business: Anti-DDoS product code (bgpip: Advanced IP; net: Ultimate IP)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param HealthConfig: Array of layer-7 health-check configurations
        :type HealthConfig: list of L7HealthConfig
        """
        self.Business = None
        self.Id = None
        self.HealthConfig = None

    def _deserialize(self, params):
        # Copy scalar fields, build nested health-check objects, then warn
        # about any unrecognised keys.
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        raw_configs = params.get("HealthConfig")
        if raw_configs is not None:
            self.HealthConfig = []
            for raw in raw_configs:
                config = L7HealthConfig()
                config._deserialize(raw)
                self.HealthConfig.append(config)
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class CreateL7HealthConfigResponse(AbstractModel):
    """Response body of CreateL7HealthConfig."""

    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned with every request; provide it when reporting an issue
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_success = params.get("Success")
        if raw_success is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(raw_success)
        self.RequestId = params.get("RequestId")
class CreateL7RuleCertRequest(AbstractModel):
    """Request body of CreateL7RuleCert."""

    def __init__(self):
        r"""
        :param Business: Anti-DDoS product code (bgpip: Advanced IP; net: Ultimate IP)
        :type Business: str
        :param Id: Resource instance ID, e.g. an Advanced IP or Ultimate IP instance ID
        :type Id: str
        :param RuleId: Rule ID
        :type RuleId: str
        :param CertType: Certificate type; required for HTTPS; only 2 (Tencent Cloud managed certificate) is accepted
        :type CertType: int
        :param SSLId: Managed certificate ID; required when the certificate is Tencent Cloud managed
        :type SSLId: str
        :param Cert: Certificate body for self-owned certificates; deprecated (self-owned certificates are no longer supported), leave unset
        :type Cert: str
        :param PrivateKey: Certificate key for self-owned certificates; deprecated (self-owned certificates are no longer supported), leave unset
        :type PrivateKey: str
        """
        self.Business = None
        self.Id = None
        self.RuleId = None
        self.CertType = None
        self.SSLId = None
        self.Cert = None
        self.PrivateKey = None

    def _deserialize(self, params):
        # Copy the known fields, then warn about any unrecognised keys.
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.RuleId = params.get("RuleId")
        self.CertType = params.get("CertType")
        self.SSLId = params.get("SSLId")
        self.Cert = params.get("Cert")
        self.PrivateKey = params.get("PrivateKey")
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class CreateL7RuleCertResponse(AbstractModel):
    """Response body of CreateL7RuleCert."""

    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned with every request; provide it when reporting an issue
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_success = params.get("Success")
        if raw_success is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(raw_success)
        self.RequestId = params.get("RequestId")
class CreateL7RulesRequest(AbstractModel):
    """Request body of CreateL7Rules."""

    def __init__(self):
        r"""
        :param Business: Anti-DDoS product code (bgpip: Advanced IP; net: Ultimate IP)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param Rules: Rule list
        :type Rules: list of L7RuleEntry
        """
        self.Business = None
        self.Id = None
        self.Rules = None

    def _deserialize(self, params):
        # Copy scalar fields, build nested rule objects, then warn about any
        # unrecognised keys.
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        raw_rules = params.get("Rules")
        if raw_rules is not None:
            self.Rules = []
            for raw in raw_rules:
                rule = L7RuleEntry()
                rule._deserialize(raw)
                self.Rules.append(rule)
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class CreateL7RulesResponse(AbstractModel):
    """Response body of CreateL7Rules."""

    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned with every request; provide it when reporting an issue
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_success = params.get("Success")
        if raw_success is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(raw_success)
        self.RequestId = params.get("RequestId")
class CreateL7RulesUploadRequest(AbstractModel):
    """CreateL7RulesUpload request structure.
    """

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; net: Anti-DDoS Ultimate)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param Rules: Rule list
        :type Rules: list of L7RuleEntry
        """
        self.Business = None
        self.Id = None
        self.Rules = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        if params.get("Rules") is not None:
            self.Rules = []
            for item in params.get("Rules"):
                obj = L7RuleEntry()
                obj._deserialize(item)
                self.Rules.append(obj)
        # Fix vs. generated code: warning text read "fileds"; the manual
        # removal loop is replaced by an equivalent set difference.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class CreateL7RulesUploadResponse(AbstractModel):
    """CreateL7RulesUpload response structure.
    """

    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned for every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        # Build the nested SuccessCode only when the key is present.
        raw_success = params.get("Success")
        if raw_success is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(raw_success)
        self.RequestId = params.get("RequestId")
class CreateNetReturnRequest(AbstractModel):
    """CreateNetReturn request structure.
    """

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (net: Anti-DDoS Ultimate)
        :type Business: str
        :param Id: Resource instance ID
        :type Id: str
        """
        self.Business = None
        self.Id = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        # Fix vs. generated code: warning text read "fileds"; the manual
        # removal loop is replaced by an equivalent set difference.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class CreateNetReturnResponse(AbstractModel):
    """CreateNetReturn response structure.
    """

    def __init__(self):
        r"""
        :param RequestId: Unique request ID returned for every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        # Only the request ID comes back for this call.
        self.RequestId = params.get("RequestId", None)
class CreateNewL4RulesRequest(AbstractModel):
    """CreateNewL4Rules request structure.
    """

    def __init__(self):
        r"""
        :param Business: Anti-DDoS product code: bgpip
        :type Business: str
        :param IdList: List of resource IDs the rules are added to
        :type IdList: list of str
        :param VipList: List of resource IPs the rules are added to
        :type VipList: list of str
        :param Rules: Rule list
        :type Rules: list of L4RuleEntry
        """
        self.Business = None
        self.IdList = None
        self.VipList = None
        self.Rules = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.Business = params.get("Business")
        self.IdList = params.get("IdList")
        self.VipList = params.get("VipList")
        if params.get("Rules") is not None:
            self.Rules = []
            for item in params.get("Rules"):
                obj = L4RuleEntry()
                obj._deserialize(item)
                self.Rules.append(obj)
        # Fix vs. generated code: warning text read "fileds"; the manual
        # removal loop is replaced by an equivalent set difference.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class CreateNewL4RulesResponse(AbstractModel):
    """CreateNewL4Rules response structure.
    """

    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned for every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        # Build the nested SuccessCode only when the key is present.
        raw_success = params.get("Success")
        if raw_success is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(raw_success)
        self.RequestId = params.get("RequestId")
class CreateNewL7RulesRequest(AbstractModel):
    """CreateNewL7Rules request structure.
    """

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced)
        :type Business: str
        :param IdList: List of resource IDs
        :type IdList: list of str
        :param VipList: List of resource IPs
        :type VipList: list of str
        :param Rules: Rule list
        :type Rules: list of L7RuleEntry
        """
        self.Business = None
        self.IdList = None
        self.VipList = None
        self.Rules = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.Business = params.get("Business")
        self.IdList = params.get("IdList")
        self.VipList = params.get("VipList")
        if params.get("Rules") is not None:
            self.Rules = []
            for item in params.get("Rules"):
                obj = L7RuleEntry()
                obj._deserialize(item)
                self.Rules.append(obj)
        # Fix vs. generated code: warning text read "fileds"; the manual
        # removal loop is replaced by an equivalent set difference.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class CreateNewL7RulesResponse(AbstractModel):
    """CreateNewL7Rules response structure.
    """

    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned for every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        # Build the nested SuccessCode only when the key is present.
        raw_success = params.get("Success")
        if raw_success is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(raw_success)
        self.RequestId = params.get("RequestId")
class CreateNewL7RulesUploadRequest(AbstractModel):
    """CreateNewL7RulesUpload request structure.
    """

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced)
        :type Business: str
        :param IdList: List of resource IDs
        :type IdList: list of str
        :param VipList: List of resource IPs
        :type VipList: list of str
        :param Rules: Rule list
        :type Rules: list of L7RuleEntry
        """
        self.Business = None
        self.IdList = None
        self.VipList = None
        self.Rules = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.Business = params.get("Business")
        self.IdList = params.get("IdList")
        self.VipList = params.get("VipList")
        if params.get("Rules") is not None:
            self.Rules = []
            for item in params.get("Rules"):
                obj = L7RuleEntry()
                obj._deserialize(item)
                self.Rules.append(obj)
        # Fix vs. generated code: warning text read "fileds"; the manual
        # removal loop is replaced by an equivalent set difference.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class CreateNewL7RulesUploadResponse(AbstractModel):
    """CreateNewL7RulesUpload response structure.
    """

    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned for every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        # Build the nested SuccessCode only when the key is present.
        raw_success = params.get("Success")
        if raw_success is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(raw_success)
        self.RequestId = params.get("RequestId")
class CreateUnblockIpRequest(AbstractModel):
    """CreateUnblockIp request structure.
    """

    def __init__(self):
        r"""
        :param Ip: IP address
        :type Ip: str
        :param ActionType: Unblocking type (user: self-service; auto: automatic; update: unblocked by upgrade; bind: unblocked by binding an Anti-DDoS package)
        :type ActionType: str
        """
        self.Ip = None
        self.ActionType = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.Ip = params.get("Ip")
        self.ActionType = params.get("ActionType")
        # Fix vs. generated code: warning text read "fileds"; the manual
        # removal loop is replaced by an equivalent set difference.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class CreateUnblockIpResponse(AbstractModel):
    """CreateUnblockIp response structure.
    """

    def __init__(self):
        r"""
        :param Ip: IP address
        :type Ip: str
        :param ActionType: Unblocking type (user: self-service; auto: automatic; update: unblocked by upgrade; bind: unblocked by binding an Anti-DDoS package)
        :type ActionType: str
        :param UnblockTime: Estimated unblocking time
        :type UnblockTime: str
        :param RequestId: Unique request ID returned for every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.Ip = None
        self.ActionType = None
        self.UnblockTime = None
        self.RequestId = None

    def _deserialize(self, params):
        # Every field is a plain scalar; copy them over uniformly.
        for field in ("Ip", "ActionType", "UnblockTime", "RequestId"):
            setattr(self, field, params.get(field))
class DDoSAlarmThreshold(AbstractModel):
    """DDoS alarm threshold.
    """

    def __init__(self):
        r"""
        :param AlarmType: Alarm threshold type: 1 - inbound traffic, 2 - cleaned traffic
        :type AlarmType: int
        :param AlarmThreshold: Alarm threshold, greater than 0
        :type AlarmThreshold: int
        """
        self.AlarmType = None
        self.AlarmThreshold = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.AlarmType = params.get("AlarmType")
        self.AlarmThreshold = params.get("AlarmThreshold")
        # Fix vs. generated code: warning text read "fileds"; the manual
        # removal loop is replaced by an equivalent set difference.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DDoSAttackSourceRecord(AbstractModel):
    """Attack source information.
    """

    def __init__(self):
        r"""
        :param SrcIp: Attack source IP
        :type SrcIp: str
        :param Province: Province (valid within Mainland China, excluding Hong Kong, Macao and Taiwan)
        :type Province: str
        :param Nation: Country
        :type Nation: str
        :param PacketSum: Cumulative number of attack packets
        :type PacketSum: int
        :param PacketLen: Cumulative attack traffic
        :type PacketLen: int
        """
        self.SrcIp = None
        self.Province = None
        self.Nation = None
        self.PacketSum = None
        self.PacketLen = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.SrcIp = params.get("SrcIp")
        self.Province = params.get("Province")
        self.Nation = params.get("Nation")
        self.PacketSum = params.get("PacketSum")
        self.PacketLen = params.get("PacketLen")
        # Fix vs. generated code: warning text read "fileds"; the manual
        # removal loop is replaced by an equivalent set difference.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DDoSEventRecord(AbstractModel):
    """DDoS attack event record.
    """

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; bgp: dedicated package; bgp-multip: shared package; net: Anti-DDoS Ultimate; basic: Anti-DDoS Basic)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param Vip: IP of the resource
        :type Vip: str
        :param StartTime: Attack start time
        :type StartTime: str
        :param EndTime: Attack end time
        :type EndTime: str
        :param Mbps: Peak attack bandwidth
        :type Mbps: int
        :param Pps: Peak attack packet rate
        :type Pps: int
        :param AttackType: Attack type
        :type AttackType: str
        :param BlockFlag: Whether the IP was blocked, one of [1 (yes), 0 (no), 2 (invalid value)]
        :type BlockFlag: int
        :param OverLoad: Whether the elastic protection peak was exceeded, one of [yes, no, empty string (unknown)]
        :type OverLoad: str
        :param AttackStatus: Attack status, one of [0 (attack ongoing), 1 (attack ended)]
        :type AttackStatus: int
        :param ResourceName: Resource name
        Note: this field may return null, indicating that no valid value was found.
        :type ResourceName: str
        :param EventId: Attack event ID
        Note: this field may return null, indicating that no valid value was found.
        :type EventId: str
        """
        self.Business = None
        self.Id = None
        self.Vip = None
        self.StartTime = None
        self.EndTime = None
        self.Mbps = None
        self.Pps = None
        self.AttackType = None
        self.BlockFlag = None
        self.OverLoad = None
        self.AttackStatus = None
        self.ResourceName = None
        self.EventId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        # All fields are plain scalars; copy them in declaration order.
        for field in ("Business", "Id", "Vip", "StartTime", "EndTime", "Mbps",
                      "Pps", "AttackType", "BlockFlag", "OverLoad",
                      "AttackStatus", "ResourceName", "EventId"):
            setattr(self, field, params.get(field))
        # Fix vs. generated code: warning text read "fileds"; the manual
        # removal loop is replaced by an equivalent set difference.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DDoSPolicyDropOption(AbstractModel):
    """Protocol-blocking options of an advanced DDoS policy.
    """

    # Declaration-ordered scalar fields; see __init__ docstring for ranges.
    _FIELDS = (
        "DropTcp", "DropUdp", "DropIcmp", "DropOther", "DropAbroad",
        "CheckSyncConn", "SdNewLimit", "DstNewLimit", "SdConnLimit",
        "DstConnLimit", "BadConnThreshold", "NullConnEnable", "ConnTimeout",
        "SynRate", "SynLimit", "DTcpMbpsLimit", "DUdpMbpsLimit",
        "DIcmpMbpsLimit", "DOtherMbpsLimit")

    def __init__(self):
        r"""
        :param DropTcp: Block TCP traffic, range [0,1]
        :type DropTcp: int
        :param DropUdp: Block UDP traffic, range [0,1]
        :type DropUdp: int
        :param DropIcmp: Block ICMP traffic, range [0,1]
        :type DropIcmp: int
        :param DropOther: Block traffic of other protocols, range [0,1]
        :type DropOther: int
        :param DropAbroad: Reject traffic from outside Mainland China, range [0,1]
        :type DropAbroad: int
        :param CheckSyncConn: Null-connection protection, range [0,1]
        :type CheckSyncConn: int
        :param SdNewLimit: New-connection suppression keyed on source and destination IP, range [0,4294967295]
        :type SdNewLimit: int
        :param DstNewLimit: New-connection suppression keyed on destination IP, range [0,4294967295]
        :type DstNewLimit: int
        :param SdConnLimit: Concurrent-connection suppression keyed on source and destination IP, range [0,4294967295]
        :type SdConnLimit: int
        :param DstConnLimit: Concurrent-connection suppression keyed on destination IP, range [0,4294967295]
        :type DstConnLimit: int
        :param BadConnThreshold: Threshold triggering connection suppression, range [0,4294967295]
        :type BadConnThreshold: int
        :param NullConnEnable: Abnormal-connection detection: null-connection protection switch, range [0,1]
        :type NullConnEnable: int
        :param ConnTimeout: Abnormal-connection detection: connection timeout, range [0,65535]
        :type ConnTimeout: int
        :param SynRate: Abnormal-connection detection: SYN-to-ACK percentage, range [0,100]
        :type SynRate: int
        :param SynLimit: Abnormal-connection detection: SYN threshold, range [0,100]
        :type SynLimit: int
        :param DTcpMbpsLimit: TCP rate limit, range [0,4294967295]
        :type DTcpMbpsLimit: int
        :param DUdpMbpsLimit: UDP rate limit, range [0,4294967295]
        :type DUdpMbpsLimit: int
        :param DIcmpMbpsLimit: ICMP rate limit, range [0,4294967295]
        :type DIcmpMbpsLimit: int
        :param DOtherMbpsLimit: Other-protocol rate limit, range [0,4294967295]
        :type DOtherMbpsLimit: int
        """
        for field in self._FIELDS:
            setattr(self, field, None)

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        for field in self._FIELDS:
            setattr(self, field, params.get(field))
        # Fix vs. generated code: warning text read "fileds"; the manual
        # removal loop is replaced by an equivalent set difference.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DDoSPolicyPacketFilter(AbstractModel):
    """Packet-filter entry of an advanced DDoS policy.
    """

    # Declaration-ordered scalar fields; see __init__ docstring for ranges.
    _FIELDS = (
        "Protocol", "SportStart", "SportEnd", "DportStart", "DportEnd",
        "PktlenMin", "PktlenMax", "MatchBegin", "MatchType", "Str",
        "Depth", "Offset", "IsNot", "Action")

    def __init__(self):
        r"""
        :param Protocol: Protocol, one of [tcp, udp, icmp, all]
        :type Protocol: str
        :param SportStart: Start of source port range, range [0,65535]
        :type SportStart: int
        :param SportEnd: End of source port range, range [0,65535]
        :type SportEnd: int
        :param DportStart: Start of destination port range, range [0,65535]
        :type DportStart: int
        :param DportEnd: End of destination port range, range [0,65535]
        :type DportEnd: int
        :param PktlenMin: Minimum packet length, range [0,1500]
        :type PktlenMin: int
        :param PktlenMax: Maximum packet length, range [0,1500]
        :type PktlenMax: int
        :param MatchBegin: Where payload inspection starts, one of [
        begin_l3 (IP header)
        begin_l4 (TCP header)
        begin_l5 (payload)
        no_match (no inspection)
        ]
        :type MatchBegin: str
        :param MatchType: Match kind, one of [sunday (keyword), pcre (regular expression)]
        :type MatchType: str
        :param Str: Keyword or regular expression
        :type Str: str
        :param Depth: Inspection depth, range [0,1500]
        :type Depth: int
        :param Offset: Inspection offset, range [0,1500]
        :type Offset: int
        :param IsNot: Whether the match is inclusive, one of [0 (not included), 1 (included)]
        :type IsNot: int
        :param Action: Policy action, one of [drop, drop_black, drop_rst, drop_black_rst, transmit]
        :type Action: str
        """
        for field in self._FIELDS:
            setattr(self, field, None)

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        for field in self._FIELDS:
            setattr(self, field, params.get(field))
        # Fix vs. generated code: warning text read "fileds"; the manual
        # removal loop is replaced by an equivalent set difference.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DDoSPolicyPortLimit(AbstractModel):
    """Blocked-port entry of an advanced DDoS policy.
    """

    def __init__(self):
        r"""
        :param Protocol: Protocol, one of [tcp, udp, all]
        :type Protocol: str
        :param DPortStart: Start of destination port range, range [0,65535]
        :type DPortStart: int
        :param DPortEnd: End of destination port range, range [0,65535]; must be >= DPortStart
        :type DPortEnd: int
        :param SPortStart: Start of source port range, range [0,65535]
        Note: this field may return null, indicating that no valid value was found.
        :type SPortStart: int
        :param SPortEnd: End of source port range, range [0,65535]; must be >= SPortStart
        Note: this field may return null, indicating that no valid value was found.
        :type SPortEnd: int
        :param Action: Action, one of [drop, transmit]
        Note: this field may return null, indicating that no valid value was found.
        :type Action: str
        :param Kind: Blocked-port kind, one of [0 (destination range), 1 (source range), 2 (both ranges)]
        Note: this field may return null, indicating that no valid value was found.
        :type Kind: int
        """
        self.Protocol = None
        self.DPortStart = None
        self.DPortEnd = None
        self.SPortStart = None
        self.SPortEnd = None
        self.Action = None
        self.Kind = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        for field in ("Protocol", "DPortStart", "DPortEnd", "SPortStart",
                      "SPortEnd", "Action", "Kind"):
            setattr(self, field, params.get(field))
        # Fix vs. generated code: warning text read "fileds"; the manual
        # removal loop is replaced by an equivalent set difference.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DDosPolicy(AbstractModel):
    """Advanced DDoS policy.
    """

    def __init__(self):
        r"""
        :param Resources: Resources bound to the policy
        :type Resources: list of ResourceIp
        :param DropOptions: Blocked protocols
        :type DropOptions: :class:`tencentcloud.dayu.v20180709.models.DDoSPolicyDropOption`
        :param PortLimits: Blocked ports
        :type PortLimits: list of DDoSPolicyPortLimit
        :param PacketFilters: Packet filters
        :type PacketFilters: list of DDoSPolicyPacketFilter
        :param IpBlackWhiteLists: IP blocklist/allowlist
        :type IpBlackWhiteLists: list of IpBlackWhite
        :param PolicyId: Policy ID
        :type PolicyId: str
        :param PolicyName: Policy name
        :type PolicyName: str
        :param CreateTime: Policy creation time
        :type CreateTime: str
        :param WaterPrint: Watermark policy parameters (at most one; empty when no watermark policy exists)
        :type WaterPrint: list of WaterPrintPolicy
        :param WaterKey: Watermark keys (at most two; empty when no watermark policy exists)
        :type WaterKey: list of WaterPrintKey
        :param BoundResources: Resource instances bound to the policy
        Note: this field may return null, indicating that no valid value was found.
        :type BoundResources: list of str
        :param SceneId: Policy scenario the policy belongs to
        Note: this field may return null, indicating that no valid value was found.
        :type SceneId: str
        """
        self.Resources = None
        self.DropOptions = None
        self.PortLimits = None
        self.PacketFilters = None
        self.IpBlackWhiteLists = None
        self.PolicyId = None
        self.PolicyName = None
        self.CreateTime = None
        self.WaterPrint = None
        self.WaterKey = None
        self.BoundResources = None
        self.SceneId = None

    def _fill_list(self, attr, items, cls):
        """Deserialize *items* into a list of *cls* stored on attribute *attr*."""
        setattr(self, attr, [])
        for item in items:
            obj = cls()
            obj._deserialize(item)
            getattr(self, attr).append(obj)

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        if params.get("Resources") is not None:
            self._fill_list("Resources", params.get("Resources"), ResourceIp)
        if params.get("DropOptions") is not None:
            self.DropOptions = DDoSPolicyDropOption()
            self.DropOptions._deserialize(params.get("DropOptions"))
        if params.get("PortLimits") is not None:
            self._fill_list("PortLimits", params.get("PortLimits"),
                            DDoSPolicyPortLimit)
        if params.get("PacketFilters") is not None:
            self._fill_list("PacketFilters", params.get("PacketFilters"),
                            DDoSPolicyPacketFilter)
        if params.get("IpBlackWhiteLists") is not None:
            self._fill_list("IpBlackWhiteLists",
                            params.get("IpBlackWhiteLists"), IpBlackWhite)
        self.PolicyId = params.get("PolicyId")
        self.PolicyName = params.get("PolicyName")
        self.CreateTime = params.get("CreateTime")
        if params.get("WaterPrint") is not None:
            self._fill_list("WaterPrint", params.get("WaterPrint"),
                            WaterPrintPolicy)
        if params.get("WaterKey") is not None:
            self._fill_list("WaterKey", params.get("WaterKey"), WaterPrintKey)
        self.BoundResources = params.get("BoundResources")
        self.SceneId = params.get("SceneId")
        # Fix vs. generated code: warning text read "fileds"; the manual
        # removal loop is replaced by an equivalent set difference.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DeleteCCFrequencyRulesRequest(AbstractModel):
    """DeleteCCFrequencyRules request structure.
    """

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; net: Anti-DDoS Ultimate)
        :type Business: str
        :param CCFrequencyRuleId: ID of the CC access-frequency control rule
        :type CCFrequencyRuleId: str
        """
        self.Business = None
        self.CCFrequencyRuleId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.Business = params.get("Business")
        self.CCFrequencyRuleId = params.get("CCFrequencyRuleId")
        # Fix vs. generated code: warning text read "fileds"; the manual
        # removal loop is replaced by an equivalent set difference.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DeleteCCFrequencyRulesResponse(AbstractModel):
    """DeleteCCFrequencyRules response structure.
    """

    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned for every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        # Build the nested SuccessCode only when the key is present.
        raw_success = params.get("Success")
        if raw_success is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(raw_success)
        self.RequestId = params.get("RequestId")
class DeleteCCSelfDefinePolicyRequest(AbstractModel):
    """DeleteCCSelfDefinePolicy request structure.
    """

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; bgp: dedicated package; bgp-multip: shared package; net: Anti-DDoS Ultimate)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param SetId: Policy ID
        :type SetId: str
        """
        self.Business = None
        self.Id = None
        self.SetId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.SetId = params.get("SetId")
        # Fix vs. generated code: warning text read "fileds"; the manual
        # removal loop is replaced by an equivalent set difference.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DeleteCCSelfDefinePolicyResponse(AbstractModel):
    """DeleteCCSelfDefinePolicy response structure.
    """

    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned for every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        # Build the nested SuccessCode only when the key is present.
        raw_success = params.get("Success")
        if raw_success is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(raw_success)
        self.RequestId = params.get("RequestId")
class DeleteDDoSPolicyCaseRequest(AbstractModel):
    """DeleteDDoSPolicyCase request structure.
    """

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; bgp: dedicated package; bgp-multip: shared package; net: Anti-DDoS Ultimate)
        :type Business: str
        :param SceneId: Policy scenario ID
        :type SceneId: str
        """
        self.Business = None
        self.SceneId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.Business = params.get("Business")
        self.SceneId = params.get("SceneId")
        # Fix vs. generated code: warning text read "fileds"; the manual
        # removal loop is replaced by an equivalent set difference.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DeleteDDoSPolicyCaseResponse(AbstractModel):
    """DeleteDDoSPolicyCase response structure.
    """

    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned for every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        # Build the nested SuccessCode only when the key is present.
        raw_success = params.get("Success")
        if raw_success is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(raw_success)
        self.RequestId = params.get("RequestId")
class DeleteDDoSPolicyRequest(AbstractModel):
    """DeleteDDoSPolicy request structure.
    """

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; bgp: dedicated package; bgp-multip: shared package; net: Anti-DDoS Ultimate)
        :type Business: str
        :param PolicyId: Policy ID
        :type PolicyId: str
        """
        self.Business = None
        self.PolicyId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.Business = params.get("Business")
        self.PolicyId = params.get("PolicyId")
        # Fix vs. generated code: warning text read "fileds"; the manual
        # removal loop is replaced by an equivalent set difference.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DeleteDDoSPolicyResponse(AbstractModel):
    """DeleteDDoSPolicy response structure.
    """

    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned for every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        # Build the nested SuccessCode only when the key is present.
        raw_success = params.get("Success")
        if raw_success is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(raw_success)
        self.RequestId = params.get("RequestId")
class DeleteL4RulesRequest(AbstractModel):
    """DeleteL4Rules request structure.
    """

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; net: Anti-DDoS Ultimate)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param RuleIdList: List of rule IDs
        :type RuleIdList: list of str
        """
        self.Business = None
        self.Id = None
        self.RuleIdList = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.RuleIdList = params.get("RuleIdList")
        # Fix vs. generated code: warning text read "fileds"; the manual
        # removal loop is replaced by an equivalent set difference.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DeleteL4RulesResponse(AbstractModel):
    """DeleteL4Rules response structure.
    """

    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned for every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        # Build the nested SuccessCode only when the key is present.
        raw_success = params.get("Success")
        if raw_success is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(raw_success)
        self.RequestId = params.get("RequestId")
class DeleteL7RulesRequest(AbstractModel):
    """DeleteL7Rules request structure.
    """

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; net: Anti-DDoS Ultimate)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param RuleIdList: List of rule IDs
        :type RuleIdList: list of str
        """
        self.Business = None
        self.Id = None
        self.RuleIdList = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.RuleIdList = params.get("RuleIdList")
        # Fix vs. generated code: warning text read "fileds"; the manual
        # removal loop is replaced by an equivalent set difference.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DeleteL7RulesResponse(AbstractModel):
    """DeleteL7Rules response structure.
    """

    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned for every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        # Build the nested SuccessCode only when the key is present.
        raw_success = params.get("Success")
        if raw_success is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(raw_success)
        self.RequestId = params.get("RequestId")
class DeleteNewL4RulesRequest(AbstractModel):
    """DeleteNewL4Rules request structure.
    """

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced)
        :type Business: str
        :param Rule: Deletion entries
        :type Rule: list of L4DelRule
        """
        self.Business = None
        self.Rule = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.Business = params.get("Business")
        if params.get("Rule") is not None:
            self.Rule = []
            for item in params.get("Rule"):
                obj = L4DelRule()
                obj._deserialize(item)
                self.Rule.append(obj)
        # Fix vs. generated code: warning text read "fileds"; the manual
        # removal loop is replaced by an equivalent set difference.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DeleteNewL4RulesResponse(AbstractModel):
    """DeleteNewL4Rules response structure.
    """

    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned for every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        # Build the nested SuccessCode only when the key is present.
        raw_success = params.get("Success")
        if raw_success is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(raw_success)
        self.RequestId = params.get("RequestId")
class DeleteNewL7RulesRequest(AbstractModel):
    """DeleteNewL7Rules request structure.
    """

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced)
        :type Business: str
        :param Rule: List of rules to delete
        :type Rule: list of L4DelRule
        """
        self.Business = None
        self.Rule = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.Business = params.get("Business")
        if params.get("Rule") is not None:
            self.Rule = []
            for item in params.get("Rule"):
                obj = L4DelRule()
                obj._deserialize(item)
                self.Rule.append(obj)
        # Fix vs. generated code: warning text read "fileds"; the manual
        # removal loop is replaced by an equivalent set difference.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DeleteNewL7RulesResponse(AbstractModel):
    """DeleteNewL7Rules response structure.
    """

    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned for every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        # Build the nested SuccessCode only when the key is present.
        raw_success = params.get("Success")
        if raw_success is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(raw_success)
        self.RequestId = params.get("RequestId")
class DescribeActionLogRequest(AbstractModel):
    """DescribeActionLog request structure.
    """

    def __init__(self):
        r"""
        :param StartTime: Start time
        :type StartTime: str
        :param EndTime: End time
        :type EndTime: str
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; bgp: dedicated package; bgp-multip: shared package; net: Anti-DDoS Ultimate)
        :type Business: str
        :param Filter: Search value; only resource IDs or user UINs are supported
        :type Filter: str
        :param Limit: Entries per page; 0 means no paging
        :type Limit: int
        :param Offset: Page start offset, computed as (page number - 1) * entries per page
        :type Offset: int
        """
        self.StartTime = None
        self.EndTime = None
        self.Business = None
        self.Filter = None
        self.Limit = None
        self.Offset = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        for field in ("StartTime", "EndTime", "Business", "Filter", "Limit",
                      "Offset"):
            setattr(self, field, params.get(field))
        # Fix vs. generated code: warning text read "fileds"; the manual
        # removal loop is replaced by an equivalent set difference.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DescribeActionLogResponse(AbstractModel):
    """DescribeActionLog response structure.
    """

    def __init__(self):
        r"""
        :param TotalCount: Total number of records
        :type TotalCount: int
        :param Data: Record array
        :type Data: list of KeyValueRecord
        :param RequestId: Unique request ID returned for every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.TotalCount = None
        self.Data = None
        self.RequestId = None

    def _deserialize(self, params):
        self.TotalCount = params.get("TotalCount")
        raw_records = params.get("Data")
        if raw_records is not None:
            # Deserialize each record into a KeyValueRecord instance.
            self.Data = []
            for raw in raw_records:
                record = KeyValueRecord()
                record._deserialize(raw)
                self.Data.append(record)
        self.RequestId = params.get("RequestId")
class DescribeBGPIPL7RuleMaxCntRequest(AbstractModel):
    """DescribeBGPIPL7RuleMaxCnt request structure.
    """

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced)
        :type Business: str
        :param Id: Resource instance ID
        :type Id: str
        """
        self.Business = None
        self.Id = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        # Fix vs. generated code: warning text read "fileds"; the manual
        # removal loop is replaced by an equivalent set difference.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DescribeBGPIPL7RuleMaxCntResponse(AbstractModel):
    """DescribeBGPIPL7RuleMaxCnt response structure."""

    def __init__(self):
        r"""
        :param Count: Maximum number of layer-7 rules that can be added to the Anti-DDoS Advanced IP
        :type Count: int
        :param RequestId: Unique request ID returned for every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.Count = None
        self.RequestId = None

    def _deserialize(self, params):
        """Fill in the response fields from the *params* dict."""
        for field in ("Count", "RequestId"):
            setattr(self, field, params.get(field))
class DescribeBaradDataRequest(AbstractModel):
    """DescribeBaradData request structure."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced IP; net: Anti-DDoS Advanced IP Pro)
        :type Business: str
        :param Id: Resource instance ID
        :type Id: str
        :param MetricName: Metric name. Values:
connum: active TCP connections;
new_conn: new TCP connections;
inactive_conn: inactive connections;
intraffic: inbound traffic;
outtraffic: outbound traffic;
alltraffic: sum of inbound and outbound traffic;
inpkg: inbound packet rate;
outpkg: outbound packet rate;
        :type MetricName: str
        :param Period: Statistics granularity in seconds (300: 5 minutes; 3600: hour; 86400: day)
        :type Period: int
        :param StartTime: Statistics start time; seconds must be 0 and minutes a multiple of 5
        :type StartTime: str
        :param EndTime: Statistics end time; seconds must be 0 and minutes a multiple of 5
        :type EndTime: str
        :param Statistics: Aggregation method. Values:
max: maximum value;
min: minimum value;
avg: average value;
        :type Statistics: str
        :param ProtocolPort: Array of protocol/port pairs
        :type ProtocolPort: list of ProtocolPort
        :param Ip: An IP of the resource instance. Required only when Business = net (Anti-DDoS Advanced IP Pro), because that product has multiple IPs per instance.
        :type Ip: str
        """
        self.Business = None
        self.Id = None
        self.MetricName = None
        self.Period = None
        self.StartTime = None
        self.EndTime = None
        self.Statistics = None
        self.ProtocolPort = None
        self.Ip = None

    def _deserialize(self, params):
        """Populate the request fields from the *params* dict and warn
        about keys that do not map to a declared attribute."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.MetricName = params.get("MetricName")
        self.Period = params.get("Period")
        self.StartTime = params.get("StartTime")
        self.EndTime = params.get("EndTime")
        self.Statistics = params.get("Statistics")
        if params.get("ProtocolPort") is not None:
            self.ProtocolPort = []
            for item in params.get("ProtocolPort"):
                obj = ProtocolPort()
                obj._deserialize(item)
                self.ProtocolPort.append(obj)
        self.Ip = params.get("Ip")
        # Typo fix in the warning ("fileds" -> "fields"); set difference
        # replaces the original manual removal loop.
        unused = set(params.keys()) - set(vars(self).keys())
        if len(unused) > 0:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeBaradDataResponse(AbstractModel):
    """DescribeBaradData response structure."""

    def __init__(self):
        r"""
        :param DataList: Values of the requested metric
        :type DataList: list of BaradData
        :param RequestId: Unique request ID returned for every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.DataList = None
        self.RequestId = None

    def _deserialize(self, params):
        """Fill in the response fields from the *params* dict."""
        raw_entries = params.get("DataList")
        if raw_entries is not None:
            self.DataList = []
            for raw in raw_entries:
                entry = BaradData()
                entry._deserialize(raw)
                self.DataList.append(entry)
        self.RequestId = params.get("RequestId")
class DescribeBasicCCThresholdRequest(AbstractModel):
    """DescribeBasicCCThreshold request structure."""

    def __init__(self):
        r"""
        :param BasicIp: IP address to query, e.g. 1.1.1.1
        :type BasicIp: str
        :param BasicRegion: Region of the IP, as an abbreviation such as gz, bj, sh, hk
        :type BasicRegion: str
        :param BasicBizType: Zone type. Values: public (public cloud zone), bm (bare metal zone), nat (NAT server zone), channel (internet channel).
        :type BasicBizType: str
        :param BasicDeviceType: Device type. Values: cvm (server), clb (public cloud load balancer), lb (bare metal load balancer), nat (NAT server), channel (internet channel).
        :type BasicDeviceType: str
        :param BasicIpInstance: Optional. IPInstance NAT gateway (required when the device type is NAT server; obtained via the NAT resource query API)
        :type BasicIpInstance: str
        :param BasicIspCode: Optional. ISP line (pass 5 when the device type is NAT server)
        :type BasicIspCode: int
        """
        self.BasicIp = None
        self.BasicRegion = None
        self.BasicBizType = None
        self.BasicDeviceType = None
        self.BasicIpInstance = None
        self.BasicIspCode = None

    def _deserialize(self, params):
        """Populate the request fields from the *params* dict and warn
        about keys that do not map to a declared attribute."""
        self.BasicIp = params.get("BasicIp")
        self.BasicRegion = params.get("BasicRegion")
        self.BasicBizType = params.get("BasicBizType")
        self.BasicDeviceType = params.get("BasicDeviceType")
        self.BasicIpInstance = params.get("BasicIpInstance")
        self.BasicIspCode = params.get("BasicIspCode")
        # Typo fix in the warning ("fileds" -> "fields"); set difference
        # replaces the original manual removal loop.
        unused = set(params.keys()) - set(vars(self).keys())
        if len(unused) > 0:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeBasicCCThresholdResponse(AbstractModel):
    """DescribeBasicCCThreshold response structure."""

    def __init__(self):
        r"""
        :param CCEnable: CC protection switch (0: off; 1: on)
        :type CCEnable: int
        :param CCThreshold: CC protection threshold
        :type CCThreshold: int
        :param RequestId: Unique request ID returned for every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.CCEnable = None
        self.CCThreshold = None
        self.RequestId = None

    def _deserialize(self, params):
        """Fill in the response fields from the *params* dict."""
        for field in ("CCEnable", "CCThreshold", "RequestId"):
            setattr(self, field, params.get(field))
class DescribeBasicDeviceThresholdRequest(AbstractModel):
    """DescribeBasicDeviceThreshold request structure."""

    def __init__(self):
        r"""
        :param BasicIp: IP address to query, e.g. 1.1.1.1
        :type BasicIp: str
        :param BasicRegion: Region of the IP, as an abbreviation such as gz, bj, sh, hk
        :type BasicRegion: str
        :param BasicBizType: Zone type. Values: public (public cloud zone), bm (bare metal zone), nat (NAT server zone), channel (internet channel).
        :type BasicBizType: str
        :param BasicDeviceType: Device type. Values: cvm (server), clb (public cloud load balancer), lb (bare metal load balancer), nat (NAT server), channel (internet channel).
        :type BasicDeviceType: str
        :param BasicCheckFlag: Validity check; must be 1
        :type BasicCheckFlag: int
        :param BasicIpInstance: Optional. IPInstance NAT gateway (required when the device type is NAT server; obtained via the NAT resource query API)
        :type BasicIpInstance: str
        :param BasicIspCode: Optional. ISP line (pass 5 when the device type is NAT server)
        :type BasicIspCode: int
        """
        self.BasicIp = None
        self.BasicRegion = None
        self.BasicBizType = None
        self.BasicDeviceType = None
        self.BasicCheckFlag = None
        self.BasicIpInstance = None
        self.BasicIspCode = None

    def _deserialize(self, params):
        """Populate the request fields from the *params* dict and warn
        about keys that do not map to a declared attribute."""
        self.BasicIp = params.get("BasicIp")
        self.BasicRegion = params.get("BasicRegion")
        self.BasicBizType = params.get("BasicBizType")
        self.BasicDeviceType = params.get("BasicDeviceType")
        self.BasicCheckFlag = params.get("BasicCheckFlag")
        self.BasicIpInstance = params.get("BasicIpInstance")
        self.BasicIspCode = params.get("BasicIspCode")
        # Typo fix in the warning ("fileds" -> "fields"); set difference
        # replaces the original manual removal loop.
        unused = set(params.keys()) - set(vars(self).keys())
        if len(unused) > 0:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeBasicDeviceThresholdResponse(AbstractModel):
    """DescribeBasicDeviceThreshold response structure."""

    def __init__(self):
        r"""
        :param Threshold: Returned blackhole blocking threshold
        :type Threshold: int
        :param RequestId: Unique request ID returned for every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.Threshold = None
        self.RequestId = None

    def _deserialize(self, params):
        """Fill in the response fields from the *params* dict."""
        for field in ("Threshold", "RequestId"):
            setattr(self, field, params.get(field))
class DescribeBizHttpStatusRequest(AbstractModel):
    """DescribeBizHttpStatus request structure."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced IP)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param Period: Statistics period in seconds; one of 300, 1800, 3600, 21600, 86400
        :type Period: int
        :param StartTime: Statistics start time
        :type StartTime: str
        :param EndTime: Statistics end time
        :type EndTime: str
        :param Statistics: Aggregation method; only sum is supported
        :type Statistics: str
        :param ProtoInfo: Protocol/port list; protocol may be TCP, UDP, HTTP or HTTPS. Effective only when the statistics dimension is connection count.
        :type ProtoInfo: list of ProtocolPort
        :param Domain: Optional specific domain to query
        :type Domain: str
        """
        self.Business = None
        self.Id = None
        self.Period = None
        self.StartTime = None
        self.EndTime = None
        self.Statistics = None
        self.ProtoInfo = None
        self.Domain = None

    def _deserialize(self, params):
        """Populate the request fields from the *params* dict and warn
        about keys that do not map to a declared attribute."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.Period = params.get("Period")
        self.StartTime = params.get("StartTime")
        self.EndTime = params.get("EndTime")
        self.Statistics = params.get("Statistics")
        if params.get("ProtoInfo") is not None:
            self.ProtoInfo = []
            for item in params.get("ProtoInfo"):
                obj = ProtocolPort()
                obj._deserialize(item)
                self.ProtoInfo.append(obj)
        self.Domain = params.get("Domain")
        # Typo fix in the warning ("fileds" -> "fields"); set difference
        # replaces the original manual removal loop.
        unused = set(params.keys()) - set(vars(self).keys())
        if len(unused) > 0:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeBizHttpStatusResponse(AbstractModel):
    """DescribeBizHttpStatus response structure."""

    def __init__(self):
        r"""
        :param HttpStatusMap: HTTP status-code statistics of business traffic
        :type HttpStatusMap: :class:`tencentcloud.dayu.v20180709.models.HttpStatusMap`
        :param RequestId: Unique request ID returned for every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.HttpStatusMap = None
        self.RequestId = None

    def _deserialize(self, params):
        """Fill in the response fields from the *params* dict."""
        raw_map = params.get("HttpStatusMap")
        if raw_map is not None:
            status_map = HttpStatusMap()
            status_map._deserialize(raw_map)
            self.HttpStatusMap = status_map
        self.RequestId = params.get("RequestId")
class DescribeBizTrendRequest(AbstractModel):
    """DescribeBizTrend request structure."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced IP)
        :type Business: str
        :param Id: Resource instance ID
        :type Id: str
        :param Period: Statistics period in seconds; one of 300, 1800, 3600, 21600, 86400
        :type Period: int
        :param StartTime: Statistics start time
        :type StartTime: str
        :param EndTime: Statistics end time
        :type EndTime: str
        :param Statistics: Aggregation method; one of max, min, avg, sum. Only max is valid when the dimension is traffic rate or packet rate.
        :type Statistics: str
        :param MetricName: Statistics dimension; one of connum, new_conn, inactive_conn, intraffic, outtraffic, inpkg, outpkg, qps
        :type MetricName: str
        :param ProtoInfo: Protocol/port list; protocol may be TCP, UDP, HTTP or HTTPS. Effective only when the statistics dimension is connection count.
        :type ProtoInfo: list of ProtocolPort
        :param Domain: Optional specific domain to query when the dimension is qps
        :type Domain: str
        """
        self.Business = None
        self.Id = None
        self.Period = None
        self.StartTime = None
        self.EndTime = None
        self.Statistics = None
        self.MetricName = None
        self.ProtoInfo = None
        self.Domain = None

    def _deserialize(self, params):
        """Populate the request fields from the *params* dict and warn
        about keys that do not map to a declared attribute."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.Period = params.get("Period")
        self.StartTime = params.get("StartTime")
        self.EndTime = params.get("EndTime")
        self.Statistics = params.get("Statistics")
        self.MetricName = params.get("MetricName")
        if params.get("ProtoInfo") is not None:
            self.ProtoInfo = []
            for item in params.get("ProtoInfo"):
                obj = ProtocolPort()
                obj._deserialize(item)
                self.ProtoInfo.append(obj)
        self.Domain = params.get("Domain")
        # Typo fix in the warning ("fileds" -> "fields"); set difference
        # replaces the original manual removal loop.
        unused = set(params.keys()) - set(vars(self).keys())
        if len(unused) > 0:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeBizTrendResponse(AbstractModel):
    """DescribeBizTrend response structure."""

    def __init__(self):
        r"""
        :param DataList: Value at each point on the curve
        :type DataList: list of float
        :param MetricName: Statistics dimension
        :type MetricName: str
        :param RequestId: Unique request ID returned for every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.DataList = None
        self.MetricName = None
        self.RequestId = None

    def _deserialize(self, params):
        """Fill in the response fields from the *params* dict."""
        for field in ("DataList", "MetricName", "RequestId"):
            setattr(self, field, params.get(field))
class DescribeCCAlarmThresholdRequest(AbstractModel):
    """DescribeCCAlarmThreshold request structure."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (shield: chess/board-game shield; bgpip: Anti-DDoS Advanced IP; bgp: Anti-DDoS Pro; bgp-multip: multi-IP Anti-DDoS Pro; net: Anti-DDoS Advanced IP Pro)
        :type Business: str
        :param RsId: Resource ID, as a string
        :type RsId: str
        """
        self.Business = None
        self.RsId = None

    def _deserialize(self, params):
        """Populate the request fields from the *params* dict and warn
        about keys that do not map to a declared attribute."""
        self.Business = params.get("Business")
        self.RsId = params.get("RsId")
        # Typo fix in the warning ("fileds" -> "fields"); set difference
        # replaces the original manual removal loop.
        unused = set(params.keys()) - set(vars(self).keys())
        if len(unused) > 0:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeCCAlarmThresholdResponse(AbstractModel):
    """DescribeCCAlarmThreshold response structure."""

    def __init__(self):
        r"""
        :param CCAlarmThreshold: CC alarm threshold
        :type CCAlarmThreshold: :class:`tencentcloud.dayu.v20180709.models.CCAlarmThreshold`
        :param RequestId: Unique request ID returned for every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.CCAlarmThreshold = None
        self.RequestId = None

    def _deserialize(self, params):
        """Fill in the response fields from the *params* dict."""
        raw_threshold = params.get("CCAlarmThreshold")
        if raw_threshold is not None:
            threshold = CCAlarmThreshold()
            threshold._deserialize(raw_threshold)
            self.CCAlarmThreshold = threshold
        self.RequestId = params.get("RequestId")
class DescribeCCEvListRequest(AbstractModel):
    """DescribeCCEvList request structure."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced IP; bgp: dedicated package; bgp-multip: shared package; net: Anti-DDoS Advanced IP Pro; basic: basic DDoS protection)
        :type Business: str
        :param StartTime: Start time
        :type StartTime: str
        :param EndTime: End time
        :type EndTime: str
        :param Id: Resource instance ID
        :type Id: str
        :param IpList: IPs of the resource instance. When Business is not basic and IpList is non-empty, Id must also be non-empty.
        :type IpList: list of str
        :param Limit: Page size; 0 means no pagination
        :type Limit: int
        :param Offset: Page offset: (page number - 1) * page size
        :type Offset: int
        """
        self.Business = None
        self.StartTime = None
        self.EndTime = None
        self.Id = None
        self.IpList = None
        self.Limit = None
        self.Offset = None

    def _deserialize(self, params):
        """Populate the request fields from the *params* dict and warn
        about keys that do not map to a declared attribute."""
        self.Business = params.get("Business")
        self.StartTime = params.get("StartTime")
        self.EndTime = params.get("EndTime")
        self.Id = params.get("Id")
        self.IpList = params.get("IpList")
        self.Limit = params.get("Limit")
        self.Offset = params.get("Offset")
        # Typo fix in the warning ("fileds" -> "fields"); set difference
        # replaces the original manual removal loop.
        unused = set(params.keys()) - set(vars(self).keys())
        if len(unused) > 0:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeCCEvListResponse(AbstractModel):
    """DescribeCCEvList response structure."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (shield: chess/board-game shield; bgpip: Anti-DDoS Advanced IP; bgp: dedicated package; bgp-multip: shared package; net: Anti-DDoS Advanced IP Pro; basic: basic DDoS protection)
        :type Business: str
        :param Id: Resource instance ID
        :type Id: str
        :param IpList: IP list of the resource instance
Note: this field may return null, meaning no valid value was found.
        :type IpList: list of str
        :param StartTime: Start time
        :type StartTime: str
        :param EndTime: End time
        :type EndTime: str
        :param Data: List of CC attack events
        :type Data: list of CCEventRecord
        :param Total: Total number of records
        :type Total: int
        :param RequestId: Unique request ID returned for every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.Business = None
        self.Id = None
        self.IpList = None
        self.StartTime = None
        self.EndTime = None
        self.Data = None
        self.Total = None
        self.RequestId = None

    def _deserialize(self, params):
        """Fill in the response fields from the *params* dict."""
        for field in ("Business", "Id", "IpList", "StartTime", "EndTime"):
            setattr(self, field, params.get(field))
        raw_events = params.get("Data")
        if raw_events is not None:
            self.Data = []
            for raw in raw_events:
                event = CCEventRecord()
                event._deserialize(raw)
                self.Data.append(event)
        self.Total = params.get("Total")
        self.RequestId = params.get("RequestId")
class DescribeCCFrequencyRulesRequest(AbstractModel):
    """DescribeCCFrequencyRules request structure."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced IP; net: Anti-DDoS Advanced IP Pro)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param RuleId: Layer-7 forwarding rule ID (from the layer-7 forwarding rule query API). When set, the access frequency-control rules of that forwarding rule are returned.
        :type RuleId: str
        """
        self.Business = None
        self.Id = None
        self.RuleId = None

    def _deserialize(self, params):
        """Populate the request fields from the *params* dict and warn
        about keys that do not map to a declared attribute."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.RuleId = params.get("RuleId")
        # Typo fix in the warning ("fileds" -> "fields"); set difference
        # replaces the original manual removal loop.
        unused = set(params.keys()) - set(vars(self).keys())
        if len(unused) > 0:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeCCFrequencyRulesResponse(AbstractModel):
    """DescribeCCFrequencyRules response structure."""

    def __init__(self):
        r"""
        :param CCFrequencyRuleList: List of access frequency-control rules
        :type CCFrequencyRuleList: list of CCFrequencyRule
        :param CCFrequencyRuleStatus: Switch state of access frequency control; one of on (enabled), off (disabled)
        :type CCFrequencyRuleStatus: str
        :param RequestId: Unique request ID returned for every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.CCFrequencyRuleList = None
        self.CCFrequencyRuleStatus = None
        self.RequestId = None

    def _deserialize(self, params):
        """Fill in the response fields from the *params* dict."""
        raw_rules = params.get("CCFrequencyRuleList")
        if raw_rules is not None:
            self.CCFrequencyRuleList = []
            for raw in raw_rules:
                rule = CCFrequencyRule()
                rule._deserialize(raw)
                self.CCFrequencyRuleList.append(rule)
        self.CCFrequencyRuleStatus = params.get("CCFrequencyRuleStatus")
        self.RequestId = params.get("RequestId")
class DescribeCCIpAllowDenyRequest(AbstractModel):
    """DescribeCCIpAllowDeny request structure."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced IP; bgp: dedicated package; bgp-multip: shared package; net: Anti-DDoS Advanced IP Pro)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param Type: Allow or deny list; one of white (allowlist), black (blocklist).
Note: this array may hold only one value; the two lists cannot be fetched together.
        :type Type: list of str
        :param Limit: Pagination parameter
        :type Limit: int
        :param Offset: Pagination parameter
        :type Offset: int
        :param Protocol: Optional. CC protection protocol; one of http (HTTP CC protection), https (HTTPS CC protection).
        :type Protocol: str
        """
        self.Business = None
        self.Id = None
        self.Type = None
        self.Limit = None
        self.Offset = None
        self.Protocol = None

    def _deserialize(self, params):
        """Populate the request fields from the *params* dict and warn
        about keys that do not map to a declared attribute."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.Type = params.get("Type")
        self.Limit = params.get("Limit")
        self.Offset = params.get("Offset")
        self.Protocol = params.get("Protocol")
        # Typo fix in the warning ("fileds" -> "fields"); set difference
        # replaces the original manual removal loop.
        unused = set(params.keys()) - set(vars(self).keys())
        if len(unused) > 0:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeCCIpAllowDenyResponse(AbstractModel):
    """DescribeCCIpAllowDeny response structure."""

    def __init__(self):
        r"""
        :param Data: Deprecated; replaced by the RecordList field. Do not use.
        :type Data: list of KeyValue
        :param Total: Number of records
        :type Total: int
        :param RecordList: Allow/deny list records.
"Key":"ip": the value is an IP;
"Key":"domain": the value is a domain;
"Key":"type": the value is the list type (white = allowlist, block = blocklist);
"Key":"protocol": the value is the CC protection protocol (http or https);
        :type RecordList: list of KeyValueRecord
        :param RequestId: Unique request ID returned for every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.Data = None
        self.Total = None
        self.RecordList = None
        self.RequestId = None

    def _deserialize(self, params):
        """Fill in the response fields from the *params* dict."""
        raw_data = params.get("Data")
        if raw_data is not None:
            self.Data = []
            for raw in raw_data:
                pair = KeyValue()
                pair._deserialize(raw)
                self.Data.append(pair)
        self.Total = params.get("Total")
        raw_records = params.get("RecordList")
        if raw_records is not None:
            self.RecordList = []
            for raw in raw_records:
                record = KeyValueRecord()
                record._deserialize(raw)
                self.RecordList.append(record)
        self.RequestId = params.get("RequestId")
class DescribeCCSelfDefinePolicyRequest(AbstractModel):
    """DescribeCCSelfDefinePolicy request structure."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgp: Anti-DDoS Pro; bgp-multip: shared package)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param Limit: Number of records to fetch
        :type Limit: int
        :param Offset: Offset
        :type Offset: int
        """
        self.Business = None
        self.Id = None
        self.Limit = None
        self.Offset = None

    def _deserialize(self, params):
        """Populate the request fields from the *params* dict and warn
        about keys that do not map to a declared attribute."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.Limit = params.get("Limit")
        self.Offset = params.get("Offset")
        # Typo fix in the warning ("fileds" -> "fields"); set difference
        # replaces the original manual removal loop.
        unused = set(params.keys()) - set(vars(self).keys())
        if len(unused) > 0:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeCCSelfDefinePolicyResponse(AbstractModel):
    """DescribeCCSelfDefinePolicy response structure."""

    def __init__(self):
        r"""
        :param Total: Total number of custom rules
        :type Total: int
        :param Policys: Policy list
        :type Policys: list of CCPolicy
        :param RequestId: Unique request ID returned for every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.Total = None
        self.Policys = None
        self.RequestId = None

    def _deserialize(self, params):
        """Fill in the response fields from the *params* dict."""
        self.Total = params.get("Total")
        raw_policies = params.get("Policys")
        if raw_policies is not None:
            self.Policys = []
            for raw in raw_policies:
                policy = CCPolicy()
                policy._deserialize(raw)
                self.Policys.append(policy)
        self.RequestId = params.get("RequestId")
class DescribeCCTrendRequest(AbstractModel):
    """DescribeCCTrend request structure."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced IP; bgp: dedicated package; bgp-multip: shared package; net: Anti-DDoS Advanced IP Pro; basic: basic DDoS protection)
        :type Business: str
        :param Ip: IP of the resource
        :type Ip: str
        :param MetricName: Metric; one of inqps (peak total requests), dropqps (peak attack requests)
        :type MetricName: str
        :param Period: Statistics granularity; one of 300 (5 minutes), 3600 (hour), 86400 (day)
        :type Period: int
        :param StartTime: Statistics start time
        :type StartTime: str
        :param EndTime: Statistics end time
        :type EndTime: str
        :param Id: Resource instance ID. Leave empty when Business is basic (basic protection has no resource instance).
        :type Id: str
        :param Domain: Optional domain
        :type Domain: str
        """
        self.Business = None
        self.Ip = None
        self.MetricName = None
        self.Period = None
        self.StartTime = None
        self.EndTime = None
        self.Id = None
        self.Domain = None

    def _deserialize(self, params):
        """Populate the request fields from the *params* dict and warn
        about keys that do not map to a declared attribute."""
        self.Business = params.get("Business")
        self.Ip = params.get("Ip")
        self.MetricName = params.get("MetricName")
        self.Period = params.get("Period")
        self.StartTime = params.get("StartTime")
        self.EndTime = params.get("EndTime")
        self.Id = params.get("Id")
        self.Domain = params.get("Domain")
        # Typo fix in the warning ("fileds" -> "fields"); set difference
        # replaces the original manual removal loop.
        unused = set(params.keys()) - set(vars(self).keys())
        if len(unused) > 0:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeCCTrendResponse(AbstractModel):
    """DescribeCCTrend response structure."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced IP; bgp: dedicated package; bgp-multip: shared package; net: Anti-DDoS Advanced IP Pro; basic: basic DDoS protection)
        :type Business: str
        :param Id: Resource ID
Note: this field may return null, meaning no valid value was found.
        :type Id: str
        :param Ip: IP of the resource
        :type Ip: str
        :param MetricName: Metric; one of inqps (peak total requests), dropqps (peak attack requests)
        :type MetricName: str
        :param Period: Statistics granularity; one of 300 (5 minutes), 3600 (hour), 86400 (day)
        :type Period: int
        :param StartTime: Statistics start time
        :type StartTime: str
        :param EndTime: Statistics end time
        :type EndTime: str
        :param Data: Array of values
        :type Data: list of int non-negative
        :param Count: Number of values
        :type Count: int
        :param RequestId: Unique request ID returned for every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.Business = None
        self.Id = None
        self.Ip = None
        self.MetricName = None
        self.Period = None
        self.StartTime = None
        self.EndTime = None
        self.Data = None
        self.Count = None
        self.RequestId = None

    def _deserialize(self, params):
        """Fill in the response fields from the *params* dict."""
        for field in ("Business", "Id", "Ip", "MetricName", "Period",
                      "StartTime", "EndTime", "Data", "Count", "RequestId"):
            setattr(self, field, params.get(field))
class DescribeCCUrlAllowRequest(AbstractModel):
    """DescribeCCUrlAllow request structure."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced IP; bgp: dedicated package; bgp-multip: shared package; net: Anti-DDoS Advanced IP Pro)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param Type: List type; only white (allowlist) is currently supported.
Note: this array may hold only one value, and it must be white.
        :type Type: list of str
        :param Limit: Pagination parameter
        :type Limit: int
        :param Offset: Pagination parameter
        :type Offset: int
        :param Protocol: Optional. CC protection protocol; one of http (HTTP CC protection), https (HTTPS CC protection).
        :type Protocol: str
        """
        self.Business = None
        self.Id = None
        self.Type = None
        self.Limit = None
        self.Offset = None
        self.Protocol = None

    def _deserialize(self, params):
        """Populate the request fields from the *params* dict and warn
        about keys that do not map to a declared attribute."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.Type = params.get("Type")
        self.Limit = params.get("Limit")
        self.Offset = params.get("Offset")
        self.Protocol = params.get("Protocol")
        # Typo fix in the warning ("fileds" -> "fields"); set difference
        # replaces the original manual removal loop.
        unused = set(params.keys()) - set(vars(self).keys())
        if len(unused) > 0:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeCCUrlAllowResponse(AbstractModel):
    """DescribeCCUrlAllow response structure."""

    def __init__(self):
        r"""
        :param Data: Deprecated; replaced by the RecordList field. Do not use.
        :type Data: list of KeyValue
        :param Total: Total number of records
        :type Total: int
        :param RecordList: Allow/deny list records.
"Key":"url": the value is a URL;
"Key":"domain": the value is a domain;
"Key":"type": the value is the list type (white = allowlist, block = blocklist);
"Key":"protocol": the value is the CC protection type (HTTP or HTTPS domain protection);
        :type RecordList: list of KeyValueRecord
        :param RequestId: Unique request ID returned for every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.Data = None
        self.Total = None
        self.RecordList = None
        self.RequestId = None

    def _deserialize(self, params):
        """Fill in the response fields from the *params* dict."""
        raw_data = params.get("Data")
        if raw_data is not None:
            self.Data = []
            for raw in raw_data:
                pair = KeyValue()
                pair._deserialize(raw)
                self.Data.append(pair)
        self.Total = params.get("Total")
        raw_records = params.get("RecordList")
        if raw_records is not None:
            self.RecordList = []
            for raw in raw_records:
                record = KeyValueRecord()
                record._deserialize(raw)
                self.RecordList.append(record)
        self.RequestId = params.get("RequestId")
class DescribeDDoSAlarmThresholdRequest(AbstractModel):
    """DescribeDDoSAlarmThreshold request structure."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (shield: chess/board-game shield; bgpip: Anti-DDoS Advanced IP; bgp: Anti-DDoS Pro; bgp-multip: multi-IP Anti-DDoS Pro; net: Anti-DDoS Advanced IP Pro)
        :type Business: str
        :param RsId: Resource ID, as a string
        :type RsId: str
        """
        self.Business = None
        self.RsId = None

    def _deserialize(self, params):
        """Populate the request fields from the *params* dict and warn
        about keys that do not map to a declared attribute."""
        self.Business = params.get("Business")
        self.RsId = params.get("RsId")
        # Typo fix in the warning ("fileds" -> "fields"); set difference
        # replaces the original manual removal loop.
        unused = set(params.keys()) - set(vars(self).keys())
        if len(unused) > 0:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeDDoSAlarmThresholdResponse(AbstractModel):
    """DescribeDDoSAlarmThreshold response structure."""

    def __init__(self):
        r"""
        :param DDoSAlarmThreshold: DDoS alarm threshold
        :type DDoSAlarmThreshold: :class:`tencentcloud.dayu.v20180709.models.DDoSAlarmThreshold`
        :param RequestId: Unique request ID returned for every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.DDoSAlarmThreshold = None
        self.RequestId = None

    def _deserialize(self, params):
        """Fill in the response fields from the *params* dict."""
        raw_threshold = params.get("DDoSAlarmThreshold")
        if raw_threshold is not None:
            threshold = DDoSAlarmThreshold()
            threshold._deserialize(raw_threshold)
            self.DDoSAlarmThreshold = threshold
        self.RequestId = params.get("RequestId")
class DescribeDDoSAttackIPRegionMapRequest(AbstractModel):
    """DescribeDDoSAttackIPRegionMap request structure."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (shield: chess/board-game shield; bgpip: Anti-DDoS Advanced IP; bgp: Anti-DDoS Pro; bgp-multip: multi-IP Anti-DDoS Pro; net: Anti-DDoS Advanced IP Pro)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param StartTime: Statistics start time
        :type StartTime: str
        :param EndTime: Statistics end time; the maximum range that can be queried is half a year
        :type EndTime: str
        :param IpList: Optional. Attack sources of a specific IP of the resource.
        :type IpList: list of str
        """
        self.Business = None
        self.Id = None
        self.StartTime = None
        self.EndTime = None
        self.IpList = None

    def _deserialize(self, params):
        """Populate the request fields from the *params* dict and warn
        about keys that do not map to a declared attribute."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.StartTime = params.get("StartTime")
        self.EndTime = params.get("EndTime")
        self.IpList = params.get("IpList")
        # Typo fix in the warning ("fileds" -> "fields"); set difference
        # replaces the original manual removal loop.
        unused = set(params.keys()) - set(vars(self).keys())
        if len(unused) > 0:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeDDoSAttackIPRegionMapResponse(AbstractModel):
    """DescribeDDoSAttackIPRegionMap response structure."""

    def __init__(self):
        r"""
        :param NationCount: Global regional distribution data
        :type NationCount: list of KeyValueRecord
        :param ProvinceCount: Domestic (province-level) regional distribution data
        :type ProvinceCount: list of KeyValueRecord
        :param RequestId: Unique request ID returned for every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.NationCount = None
        self.ProvinceCount = None
        self.RequestId = None

    def _deserialize(self, params):
        """Fill in the response fields from the *params* dict."""
        for attr in ("NationCount", "ProvinceCount"):
            raw_records = params.get(attr)
            if raw_records is not None:
                records = []
                for raw in raw_records:
                    record = KeyValueRecord()
                    record._deserialize(raw)
                    records.append(record)
                setattr(self, attr, records)
        self.RequestId = params.get("RequestId")
class DescribeDDoSAttackSourceRequest(AbstractModel):
    """DescribeDDoSAttackSource request structure."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced IP; bgp: dedicated package; bgp-multip: shared package; net: Anti-DDoS Advanced IP Pro)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param StartTime: Start time
        :type StartTime: str
        :param EndTime: End time
        :type EndTime: str
        :param Limit: Page size; 0 means no pagination
        :type Limit: int
        :param Offset: Page offset: (page number - 1) * page size
        :type Offset: int
        :param IpList: Optional. Attack sources of a specific IP of the resource.
        :type IpList: list of str
        """
        self.Business = None
        self.Id = None
        self.StartTime = None
        self.EndTime = None
        self.Limit = None
        self.Offset = None
        self.IpList = None

    def _deserialize(self, params):
        """Populate the request fields from the *params* dict and warn
        about keys that do not map to a declared attribute."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.StartTime = params.get("StartTime")
        self.EndTime = params.get("EndTime")
        self.Limit = params.get("Limit")
        self.Offset = params.get("Offset")
        self.IpList = params.get("IpList")
        # Typo fix in the warning ("fileds" -> "fields"); set difference
        # replaces the original manual removal loop.
        unused = set(params.keys()) - set(vars(self).keys())
        if len(unused) > 0:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeDDoSAttackSourceResponse(AbstractModel):
    """DescribeDDoSAttackSource response structure."""

    def __init__(self):
        r"""
        :param Total: Total number of attack sources
        :type Total: int
        :param AttackSourceList: List of attack sources
        :type AttackSourceList: list of DDoSAttackSourceRecord
        :param RequestId: Unique request ID returned for every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.Total = None
        self.AttackSourceList = None
        self.RequestId = None

    def _deserialize(self, params):
        """Fill in the response fields from the *params* dict."""
        self.Total = params.get("Total")
        raw_sources = params.get("AttackSourceList")
        if raw_sources is not None:
            self.AttackSourceList = []
            for raw in raw_sources:
                source = DDoSAttackSourceRecord()
                source._deserialize(raw)
                self.AttackSourceList.append(source)
        self.RequestId = params.get("RequestId")
class DescribeDDoSCountRequest(AbstractModel):
    """DescribeDDoSCount request structure."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced IP; bgp: dedicated package; bgp-multip: shared package; net: Anti-DDoS Advanced IP Pro)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param Ip: IP of the resource
        :type Ip: str
        :param StartTime: Statistics start time
        :type StartTime: str
        :param EndTime: Statistics end time
        :type EndTime: str
        :param MetricName: Metric; one of traffic (attack protocol traffic, in KB), pkg (attack protocol packet count), classnum (attack event count)
        :type MetricName: str
        """
        self.Business = None
        self.Id = None
        self.Ip = None
        self.StartTime = None
        self.EndTime = None
        self.MetricName = None

    def _deserialize(self, params):
        """Populate the request fields from the *params* dict and warn
        about keys that do not map to a declared attribute."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.Ip = params.get("Ip")
        self.StartTime = params.get("StartTime")
        self.EndTime = params.get("EndTime")
        self.MetricName = params.get("MetricName")
        # Typo fix in the warning ("fileds" -> "fields"); set difference
        # replaces the original manual removal loop.
        unused = set(params.keys()) - set(vars(self).keys())
        if len(unused) > 0:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeDDoSCountResponse(AbstractModel):
    """DescribeDDoSCount response structure."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced IP; bgp: dedicated package; bgp-multip: shared package; net: Anti-DDoS Advanced IP Pro)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param Ip: IP of the resource
        :type Ip: str
        :param StartTime: Statistics start time
        :type StartTime: str
        :param EndTime: Statistics end time
        :type EndTime: str
        :param MetricName: Metric; one of traffic (attack protocol traffic, in KB), pkg (attack protocol packet count), classnum (attack event count)
        :type MetricName: str
        :param Data: Array of key/value pairs. Keys are as follows.
When MetricName is traffic:
key "TcpKBSum": TCP packet traffic, in KB
key "UdpKBSum": UDP packet traffic, in KB
key "IcmpKBSum": ICMP packet traffic, in KB
key "OtherKBSum": other packet traffic, in KB
When MetricName is pkg:
key "TcpPacketSum": number of TCP packets
key "UdpPacketSum": number of UDP packets
key "IcmpPacketSum": number of ICMP packets
key "OtherPacketSum": number of other packets
When MetricName is classnum:
the key is the attack event type; key "UNKNOWNFLOOD" means an unknown attack event
        :type Data: list of KeyValue
        :param RequestId: Unique request ID returned for every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.Business = None
        self.Id = None
        self.Ip = None
        self.StartTime = None
        self.EndTime = None
        self.MetricName = None
        self.Data = None
        self.RequestId = None

    def _deserialize(self, params):
        """Fill in the response fields from the *params* dict."""
        for field in ("Business", "Id", "Ip", "StartTime", "EndTime",
                      "MetricName"):
            setattr(self, field, params.get(field))
        raw_pairs = params.get("Data")
        if raw_pairs is not None:
            self.Data = []
            for raw in raw_pairs:
                pair = KeyValue()
                pair._deserialize(raw)
                self.Data.append(pair)
        self.RequestId = params.get("RequestId")
class DescribeDDoSDefendStatusRequest(AbstractModel):
    """DescribeDDoSDefendStatus request structure."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (basic: basic protection; bgp: dedicated package; bgp-multip: shared package; bgpip: Anti-DDoS Advanced IP; net: Anti-DDoS Advanced IP Pro)
        :type Business: str
        :param Id: Resource instance ID; required only when Business is not basic protection.
        :type Id: str
        :param Ip: IP under basic protection; required only when Business is basic protection.
        :type Ip: str
        :param BizType: Required only when Business is basic protection. Product type the IP belongs to; one of public (CVM), bm (bare metal), eni (ENI), vpngw (VPN gateway), natgw (NAT gateway), waf (web application security), fpc (finance), gaap (GAAP), other (hosted IP).
        :type BizType: str
        :param DeviceType: Required only when Business is basic protection. Product sub-type the IP belongs to; one of cvm (CVM), lb (load balancer), eni (ENI), vpngw (VPN), natgw (NAT), waf (WAF), fpc (finance), gaap (GAAP), other (hosted IP), eip (bare-metal elastic IP).
        :type DeviceType: str
        :param InstanceId: Required only when Business is basic protection. Resource instance ID of the IP; mandatory when binding a new IP. For example, for an ENI IP pass the ENI ID (eni-*).
        :type InstanceId: str
        :param IPRegion: Required only when Business is basic protection. Region of the IP; one of:
"bj": North China (Beijing)
"cd": Southwest China (Chengdu)
"cq": Southwest China (Chongqing)
"gz": South China (Guangzhou)
"gzopen": South China (Guangzhou Open)
"hk": Hong Kong, China
"kr": Southeast Asia (Seoul)
"sh": East China (Shanghai)
"shjr": East China (Shanghai Finance)
"szjr": South China (Shenzhen Finance)
"sg": Southeast Asia (Singapore)
"th": Southeast Asia (Thailand)
"de": Europe (Germany)
"usw": Western US (Silicon Valley)
"ca": North America (Toronto)
"jp": Japan
"hzec": Hangzhou
"in": India
"use": Eastern US (Virginia)
"ru": Russia
"tpe": Taiwan, China
"nj": Nanjing
        :type IPRegion: str
        """
        self.Business = None
        self.Id = None
        self.Ip = None
        self.BizType = None
        self.DeviceType = None
        self.InstanceId = None
        self.IPRegion = None

    def _deserialize(self, params):
        """Populate the request fields from the *params* dict and warn
        about keys that do not map to a declared attribute."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.Ip = params.get("Ip")
        self.BizType = params.get("BizType")
        self.DeviceType = params.get("DeviceType")
        self.InstanceId = params.get("InstanceId")
        self.IPRegion = params.get("IPRegion")
        # Typo fix in the warning ("fileds" -> "fields"); set difference
        # replaces the original manual removal loop.
        unused = set(params.keys()) - set(vars(self).keys())
        if len(unused) > 0:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeDDoSDefendStatusResponse(AbstractModel):
    """DescribeDDoSDefendStatus response structure.

    :param DefendStatus: Protection status: 0 = disabled, 1 = enabled. May be null.
    :type DefendStatus: int
    :param UndefendExpire: Expiry time of a temporary protection shutdown; empty while protection is enabled. May be null.
    :type UndefendExpire: str
    :param ShowFlag: Console display flag: 1 = feature shown, 0 = feature hidden. May be null.
    :type ShowFlag: int
    :param RequestId: Unique request ID returned with every request; provide it when reporting issues
    :type RequestId: str
    """
    def __init__(self):
        # Populated by _deserialize from the API payload.
        self.DefendStatus = None
        self.UndefendExpire = None
        self.ShowFlag = None
        self.RequestId = None
    def _deserialize(self, params):
        for field in ("DefendStatus", "UndefendExpire", "ShowFlag", "RequestId"):
            setattr(self, field, params.get(field))
class DescribeDDoSEvInfoRequest(AbstractModel):
    """DescribeDDoSEvInfo request structure.

    :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; bgp: dedicated package; bgp-multip: shared package; net: Anti-DDoS Pro)
    :type Business: str
    :param Id: Resource ID
    :type Id: str
    :param Ip: Resource IP
    :type Ip: str
    :param StartTime: Attack start time
    :type StartTime: str
    :param EndTime: Attack end time
    :type EndTime: str
    """
    def __init__(self):
        self.Business = None
        self.Id = None
        self.Ip = None
        self.StartTime = None
        self.EndTime = None
    def _deserialize(self, params):
        for field in ("Business", "Id", "Ip", "StartTime", "EndTime"):
            setattr(self, field, params.get(field))
        # Warn about payload keys that have no matching attribute on this model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class DescribeDDoSEvInfoResponse(AbstractModel):
    """DescribeDDoSEvInfo response structure.

    :param Business: Dayu sub-product code (bgpip / bgp / bgp-multip / net)
    :type Business: str
    :param Id: Resource ID
    :type Id: str
    :param Ip: Resource IP
    :type Ip: str
    :param StartTime: Attack start time
    :type StartTime: str
    :param EndTime: Attack end time
    :type EndTime: str
    :param TcpPacketSum: TCP attack packet count
    :type TcpPacketSum: int
    :param TcpKBSum: TCP attack traffic in KB
    :type TcpKBSum: int
    :param UdpPacketSum: UDP attack packet count
    :type UdpPacketSum: int
    :param UdpKBSum: UDP attack traffic in KB
    :type UdpKBSum: int
    :param IcmpPacketSum: ICMP attack packet count
    :type IcmpPacketSum: int
    :param IcmpKBSum: ICMP attack traffic in KB
    :type IcmpKBSum: int
    :param OtherPacketSum: Other-protocol attack packet count
    :type OtherPacketSum: int
    :param OtherKBSum: Other-protocol attack traffic in KB
    :type OtherKBSum: int
    :param TotalTraffic: Cumulative attack traffic in KB
    :type TotalTraffic: int
    :param Mbps: Peak attack traffic bandwidth
    :type Mbps: int
    :param Pps: Peak attack packet rate
    :type Pps: int
    :param PcapUrl: PCAP file download links
    :type PcapUrl: list of str
    :param RequestId: Unique request ID returned with every request; provide it when reporting issues
    :type RequestId: str
    """
    # Payload keys mirrored one-to-one onto attributes by _deserialize.
    _FIELDS = ("Business", "Id", "Ip", "StartTime", "EndTime",
               "TcpPacketSum", "TcpKBSum", "UdpPacketSum", "UdpKBSum",
               "IcmpPacketSum", "IcmpKBSum", "OtherPacketSum", "OtherKBSum",
               "TotalTraffic", "Mbps", "Pps", "PcapUrl", "RequestId")
    def __init__(self):
        for field in self._FIELDS:
            setattr(self, field, None)
    def _deserialize(self, params):
        for field in self._FIELDS:
            setattr(self, field, params.get(field))
class DescribeDDoSEvListRequest(AbstractModel):
    """DescribeDDoSEvList request structure.

    :param Business: Dayu sub-product code (bgpip / bgp / bgp-multip / net / basic for DDoS basic protection)
    :type Business: str
    :param StartTime: Start time
    :type StartTime: str
    :param EndTime: End time
    :type EndTime: str
    :param Id: Resource instance ID; leave empty when Business is basic (basic protection has no instance)
    :type Id: str
    :param IpList: Resource IPs
    :type IpList: list of str
    :param OverLoad: Whether the elastic protection peak was exceeded: yes / no; empty string disables the filter
    :type OverLoad: str
    :param Limit: Page size; 0 disables pagination
    :type Limit: int
    :param Offset: Page offset, (page number - 1) * page size
    :type Offset: int
    """
    def __init__(self):
        self.Business = None
        self.StartTime = None
        self.EndTime = None
        self.Id = None
        self.IpList = None
        self.OverLoad = None
        self.Limit = None
        self.Offset = None
    def _deserialize(self, params):
        for field in ("Business", "StartTime", "EndTime", "Id", "IpList", "OverLoad", "Limit", "Offset"):
            setattr(self, field, params.get(field))
        # Warn about payload keys that have no matching attribute on this model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class DescribeDDoSEvListResponse(AbstractModel):
    """DescribeDDoSEvList response structure.

    :param Business: Dayu sub-product code (bgpip / bgp / bgp-multip / net / basic)
    :type Business: str
    :param Id: Resource ID
    :type Id: str
    :param IpList: Resource IPs; may be null
    :type IpList: list of str
    :param StartTime: Start time
    :type StartTime: str
    :param EndTime: End time
    :type EndTime: str
    :param Data: DDoS attack event list
    :type Data: list of DDoSEventRecord
    :param Total: Total record count
    :type Total: int
    :param RequestId: Unique request ID returned with every request; provide it when reporting issues
    :type RequestId: str
    """
    def __init__(self):
        self.Business = None
        self.Id = None
        self.IpList = None
        self.StartTime = None
        self.EndTime = None
        self.Data = None
        self.Total = None
        self.RequestId = None
    def _deserialize(self, params):
        for field in ("Business", "Id", "IpList", "StartTime", "EndTime"):
            setattr(self, field, params.get(field))
        # "Data" is a list of DDoSEventRecord sub-models; stays None when absent.
        raw_data = params.get("Data")
        if raw_data is not None:
            self.Data = []
            for entry in raw_data:
                record = DDoSEventRecord()
                record._deserialize(entry)
                self.Data.append(record)
        self.Total = params.get("Total")
        self.RequestId = params.get("RequestId")
class DescribeDDoSIpLogRequest(AbstractModel):
    """DescribeDDoSIpLog request structure.

    :param Business: Dayu sub-product code (net: Anti-DDoS Pro)
    :type Business: str
    :param Id: Resource ID
    :type Id: str
    :param Ip: Resource IP
    :type Ip: str
    :param StartTime: Attack start time
    :type StartTime: str
    :param EndTime: Attack end time
    :type EndTime: str
    """
    def __init__(self):
        self.Business = None
        self.Id = None
        self.Ip = None
        self.StartTime = None
        self.EndTime = None
    def _deserialize(self, params):
        for field in ("Business", "Id", "Ip", "StartTime", "EndTime"):
            setattr(self, field, params.get(field))
        # Warn about payload keys that have no matching attribute on this model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class DescribeDDoSIpLogResponse(AbstractModel):
    """DescribeDDoSIpLog response structure.

    :param Business: Dayu sub-product code (net: Anti-DDoS Pro)
    :type Business: str
    :param Id: Resource ID
    :type Id: str
    :param Ip: Resource IP
    :type Ip: str
    :param StartTime: Attack start time
    :type StartTime: str
    :param EndTime: Attack end time
    :type EndTime: str
    :param Data: IP attack log as KeyValue records: key "LogTime" holds the log time, key "LogMessage" holds the log content
    :type Data: list of KeyValueRecord
    :param RequestId: Unique request ID returned with every request; provide it when reporting issues
    :type RequestId: str
    """
    def __init__(self):
        self.Business = None
        self.Id = None
        self.Ip = None
        self.StartTime = None
        self.EndTime = None
        self.Data = None
        self.RequestId = None
    def _deserialize(self, params):
        for field in ("Business", "Id", "Ip", "StartTime", "EndTime"):
            setattr(self, field, params.get(field))
        # "Data" is a list of KeyValueRecord sub-models; stays None when absent.
        raw_data = params.get("Data")
        if raw_data is not None:
            self.Data = []
            for entry in raw_data:
                record = KeyValueRecord()
                record._deserialize(entry)
                self.Data.append(record)
        self.RequestId = params.get("RequestId")
class DescribeDDoSNetCountRequest(AbstractModel):
    """DescribeDDoSNetCount request structure.

    :param Business: Dayu sub-product code (net: Anti-DDoS Pro)
    :type Business: str
    :param Id: Resource ID
    :type Id: str
    :param StartTime: Statistics start time
    :type StartTime: str
    :param EndTime: Statistics end time
    :type EndTime: str
    :param MetricName: Metric: traffic (attack protocol traffic, KB), pkg (attack protocol packets), classnum (attack event count)
    :type MetricName: str
    """
    def __init__(self):
        self.Business = None
        self.Id = None
        self.StartTime = None
        self.EndTime = None
        self.MetricName = None
    def _deserialize(self, params):
        for field in ("Business", "Id", "StartTime", "EndTime", "MetricName"):
            setattr(self, field, params.get(field))
        # Warn about payload keys that have no matching attribute on this model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class DescribeDDoSNetCountResponse(AbstractModel):
    """DescribeDDoSNetCount response structure.

    :param Business: Dayu sub-product code (net: Anti-DDoS Pro)
    :type Business: str
    :param Id: Resource ID
    :type Id: str
    :param StartTime: Statistics start time
    :type StartTime: str
    :param EndTime: Statistics end time
    :type EndTime: str
    :param MetricName: Metric: traffic (attack protocol traffic, KB), pkg (attack protocol packets), classnum (attack event count)
    :type MetricName: str
    :param Data: Key-value array; keys depend on MetricName (e.g. TcpKBSum/UdpKBSum/IcmpKBSum/OtherKBSum for traffic, *PacketSum for pkg, attack event type names for classnum)
    :type Data: list of KeyValue
    :param RequestId: Unique request ID returned with every request; provide it when reporting issues
    :type RequestId: str
    """
    def __init__(self):
        self.Business = None
        self.Id = None
        self.StartTime = None
        self.EndTime = None
        self.MetricName = None
        self.Data = None
        self.RequestId = None
    def _deserialize(self, params):
        for field in ("Business", "Id", "StartTime", "EndTime", "MetricName"):
            setattr(self, field, params.get(field))
        # "Data" is a list of KeyValue sub-models; stays None when absent.
        raw_data = params.get("Data")
        if raw_data is not None:
            self.Data = []
            for entry in raw_data:
                record = KeyValue()
                record._deserialize(entry)
                self.Data.append(record)
        self.RequestId = params.get("RequestId")
class DescribeDDoSNetEvInfoRequest(AbstractModel):
    """DescribeDDoSNetEvInfo request structure.

    :param Business: Dayu sub-product code (net: Anti-DDoS Pro)
    :type Business: str
    :param Id: Resource ID
    :type Id: str
    :param StartTime: Attack start time
    :type StartTime: str
    :param EndTime: Attack end time
    :type EndTime: str
    """
    def __init__(self):
        self.Business = None
        self.Id = None
        self.StartTime = None
        self.EndTime = None
    def _deserialize(self, params):
        for field in ("Business", "Id", "StartTime", "EndTime"):
            setattr(self, field, params.get(field))
        # Warn about payload keys that have no matching attribute on this model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class DescribeDDoSNetEvInfoResponse(AbstractModel):
    """DescribeDDoSNetEvInfo response structure.

    :param Business: Dayu sub-product code (net: Anti-DDoS Pro)
    :type Business: str
    :param Id: Resource ID
    :type Id: str
    :param StartTime: Attack start time
    :type StartTime: str
    :param EndTime: Attack end time
    :type EndTime: str
    :param TcpPacketSum: TCP attack packet count
    :type TcpPacketSum: int
    :param TcpKBSum: TCP attack traffic in KB
    :type TcpKBSum: int
    :param UdpPacketSum: UDP attack packet count
    :type UdpPacketSum: int
    :param UdpKBSum: UDP attack traffic in KB
    :type UdpKBSum: int
    :param IcmpPacketSum: ICMP attack packet count
    :type IcmpPacketSum: int
    :param IcmpKBSum: ICMP attack traffic in KB
    :type IcmpKBSum: int
    :param OtherPacketSum: Other-protocol attack packet count
    :type OtherPacketSum: int
    :param OtherKBSum: Other-protocol attack traffic in KB
    :type OtherKBSum: int
    :param TotalTraffic: Cumulative attack traffic in KB
    :type TotalTraffic: int
    :param Mbps: Peak attack traffic bandwidth
    :type Mbps: int
    :param Pps: Peak attack packet rate
    :type Pps: int
    :param PcapUrl: PCAP file download links
    :type PcapUrl: list of str
    :param RequestId: Unique request ID returned with every request; provide it when reporting issues
    :type RequestId: str
    """
    # Payload keys mirrored one-to-one onto attributes by _deserialize.
    _FIELDS = ("Business", "Id", "StartTime", "EndTime",
               "TcpPacketSum", "TcpKBSum", "UdpPacketSum", "UdpKBSum",
               "IcmpPacketSum", "IcmpKBSum", "OtherPacketSum", "OtherKBSum",
               "TotalTraffic", "Mbps", "Pps", "PcapUrl", "RequestId")
    def __init__(self):
        for field in self._FIELDS:
            setattr(self, field, None)
    def _deserialize(self, params):
        for field in self._FIELDS:
            setattr(self, field, params.get(field))
class DescribeDDoSNetEvListRequest(AbstractModel):
    """DescribeDDoSNetEvList request structure.

    :param Business: Dayu sub-product code (net: Anti-DDoS Pro)
    :type Business: str
    :param Id: Resource ID
    :type Id: str
    :param StartTime: Start time
    :type StartTime: str
    :param EndTime: End time
    :type EndTime: str
    :param Limit: Page size; 0 disables pagination
    :type Limit: int
    :param Offset: Page offset, (page number - 1) * page size
    :type Offset: int
    """
    def __init__(self):
        self.Business = None
        self.Id = None
        self.StartTime = None
        self.EndTime = None
        self.Limit = None
        self.Offset = None
    def _deserialize(self, params):
        for field in ("Business", "Id", "StartTime", "EndTime", "Limit", "Offset"):
            setattr(self, field, params.get(field))
        # Warn about payload keys that have no matching attribute on this model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class DescribeDDoSNetEvListResponse(AbstractModel):
    """DescribeDDoSNetEvList response structure.

    :param Business: Dayu sub-product code (net: Anti-DDoS Pro)
    :type Business: str
    :param Id: Resource ID
    :type Id: str
    :param StartTime: Start time
    :type StartTime: str
    :param EndTime: End time
    :type EndTime: str
    :param Data: DDoS attack event list
    :type Data: list of DDoSEventRecord
    :param Total: Total record count
    :type Total: int
    :param RequestId: Unique request ID returned with every request; provide it when reporting issues
    :type RequestId: str
    """
    def __init__(self):
        self.Business = None
        self.Id = None
        self.StartTime = None
        self.EndTime = None
        self.Data = None
        self.Total = None
        self.RequestId = None
    def _deserialize(self, params):
        for field in ("Business", "Id", "StartTime", "EndTime"):
            setattr(self, field, params.get(field))
        # "Data" is a list of DDoSEventRecord sub-models; stays None when absent.
        raw_data = params.get("Data")
        if raw_data is not None:
            self.Data = []
            for entry in raw_data:
                record = DDoSEventRecord()
                record._deserialize(entry)
                self.Data.append(record)
        self.Total = params.get("Total")
        self.RequestId = params.get("RequestId")
class DescribeDDoSNetIpLogRequest(AbstractModel):
    """DescribeDDoSNetIpLog request structure.

    :param Business: Dayu sub-product code (net: Anti-DDoS Pro)
    :type Business: str
    :param Id: Resource ID
    :type Id: str
    :param StartTime: Attack start time
    :type StartTime: str
    :param EndTime: Attack end time
    :type EndTime: str
    """
    def __init__(self):
        self.Business = None
        self.Id = None
        self.StartTime = None
        self.EndTime = None
    def _deserialize(self, params):
        for field in ("Business", "Id", "StartTime", "EndTime"):
            setattr(self, field, params.get(field))
        # Warn about payload keys that have no matching attribute on this model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class DescribeDDoSNetIpLogResponse(AbstractModel):
    """DescribeDDoSNetIpLog response structure.

    :param Business: Dayu sub-product code (net: Anti-DDoS Pro)
    :type Business: str
    :param Id: Resource ID
    :type Id: str
    :param StartTime: Attack start time
    :type StartTime: str
    :param EndTime: Attack end time
    :type EndTime: str
    :param Data: IP attack log as KeyValue records: key "LogTime" holds the log time, key "LogMessage" holds the log content
    :type Data: list of KeyValueRecord
    :param RequestId: Unique request ID returned with every request; provide it when reporting issues
    :type RequestId: str
    """
    def __init__(self):
        self.Business = None
        self.Id = None
        self.StartTime = None
        self.EndTime = None
        self.Data = None
        self.RequestId = None
    def _deserialize(self, params):
        for field in ("Business", "Id", "StartTime", "EndTime"):
            setattr(self, field, params.get(field))
        # "Data" is a list of KeyValueRecord sub-models; stays None when absent.
        raw_data = params.get("Data")
        if raw_data is not None:
            self.Data = []
            for entry in raw_data:
                record = KeyValueRecord()
                record._deserialize(entry)
                self.Data.append(record)
        self.RequestId = params.get("RequestId")
class DescribeDDoSNetTrendRequest(AbstractModel):
    """DescribeDDoSNetTrend request structure.

    :param Business: Dayu sub-product code (net: Anti-DDoS Pro)
    :type Business: str
    :param Id: Resource ID
    :type Id: str
    :param MetricName: Metric: bps (attack traffic bandwidth) or pps (attack packet rate)
    :type MetricName: str
    :param Period: Statistics granularity in seconds: 300 (5 min), 3600 (hour), 86400 (day)
    :type Period: int
    :param StartTime: Statistics start time
    :type StartTime: str
    :param EndTime: Statistics end time
    :type EndTime: str
    """
    def __init__(self):
        self.Business = None
        self.Id = None
        self.MetricName = None
        self.Period = None
        self.StartTime = None
        self.EndTime = None
    def _deserialize(self, params):
        for field in ("Business", "Id", "MetricName", "Period", "StartTime", "EndTime"):
            setattr(self, field, params.get(field))
        # Warn about payload keys that have no matching attribute on this model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class DescribeDDoSNetTrendResponse(AbstractModel):
    """DescribeDDoSNetTrend response structure.

    :param Business: Dayu sub-product code (net: Anti-DDoS Pro)
    :type Business: str
    :param Id: Resource ID
    :type Id: str
    :param MetricName: Metric: bps (attack traffic bandwidth) or pps (attack packet rate)
    :type MetricName: str
    :param Period: Statistics granularity in seconds: 300 (5 min), 3600 (hour), 86400 (day)
    :type Period: int
    :param StartTime: Statistics start time
    :type StartTime: str
    :param EndTime: Statistics end time
    :type EndTime: str
    :param Data: Value array
    :type Data: list of int non-negative
    :param Count: Number of values
    :type Count: int
    :param RequestId: Unique request ID returned with every request; provide it when reporting issues
    :type RequestId: str
    """
    # Payload keys mirrored one-to-one onto attributes by _deserialize.
    _FIELDS = ("Business", "Id", "MetricName", "Period",
               "StartTime", "EndTime", "Data", "Count", "RequestId")
    def __init__(self):
        for field in self._FIELDS:
            setattr(self, field, None)
    def _deserialize(self, params):
        for field in self._FIELDS:
            setattr(self, field, params.get(field))
class DescribeDDoSPolicyRequest(AbstractModel):
    """DescribeDDoSPolicy request structure.

    :param Business: Dayu sub-product code (bgpip / bgp / bgp-multip / net)
    :type Business: str
    :param Id: Optional resource ID; when set, only the advanced DDoS policy bound to that resource is returned
    :type Id: str
    """
    def __init__(self):
        self.Business = None
        self.Id = None
    def _deserialize(self, params):
        for field in ("Business", "Id"):
            setattr(self, field, params.get(field))
        # Warn about payload keys that have no matching attribute on this model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class DescribeDDoSPolicyResponse(AbstractModel):
    """DescribeDDoSPolicy response structure.

    :param DDosPolicyList: List of advanced DDoS policies
    :type DDosPolicyList: list of DDosPolicy
    :param RequestId: Unique request ID returned with every request; provide it when reporting issues
    :type RequestId: str
    """
    def __init__(self):
        self.DDosPolicyList = None
        self.RequestId = None
    def _deserialize(self, params):
        # "DDosPolicyList" is a list of DDosPolicy sub-models; stays None when absent.
        raw_policies = params.get("DDosPolicyList")
        if raw_policies is not None:
            self.DDosPolicyList = []
            for entry in raw_policies:
                policy = DDosPolicy()
                policy._deserialize(entry)
                self.DDosPolicyList.append(policy)
        self.RequestId = params.get("RequestId")
class DescribeDDoSTrendRequest(AbstractModel):
    """DescribeDDoSTrend request structure.

    :param Business: Dayu sub-product code (bgpip / bgp / bgp-multip / net / basic for DDoS basic protection)
    :type Business: str
    :param Ip: Resource instance IP
    :type Ip: str
    :param MetricName: Metric: bps (attack traffic bandwidth) or pps (attack packet rate)
    :type MetricName: str
    :param Period: Statistics granularity in seconds: 300 (5 min), 3600 (hour), 86400 (day)
    :type Period: int
    :param StartTime: Statistics start time
    :type StartTime: str
    :param EndTime: Statistics end time
    :type EndTime: str
    :param Id: Resource instance ID; leave empty when Business is basic (basic protection has no instance)
    :type Id: str
    """
    def __init__(self):
        self.Business = None
        self.Ip = None
        self.MetricName = None
        self.Period = None
        self.StartTime = None
        self.EndTime = None
        self.Id = None
    def _deserialize(self, params):
        for field in ("Business", "Ip", "MetricName", "Period", "StartTime", "EndTime", "Id"):
            setattr(self, field, params.get(field))
        # Warn about payload keys that have no matching attribute on this model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class DescribeDDoSTrendResponse(AbstractModel):
    """DescribeDDoSTrend response structure.

    :param Business: Dayu sub-product code (bgpip / bgp / bgp-multip / net / basic)
    :type Business: str
    :param Id: Resource ID; may be null
    :type Id: str
    :param Ip: Resource IP
    :type Ip: str
    :param MetricName: Metric: bps (attack traffic bandwidth) or pps (attack packet rate)
    :type MetricName: str
    :param Period: Statistics granularity in seconds: 300 (5 min), 3600 (hour), 86400 (day)
    :type Period: int
    :param StartTime: Statistics start time
    :type StartTime: str
    :param EndTime: Statistics end time
    :type EndTime: str
    :param Data: Value array; attack bandwidth in Mbps, packet rate in pps
    :type Data: list of int non-negative
    :param Count: Number of values
    :type Count: int
    :param RequestId: Unique request ID returned with every request; provide it when reporting issues
    :type RequestId: str
    """
    # Payload keys mirrored one-to-one onto attributes by _deserialize.
    _FIELDS = ("Business", "Id", "Ip", "MetricName", "Period",
               "StartTime", "EndTime", "Data", "Count", "RequestId")
    def __init__(self):
        for field in self._FIELDS:
            setattr(self, field, None)
    def _deserialize(self, params):
        for field in self._FIELDS:
            setattr(self, field, params.get(field))
class DescribeDDoSUsedStatisRequest(AbstractModel):
    """DescribeDDoSUsedStatis request structure.

    :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced)
    :type Business: str
    """
    def __init__(self):
        self.Business = None
    def _deserialize(self, params):
        self.Business = params.get("Business")
        # Warn about payload keys that have no matching attribute on this model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class DescribeDDoSUsedStatisResponse(AbstractModel):
    """DescribeDDoSUsedStatis response structure.

    :param Data: Key-value statistics: Days = days the resource has been in use; Attacks = number of DDoS protections
    :type Data: list of KeyValue
    :param RequestId: Unique request ID returned with every request; provide it when reporting issues
    :type RequestId: str
    """
    def __init__(self):
        self.Data = None
        self.RequestId = None
    def _deserialize(self, params):
        # "Data" is a list of KeyValue sub-models; stays None when absent.
        raw_data = params.get("Data")
        if raw_data is not None:
            self.Data = []
            for entry in raw_data:
                record = KeyValue()
                record._deserialize(entry)
                self.Data.append(record)
        self.RequestId = params.get("RequestId")
class DescribeIPProductInfoRequest(AbstractModel):
    """DescribeIPProductInfo request structure.

    :param Business: Dayu sub-product code (bgp: dedicated package; bgp-multip: shared package)
    :type Business: str
    :param IpList: IP list
    :type IpList: list of str
    """
    def __init__(self):
        self.Business = None
        self.IpList = None
    def _deserialize(self, params):
        for field in ("Business", "IpList"):
            setattr(self, field, params.get(field))
        # Warn about payload keys that have no matching attribute on this model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class DescribeIPProductInfoResponse(AbstractModel):
    """DescribeIPProductInfo response structure.

    :param Data: Cloud product info records (empty array when nothing matched). Keys: ProductName = instance name; ProductInstanceId = instance ID; ProductType = product type (cvm = CVM, clb = CLB); IP = instance IP
    :type Data: list of KeyValueRecord
    :param RequestId: Unique request ID returned with every request; provide it when reporting issues
    :type RequestId: str
    """
    def __init__(self):
        self.Data = None
        self.RequestId = None
    def _deserialize(self, params):
        # "Data" is a list of KeyValueRecord sub-models; stays None when absent.
        raw_data = params.get("Data")
        if raw_data is not None:
            self.Data = []
            for entry in raw_data:
                record = KeyValueRecord()
                record._deserialize(entry)
                self.Data.append(record)
        self.RequestId = params.get("RequestId")
class DescribeInsurePacksRequest(AbstractModel):
    """DescribeInsurePacks request structure.

    :param IdList: Optional guarantee-package IDs; set when fetching specific packages (e.g. insure-000000xe)
    :type IdList: list of str
    """
    def __init__(self):
        self.IdList = None
    def _deserialize(self, params):
        self.IdList = params.get("IdList")
        # Warn about payload keys that have no matching attribute on this model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class DescribeInsurePacksResponse(AbstractModel):
    """DescribeInsurePacks response structure.

    :param InsurePacks: Guarantee-package list
    :type InsurePacks: list of KeyValueRecord
    :param RequestId: Unique request ID returned with every request; provide it when reporting issues
    :type RequestId: str
    """
    def __init__(self):
        self.InsurePacks = None
        self.RequestId = None
    def _deserialize(self, params):
        # "InsurePacks" is a list of KeyValueRecord sub-models; stays None when absent.
        raw_packs = params.get("InsurePacks")
        if raw_packs is not None:
            self.InsurePacks = []
            for entry in raw_packs:
                record = KeyValueRecord()
                record._deserialize(entry)
                self.InsurePacks.append(record)
        self.RequestId = params.get("RequestId")
class DescribeIpBlockListRequest(AbstractModel):
    """DescribeIpBlockList request structure (no parameters)."""
class DescribeIpBlockListResponse(AbstractModel):
    """DescribeIpBlockList response structure.

    :param List: IP block list
    :type List: list of IpBlockData
    :param RequestId: Unique request ID returned with every request; provide it when reporting issues
    :type RequestId: str
    """
    def __init__(self):
        self.List = None
        self.RequestId = None
    def _deserialize(self, params):
        # "List" is a list of IpBlockData sub-models; stays None when absent.
        raw_list = params.get("List")
        if raw_list is not None:
            self.List = []
            for entry in raw_list:
                record = IpBlockData()
                record._deserialize(entry)
                self.List.append(record)
        self.RequestId = params.get("RequestId")
class DescribeIpUnBlockListRequest(AbstractModel):
    """DescribeIpUnBlockList request structure.

    :param BeginTime: Start time
    :type BeginTime: str
    :param EndTime: End time
    :type EndTime: str
    :param Ip: IP filter; applied only when non-empty
    :type Ip: str
    :param Paging: Pagination parameter (deprecated in favor of Limit/Offset); applied only when present
    :type Paging: :class:`tencentcloud.dayu.v20180709.models.Paging`
    :param Limit: Page size; 0 disables pagination
    :type Limit: int
    :param Offset: Page offset, (page number - 1) * page size
    :type Offset: int
    """
    def __init__(self):
        self.BeginTime = None
        self.EndTime = None
        self.Ip = None
        self.Paging = None
        self.Limit = None
        self.Offset = None
    def _deserialize(self, params):
        for field in ("BeginTime", "EndTime", "Ip"):
            setattr(self, field, params.get(field))
        # "Paging" is a nested Paging sub-model; stays None when absent.
        paging_payload = params.get("Paging")
        if paging_payload is not None:
            paging_obj = Paging()
            paging_obj._deserialize(paging_payload)
            self.Paging = paging_obj
        self.Limit = params.get("Limit")
        self.Offset = params.get("Offset")
        # Warn about payload keys that have no matching attribute on this model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class DescribeIpUnBlockListResponse(AbstractModel):
    """DescribeIpUnBlockList response structure.

    :param BeginTime: Start time
    :type BeginTime: str
    :param EndTime: End time
    :type EndTime: str
    :param List: IP unblock records
    :type List: list of IpUnBlockData
    :param Total: Total record count
    :type Total: int
    :param RequestId: Unique request ID returned with every request; provide it when reporting issues
    :type RequestId: str
    """
    def __init__(self):
        self.BeginTime = None
        self.EndTime = None
        self.List = None
        self.Total = None
        self.RequestId = None
    def _deserialize(self, params):
        self.BeginTime = params.get("BeginTime")
        self.EndTime = params.get("EndTime")
        # "List" is a list of IpUnBlockData sub-models; stays None when absent.
        raw_list = params.get("List")
        if raw_list is not None:
            self.List = []
            for entry in raw_list:
                record = IpUnBlockData()
                record._deserialize(entry)
                self.List.append(record)
        self.Total = params.get("Total")
        self.RequestId = params.get("RequestId")
class DescribeL4HealthConfigRequest(AbstractModel):
    """DescribeL4HealthConfig request structure.

    :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; net: Anti-DDoS Pro)
    :type Business: str
    :param Id: Resource ID
    :type Id: str
    :param RuleIdList: Rule IDs; omit or pass an empty array to export health-check config of every rule
    :type RuleIdList: list of str
    """
    def __init__(self):
        self.Business = None
        self.Id = None
        self.RuleIdList = None
    def _deserialize(self, params):
        for field in ("Business", "Id", "RuleIdList"):
            setattr(self, field, params.get(field))
        # Warn about payload keys that have no matching attribute on this model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class DescribeL4HealthConfigResponse(AbstractModel):
    """DescribeL4HealthConfig response structure.

    :param HealthConfig: Array of layer-4 health-check configurations
    :type HealthConfig: list of L4HealthConfig
    :param RequestId: Unique request ID returned with every request; provide it when reporting issues
    :type RequestId: str
    """
    def __init__(self):
        self.HealthConfig = None
        self.RequestId = None
    def _deserialize(self, params):
        # "HealthConfig" is a list of L4HealthConfig sub-models; stays None when absent.
        raw_config = params.get("HealthConfig")
        if raw_config is not None:
            self.HealthConfig = []
            for entry in raw_config:
                record = L4HealthConfig()
                record._deserialize(entry)
                self.HealthConfig.append(record)
        self.RequestId = params.get("RequestId")
class DescribeL4RulesErrHealthRequest(AbstractModel):
    """DescribeL4RulesErrHealth request structure.

    :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; net: Anti-DDoS Pro)
    :type Business: str
    :param Id: Resource ID
    :type Id: str
    """
    def __init__(self):
        self.Business = None
        self.Id = None
    def _deserialize(self, params):
        for field in ("Business", "Id"):
            setattr(self, field, params.get(field))
        # Warn about payload keys that have no matching attribute on this model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class DescribeL4RulesErrHealthResponse(AbstractModel):
    """Response structure for DescribeL4RulesErrHealth."""

    def __init__(self):
        r"""
        :param Total: Total number of abnormal rules
        :type Total: int
        :param ErrHealths: Abnormal rules; Key is the rule ID, Value is the abnormal IP(s), multiple IPs separated by ","
        :type ErrHealths: list of KeyValue
        :param ExtErrHealths: Abnormal rules with extended error details. Keys: RuleId (rule ID),
            Protocol (forwarding protocol), VirtualPort (forwarding port), ErrMessage (health check
            failure detail in the form
            "SourceIp:...|SourcePort:...|AbnormalStatTime:...|AbnormalReason:...|Interval:...|CheckNum:...|FailNum:...",
            multiple origin IPs separated by ",")
        :type ExtErrHealths: list of KeyValueRecord
        :param RequestId: Unique request ID, returned with every request; provide it when reporting issues
        :type RequestId: str
        """
        self.Total = None
        self.ErrHealths = None
        self.ExtErrHealths = None
        self.RequestId = None


    def _deserialize(self, params):
        self.Total = params.get("Total")
        raw_errs = params.get("ErrHealths")
        if raw_errs is not None:
            self.ErrHealths = []
            for entry in raw_errs:
                record = KeyValue()
                record._deserialize(entry)
                self.ErrHealths.append(record)
        raw_ext = params.get("ExtErrHealths")
        if raw_ext is not None:
            self.ExtErrHealths = []
            for entry in raw_ext:
                record = KeyValueRecord()
                record._deserialize(entry)
                self.ExtErrHealths.append(record)
        self.RequestId = params.get("RequestId")
class DescribeL7HealthConfigRequest(AbstractModel):
    """Request structure for DescribeL7HealthConfig."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; net: Anti-DDoS Advanced Pro)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param RuleIdList: Rule ID list; leave unset or empty to export the health-check config of every rule
        :type RuleIdList: list of str
        """
        self.Business = None
        self.Id = None
        self.RuleIdList = None


    def _deserialize(self, params):
        """Populate this request from *params*, warning about unrecognized keys."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.RuleIdList = params.get("RuleIdList")
        member_set = set(params.keys()) - set(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeL7HealthConfigResponse(AbstractModel):
    """Response structure for DescribeL7HealthConfig."""

    def __init__(self):
        r"""
        :param HealthConfig: Layer-7 health check configurations
        :type HealthConfig: list of L7HealthConfig
        :param RequestId: Unique request ID, returned with every request; provide it when reporting issues
        :type RequestId: str
        """
        self.HealthConfig = None
        self.RequestId = None


    def _deserialize(self, params):
        raw_configs = params.get("HealthConfig")
        if raw_configs is not None:
            parsed = []
            for entry in raw_configs:
                config = L7HealthConfig()
                config._deserialize(entry)
                parsed.append(config)
            self.HealthConfig = parsed
        self.RequestId = params.get("RequestId")
class DescribeNewL4RulesErrHealthRequest(AbstractModel):
    """Request structure for DescribeNewL4RulesErrHealth."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced)
        :type Business: str
        :param RuleIdList: Rule ID list
        :type RuleIdList: list of str
        """
        self.Business = None
        self.RuleIdList = None


    def _deserialize(self, params):
        """Populate this request from *params*, warning about unrecognized keys."""
        self.Business = params.get("Business")
        self.RuleIdList = params.get("RuleIdList")
        member_set = set(params.keys()) - set(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeNewL4RulesErrHealthResponse(AbstractModel):
    """Response structure for DescribeNewL4RulesErrHealth."""

    def __init__(self):
        r"""
        :param Total: Total number of abnormal rules
        :type Total: int
        :param ErrHealths: Abnormal rules; Key is the rule ID, Value is the abnormal IP(s), multiple IPs separated by ","
        :type ErrHealths: list of KeyValue
        :param RequestId: Unique request ID, returned with every request; provide it when reporting issues
        :type RequestId: str
        """
        self.Total = None
        self.ErrHealths = None
        self.RequestId = None


    def _deserialize(self, params):
        self.Total = params.get("Total")
        raw_errs = params.get("ErrHealths")
        if raw_errs is not None:
            self.ErrHealths = []
            for entry in raw_errs:
                record = KeyValue()
                record._deserialize(entry)
                self.ErrHealths.append(record)
        self.RequestId = params.get("RequestId")
class DescribeNewL4RulesRequest(AbstractModel):
    """Request structure for DescribeNewL4Rules."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced)
        :type Business: str
        :param Ip: Filter by this IP
        :type Ip: str
        :param VirtualPort: Filter by this Anti-DDoS Advanced port
        :type VirtualPort: int
        :param Limit: Page size; 0 disables pagination
        :type Limit: int
        :param Offset: Page start offset, computed as (page number - 1) * page size
        :type Offset: int
        """
        self.Business = None
        self.Ip = None
        self.VirtualPort = None
        self.Limit = None
        self.Offset = None


    def _deserialize(self, params):
        """Populate this request from *params*, warning about unrecognized keys."""
        self.Business = params.get("Business")
        self.Ip = params.get("Ip")
        self.VirtualPort = params.get("VirtualPort")
        self.Limit = params.get("Limit")
        self.Offset = params.get("Offset")
        member_set = set(params.keys()) - set(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeNewL4RulesResponse(AbstractModel):
    """Response structure for DescribeNewL4Rules."""

    def __init__(self):
        r"""
        :param Rules: Forwarding rule list
        :type Rules: list of NewL4RuleEntry
        :param Total: Total number of rules
        :type Total: int
        :param Healths: Layer-4 health check configurations
        :type Healths: list of L4RuleHealth
        :param RequestId: Unique request ID, returned with every request; provide it when reporting issues
        :type RequestId: str
        """
        self.Rules = None
        self.Total = None
        self.Healths = None
        self.RequestId = None


    def _deserialize(self, params):
        raw_rules = params.get("Rules")
        if raw_rules is not None:
            self.Rules = []
            for entry in raw_rules:
                rule = NewL4RuleEntry()
                rule._deserialize(entry)
                self.Rules.append(rule)
        self.Total = params.get("Total")
        raw_healths = params.get("Healths")
        if raw_healths is not None:
            self.Healths = []
            for entry in raw_healths:
                health = L4RuleHealth()
                health._deserialize(entry)
                self.Healths.append(health)
        self.RequestId = params.get("RequestId")
class DescribeNewL7RulesErrHealthRequest(AbstractModel):
    """Request structure for DescribeNewL7RulesErrHealth."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced)
        :type Business: str
        :param RuleIdList: Rule ID list
        :type RuleIdList: list of str
        """
        self.Business = None
        self.RuleIdList = None


    def _deserialize(self, params):
        """Populate this request from *params*, warning about unrecognized keys."""
        self.Business = params.get("Business")
        self.RuleIdList = params.get("RuleIdList")
        member_set = set(params.keys()) - set(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeNewL7RulesErrHealthResponse(AbstractModel):
    """Response structure for DescribeNewL7RulesErrHealth."""

    def __init__(self):
        r"""
        :param Total: Total number of abnormal rules
        :type Total: int
        :param ErrHealths: Abnormal rules; Key is the rule ID, Value is the abnormal IP(s) and error
            details, multiple IPs separated by ","
        :type ErrHealths: list of KeyValue
        :param RequestId: Unique request ID, returned with every request; provide it when reporting issues
        :type RequestId: str
        """
        self.Total = None
        self.ErrHealths = None
        self.RequestId = None


    def _deserialize(self, params):
        self.Total = params.get("Total")
        raw_errs = params.get("ErrHealths")
        if raw_errs is not None:
            self.ErrHealths = []
            for entry in raw_errs:
                record = KeyValue()
                record._deserialize(entry)
                self.ErrHealths.append(record)
        self.RequestId = params.get("RequestId")
class DescribePackIndexRequest(AbstractModel):
    """Request structure for DescribePackIndex."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; bgp: Anti-DDoS Pro; net: Anti-DDoS Advanced Pro)
        :type Business: str
        """
        self.Business = None


    def _deserialize(self, params):
        """Populate this request from *params*, warning about unrecognized keys."""
        self.Business = params.get("Business")
        member_set = set(params.keys()) - set(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribePackIndexResponse(AbstractModel):
    """Response structure for DescribePackIndex."""

    def __init__(self):
        r"""
        :param Data: Field values, keyed as follows:
            TotalPackCount: number of resources;
            AttackPackCount: resources being cleansed;
            BlockPackCount: resources being blocked;
            ExpiredPackCount: expired resources;
            ExpireingPackCount: resources about to expire;
            IsolatePackCount: isolated resources
        :type Data: list of KeyValue
        :param RequestId: Unique request ID, returned with every request; provide it when reporting issues
        :type RequestId: str
        """
        self.Data = None
        self.RequestId = None


    def _deserialize(self, params):
        raw_data = params.get("Data")
        if raw_data is not None:
            parsed = []
            for entry in raw_data:
                record = KeyValue()
                record._deserialize(entry)
                parsed.append(record)
            self.Data = parsed
        self.RequestId = params.get("RequestId")
class DescribePcapRequest(AbstractModel):
    """Request structure for DescribePcap."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; bgp: dedicated package; bgp-multip: shared package; net: Anti-DDoS Advanced Pro)
        :type Business: str
        :param Id: Resource instance ID
        :type Id: str
        :param StartTime: Attack event start time, e.g. "2018-08-28 07:00:00"
        :type StartTime: str
        :param EndTime: Attack event end time, e.g. "2018-08-28 07:02:00"
        :type EndTime: str
        :param Ip: Resource IP; only required (per-instance IP) when Business is "net"
        :type Ip: str
        """
        self.Business = None
        self.Id = None
        self.StartTime = None
        self.EndTime = None
        self.Ip = None


    def _deserialize(self, params):
        """Populate this request from *params*, warning about unrecognized keys."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.StartTime = params.get("StartTime")
        self.EndTime = params.get("EndTime")
        self.Ip = params.get("Ip")
        member_set = set(params.keys()) - set(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribePcapResponse(AbstractModel):
    """Response structure for DescribePcap."""

    def __init__(self):
        r"""
        :param PcapUrlList: Download URLs of the pcap files; an empty array when no pcap is available
        :type PcapUrlList: list of str
        :param RequestId: Unique request ID, returned with every request; provide it when reporting issues
        :type RequestId: str
        """
        self.PcapUrlList = None
        self.RequestId = None


    def _deserialize(self, params):
        for field in ("PcapUrlList", "RequestId"):
            setattr(self, field, params.get(field))
class DescribePolicyCaseRequest(AbstractModel):
    """Request structure for DescribePolicyCase."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; bgp: dedicated package; bgp-multip: shared package; net: Anti-DDoS Advanced Pro)
        :type Business: str
        :param SceneId: Policy scenario ID
        :type SceneId: str
        """
        self.Business = None
        self.SceneId = None


    def _deserialize(self, params):
        """Populate this request from *params*, warning about unrecognized keys."""
        self.Business = params.get("Business")
        self.SceneId = params.get("SceneId")
        member_set = set(params.keys()) - set(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribePolicyCaseResponse(AbstractModel):
    """Response structure for DescribePolicyCase."""

    def __init__(self):
        r"""
        :param CaseList: Policy scenario list
        :type CaseList: list of KeyValueRecord
        :param RequestId: Unique request ID, returned with every request; provide it when reporting issues
        :type RequestId: str
        """
        self.CaseList = None
        self.RequestId = None


    def _deserialize(self, params):
        raw_cases = params.get("CaseList")
        if raw_cases is not None:
            parsed = []
            for entry in raw_cases:
                record = KeyValueRecord()
                record._deserialize(entry)
                parsed.append(record)
            self.CaseList = parsed
        self.RequestId = params.get("RequestId")
class DescribeResIpListRequest(AbstractModel):
    """Request structure for DescribeResIpList."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; bgp: dedicated package; bgp-multip: shared package; net: Anti-DDoS Advanced Pro)
        :type Business: str
        :param IdList: Resource IDs; when omitted, the IPs of all of the user's resources are returned
        :type IdList: list of str
        """
        self.Business = None
        self.IdList = None


    def _deserialize(self, params):
        """Populate this request from *params*, warning about unrecognized keys."""
        self.Business = params.get("Business")
        self.IdList = params.get("IdList")
        member_set = set(params.keys()) - set(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeResIpListResponse(AbstractModel):
    """Response structure for DescribeResIpList."""

    def __init__(self):
        r"""
        :param Resource: IP list of the resources
        :type Resource: list of ResourceIp
        :param RequestId: Unique request ID, returned with every request; provide it when reporting issues
        :type RequestId: str
        """
        self.Resource = None
        self.RequestId = None


    def _deserialize(self, params):
        raw_resources = params.get("Resource")
        if raw_resources is not None:
            parsed = []
            for entry in raw_resources:
                resource = ResourceIp()
                resource._deserialize(entry)
                parsed.append(resource)
            self.Resource = parsed
        self.RequestId = params.get("RequestId")
class DescribeResourceListRequest(AbstractModel):
    """Request structure for DescribeResourceList."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgp: dedicated package; bgp-multip: shared package; bgpip: Anti-DDoS Advanced; net: Anti-DDoS Advanced Pro)
        :type Business: str
        :param RegionList: Region code filter; empty array for no region filter, e.g. ["gz", "sh"]
        :type RegionList: list of str
        :param Line: Line filter, only meaningful for Anti-DDoS Advanced resources; values in
            [1 (BGP), 2 (Nanjing Telecom), 3 (Nanjing Unicom), 99 (third-party line)]; pass an empty
            array for other products
        :type Line: list of int non-negative
        :param IdList: Resource ID filter; a non-empty array restricts the listing to those resources
        :type IdList: list of str
        :param Name: Resource name filter; a non-empty string searches by name
        :type Name: str
        :param IpList: IP filter; a non-empty array searches resources by IP
        :type IpList: list of str
        :param Status: Resource status filter, values in [0 (running), 1 (cleansing), 2 (blocked)];
            an empty array disables the status filter
        :type Status: list of int non-negative
        :param Expire: Expiring-soon filter, values in [0 (no filter), 1 (only resources about to expire)]
        :type Expire: int
        :param OderBy: Sort fields, optional
        :type OderBy: list of OrderBy
        :param Limit: Page size; 0 disables pagination
        :type Limit: int
        :param Offset: Page start offset, computed as (page number - 1) * page size
        :type Offset: int
        :param CName: CNAME of an Anti-DDoS Advanced Pro resource; only meaningful for that product
        :type CName: str
        :param Domain: Domain of an Anti-DDoS Advanced Pro resource; only meaningful for that product
        :type Domain: str
        """
        self.Business = None
        self.RegionList = None
        self.Line = None
        self.IdList = None
        self.Name = None
        self.IpList = None
        self.Status = None
        self.Expire = None
        self.OderBy = None
        self.Limit = None
        self.Offset = None
        self.CName = None
        self.Domain = None


    def _deserialize(self, params):
        """Populate this request from *params*, warning about unrecognized keys."""
        self.Business = params.get("Business")
        self.RegionList = params.get("RegionList")
        self.Line = params.get("Line")
        self.IdList = params.get("IdList")
        self.Name = params.get("Name")
        self.IpList = params.get("IpList")
        self.Status = params.get("Status")
        self.Expire = params.get("Expire")
        if params.get("OderBy") is not None:
            self.OderBy = []
            for item in params.get("OderBy"):
                obj = OrderBy()
                obj._deserialize(item)
                self.OderBy.append(obj)
        self.Limit = params.get("Limit")
        self.Offset = params.get("Offset")
        self.CName = params.get("CName")
        self.Domain = params.get("Domain")
        member_set = set(params.keys()) - set(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeResourceListResponse(AbstractModel):
    """Response structure for DescribeResourceList."""

    def __init__(self):
        r"""
        :param Total: Total number of records
        :type Total: int
        :param ServicePacks: Resource records. Returned keys include:
            CreateTime (purchase time), Region (region), BoundIP (IP bound to a dedicated package),
            Id (resource ID), CCEnabled (CC protection switch), DDoSThreshold (DDoS cleansing threshold),
            BoundStatus (IP binding status of a dedicated/shared package), ElasticLimit (elastic
            protection value), DDoSAI (DDoS AI protection switch), OverloadCount (times the elastic
            protection value was exceeded), Status (idle: running, attacking: under attack,
            blocking: blocked, isolate: isolated), Expire (expiration time), CCThreshold (CC trigger
            threshold), AutoRenewFlag (auto-renewal), IspCode (line: 0 Telecom, 1 Unicom, 2 Mobile,
            5 BGP), PackType (package type), PackId (package ID), Name (resource name),
            IpDDoSLevel (protection level: low / middle / high), DefendStatus (DDoS protection
            enabled or temporarily disabled), UndefendExpire (end time of the temporary disablement),
            Tgw (whether the resource is new), Bandwidth (base protection value, Anti-DDoS Pro /
            Advanced), DdosMax (base protection value, Anti-DDoS Advanced Pro), GFBandwidth (base
            business bandwidth, Anti-DDoS Advanced), ServiceBandwidth (base business bandwidth,
            Anti-DDoS Advanced Pro). The keys Type, Lbid, ShowFlag and Locked are deprecated.
        :type ServicePacks: list of KeyValueRecord
        :param Business: Dayu sub-product code (bgp: dedicated package; bgp-multip: shared package; bgpip: Anti-DDoS Advanced; net: Anti-DDoS Advanced Pro)
        :type Business: str
        :param RequestId: Unique request ID, returned with every request; provide it when reporting issues
        :type RequestId: str
        """
        self.Total = None
        self.ServicePacks = None
        self.Business = None
        self.RequestId = None


    def _deserialize(self, params):
        self.Total = params.get("Total")
        raw_packs = params.get("ServicePacks")
        if raw_packs is not None:
            self.ServicePacks = []
            for entry in raw_packs:
                record = KeyValueRecord()
                record._deserialize(entry)
                self.ServicePacks.append(record)
        self.Business = params.get("Business")
        self.RequestId = params.get("RequestId")
class DescribeRuleSetsRequest(AbstractModel):
    """Request structure for DescribeRuleSets."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; net: Anti-DDoS Advanced Pro)
        :type Business: str
        :param IdList: Resource ID list
        :type IdList: list of str
        """
        self.Business = None
        self.IdList = None


    def _deserialize(self, params):
        """Populate this request from *params*, warning about unrecognized keys."""
        self.Business = params.get("Business")
        self.IdList = params.get("IdList")
        member_set = set(params.keys()) - set(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeRuleSetsResponse(AbstractModel):
    """Response structure for DescribeRuleSets."""

    def __init__(self):
        r"""
        :param L4RuleSets: Layer-4 rule record array. Keys: Id (resource ID), RuleIdList (rule IDs,
            ","-separated), RuleNameList (rule names, ","-separated), RuleNum (number of rules)
        :type L4RuleSets: list of KeyValueRecord
        :param L7RuleSets: Layer-7 rule record array. Keys: Id (resource ID), RuleIdList (rule IDs,
            ","-separated), RuleNameList (rule names, ","-separated), RuleNum (number of rules)
        :type L7RuleSets: list of KeyValueRecord
        :param RequestId: Unique request ID, returned with every request; provide it when reporting issues
        :type RequestId: str
        """
        self.L4RuleSets = None
        self.L7RuleSets = None
        self.RequestId = None


    def _deserialize(self, params):
        raw_l4 = params.get("L4RuleSets")
        if raw_l4 is not None:
            self.L4RuleSets = []
            for entry in raw_l4:
                record = KeyValueRecord()
                record._deserialize(entry)
                self.L4RuleSets.append(record)
        raw_l7 = params.get("L7RuleSets")
        if raw_l7 is not None:
            self.L7RuleSets = []
            for entry in raw_l7:
                record = KeyValueRecord()
                record._deserialize(entry)
                self.L7RuleSets.append(record)
        self.RequestId = params.get("RequestId")
class DescribeSchedulingDomainListRequest(AbstractModel):
    """Request structure for DescribeSchedulingDomainList."""

    def __init__(self):
        r"""
        :param Limit: Page size; 0 disables pagination
        :type Limit: int
        :param Offset: Page start offset, computed as (page number - 1) * page size
        :type Offset: int
        :param Domain: Optional filter for a specific domain
        :type Domain: str
        """
        self.Limit = None
        self.Offset = None
        self.Domain = None


    def _deserialize(self, params):
        """Populate this request from *params*, warning about unrecognized keys."""
        self.Limit = params.get("Limit")
        self.Offset = params.get("Offset")
        self.Domain = params.get("Domain")
        member_set = set(params.keys()) - set(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeSchedulingDomainListResponse(AbstractModel):
    """Response structure for DescribeSchedulingDomainList."""

    def __init__(self):
        r"""
        :param Total: Total number of scheduling domains
        :type Total: int
        :param DomainList: Scheduling domain details
        :type DomainList: list of SchedulingDomain
        :param RequestId: Unique request ID, returned with every request; provide it when reporting issues
        :type RequestId: str
        """
        self.Total = None
        self.DomainList = None
        self.RequestId = None


    def _deserialize(self, params):
        self.Total = params.get("Total")
        raw_domains = params.get("DomainList")
        if raw_domains is not None:
            self.DomainList = []
            for entry in raw_domains:
                domain = SchedulingDomain()
                domain._deserialize(entry)
                self.DomainList.append(domain)
        self.RequestId = params.get("RequestId")
class DescribeSecIndexRequest(AbstractModel):
    """Request structure for DescribeSecIndex; this API takes no parameters."""
class DescribeSecIndexResponse(AbstractModel):
    """Response structure for DescribeSecIndex."""

    def __init__(self):
        r"""
        :param Data: Field values, keyed as follows:
            AttackIpCount: number of attacked IPs;
            AttackCount: number of attacks;
            BlockCount: number of blockings;
            MaxMbps: attack peak in Mbps;
            IpNum: IP statistics
        :type Data: list of KeyValue
        :param BeginDate: Start of the current month
        :type BeginDate: str
        :param EndDate: End of the current month
        :type EndDate: str
        :param RequestId: Unique request ID, returned with every request; provide it when reporting issues
        :type RequestId: str
        """
        self.Data = None
        self.BeginDate = None
        self.EndDate = None
        self.RequestId = None


    def _deserialize(self, params):
        raw_data = params.get("Data")
        if raw_data is not None:
            self.Data = []
            for entry in raw_data:
                record = KeyValue()
                record._deserialize(entry)
                self.Data.append(record)
        for field in ("BeginDate", "EndDate", "RequestId"):
            setattr(self, field, params.get(field))
class DescribeSourceIpSegmentRequest(AbstractModel):
    """Request structure for DescribeSourceIpSegment."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; net: Anti-DDoS Advanced Pro)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        """
        self.Business = None
        self.Id = None


    def _deserialize(self, params):
        """Populate this request from *params*, warning about unrecognized keys."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        member_set = set(params.keys()) - set(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeSourceIpSegmentResponse(AbstractModel):
    """Response structure for DescribeSourceIpSegment."""

    def __init__(self):
        r"""
        :param Data: Origin-pull IP ranges, separated by ";"
        :type Data: str
        :param RequestId: Unique request ID, returned with every request; provide it when reporting issues
        :type RequestId: str
        """
        self.Data = None
        self.RequestId = None


    def _deserialize(self, params):
        for field in ("Data", "RequestId"):
            setattr(self, field, params.get(field))
class DescribeTransmitStatisRequest(AbstractModel):
    """Request structure for DescribeTransmitStatis."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; net: Anti-DDoS Advanced Pro; bgp: dedicated package; bgp-multip: shared package)
        :type Business: str
        :param Id: Resource instance ID
        :type Id: str
        :param MetricName: Metric name: "traffic" for traffic bandwidth, "pkg" for packet rate
        :type MetricName: str
        :param Period: Statistics granularity in seconds (300: 5 minutes; 3600: hour; 86400: day)
        :type Period: int
        :param StartTime: Statistics start time; seconds fixed at 0, minutes a multiple of 5
        :type StartTime: str
        :param EndTime: Statistics end time; seconds fixed at 0, minutes a multiple of 5
        :type EndTime: str
        :param IpList: Resource IPs (required, exactly one IP, when Business is "bgp-multip").
            When omitted, all IPs of the instance are aggregated; instances with multiple IPs
            (e.g. Anti-DDoS Advanced Pro) are summed.
        :type IpList: list of str
        """
        self.Business = None
        self.Id = None
        self.MetricName = None
        self.Period = None
        self.StartTime = None
        self.EndTime = None
        self.IpList = None


    def _deserialize(self, params):
        """Populate this request from *params*, warning about unrecognized keys."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.MetricName = params.get("MetricName")
        self.Period = params.get("Period")
        self.StartTime = params.get("StartTime")
        self.EndTime = params.get("EndTime")
        self.IpList = params.get("IpList")
        member_set = set(params.keys()) - set(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeTransmitStatisResponse(AbstractModel):
    """Response structure for DescribeTransmitStatis."""

    def __init__(self):
        r"""
        :param InDataList: Inbound series: traffic bandwidth in bps when MetricName is "traffic",
            packet rate in pps when MetricName is "pkg"
        :type InDataList: list of float
        :param OutDataList: Outbound series: traffic bandwidth in bps when MetricName is "traffic",
            packet rate in pps when MetricName is "pkg"
        :type OutDataList: list of float
        :param MetricName: Metric name: "traffic" for traffic bandwidth, "pkg" for packet rate
        :type MetricName: str
        :param RequestId: Unique request ID, returned with every request; provide it when reporting issues
        :type RequestId: str
        """
        self.InDataList = None
        self.OutDataList = None
        self.MetricName = None
        self.RequestId = None


    def _deserialize(self, params):
        for field in ("InDataList", "OutDataList", "MetricName", "RequestId"):
            setattr(self, field, params.get(field))
class DescribeUnBlockStatisRequest(AbstractModel):
    """Request structure for DescribeUnBlockStatis; this API takes no parameters."""
class DescribeUnBlockStatisResponse(AbstractModel):
    """Response structure for DescribeUnBlockStatis."""

    def __init__(self):
        r"""
        :param Total: Total unblocking quota
        :type Total: int
        :param Used: Number of unblockings already used
        :type Used: int
        :param BeginTime: Statistics start time
        :type BeginTime: str
        :param EndTime: Statistics end time
        :type EndTime: str
        :param RequestId: Unique request ID, returned with every request; provide it when reporting issues
        :type RequestId: str
        """
        self.Total = None
        self.Used = None
        self.BeginTime = None
        self.EndTime = None
        self.RequestId = None


    def _deserialize(self, params):
        for field in ("Total", "Used", "BeginTime", "EndTime", "RequestId"):
            setattr(self, field, params.get(field))
class DescribleL4RulesRequest(AbstractModel):
    """Request structure for DescribleL4Rules."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; net: Anti-DDoS Advanced Pro)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param RuleIdList: Optional rule IDs; when set, only those rules are returned
        :type RuleIdList: list of str
        :param Limit: Page size; 0 disables pagination
        :type Limit: int
        :param Offset: Page start offset, computed as (page number - 1) * page size
        :type Offset: int
        """
        self.Business = None
        self.Id = None
        self.RuleIdList = None
        self.Limit = None
        self.Offset = None


    def _deserialize(self, params):
        """Populate this request from *params*, warning about unrecognized keys."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.RuleIdList = params.get("RuleIdList")
        self.Limit = params.get("Limit")
        self.Offset = params.get("Offset")
        member_set = set(params.keys()) - set(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribleL4RulesResponse(AbstractModel):
    """Response structure for DescribleL4Rules."""

    def __init__(self):
        r"""
        :param Rules: Forwarding rule list
        :type Rules: list of L4RuleEntry
        :param Total: Total number of rules
        :type Total: int
        :param Healths: Health check configurations
        :type Healths: list of L4RuleHealth
        :param RequestId: Unique request ID, returned with every request; provide it when reporting issues
        :type RequestId: str
        """
        self.Rules = None
        self.Total = None
        self.Healths = None
        self.RequestId = None


    def _deserialize(self, params):
        raw_rules = params.get("Rules")
        if raw_rules is not None:
            self.Rules = []
            for entry in raw_rules:
                rule = L4RuleEntry()
                rule._deserialize(entry)
                self.Rules.append(rule)
        self.Total = params.get("Total")
        raw_healths = params.get("Healths")
        if raw_healths is not None:
            self.Healths = []
            for entry in raw_healths:
                health = L4RuleHealth()
                health._deserialize(entry)
                self.Healths.append(health)
        self.RequestId = params.get("RequestId")
class DescribleL7RulesRequest(AbstractModel):
    """Request structure for DescribleL7Rules."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; net: Anti-DDoS Advanced Pro)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param RuleIdList: Optional rule IDs; when set, only those rules are returned
        :type RuleIdList: list of str
        :param Limit: Page size; 0 disables pagination
        :type Limit: int
        :param Offset: Page start offset, computed as (page number - 1) * page size
        :type Offset: int
        :param Domain: Optional domain filter
        :type Domain: str
        :param ProtocolList: Optional forwarding-protocol filter, values in [http, https, http/https]
        :type ProtocolList: list of str
        :param StatusList: Optional status filter, values in [0 (configured), 1 (configuring),
            2 (configuration failed), 3 (deleting), 5 (deletion failed), 6 (awaiting configuration),
            7 (awaiting deletion), 8 (awaiting certificate configuration)]
        :type StatusList: list of int non-negative
        """
        self.Business = None
        self.Id = None
        self.RuleIdList = None
        self.Limit = None
        self.Offset = None
        self.Domain = None
        self.ProtocolList = None
        self.StatusList = None


    def _deserialize(self, params):
        """Populate this request from *params*, warning about unrecognized keys."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.RuleIdList = params.get("RuleIdList")
        self.Limit = params.get("Limit")
        self.Offset = params.get("Offset")
        self.Domain = params.get("Domain")
        self.ProtocolList = params.get("ProtocolList")
        self.StatusList = params.get("StatusList")
        member_set = set(params.keys()) - set(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribleL7RulesResponse(AbstractModel):
    """Response structure for DescribleL7Rules."""

    def __init__(self):
        r"""
        :param Rules: Forwarding rule list
        :type Rules: list of L7RuleEntry
        :param Total: Total number of rules
        :type Total: int
        :param Healths: Health check configurations
        :type Healths: list of L7RuleHealth
        :param RequestId: Unique request ID, returned with every request; provide it when reporting issues
        :type RequestId: str
        """
        self.Rules = None
        self.Total = None
        self.Healths = None
        self.RequestId = None


    def _deserialize(self, params):
        raw_rules = params.get("Rules")
        if raw_rules is not None:
            self.Rules = []
            for entry in raw_rules:
                rule = L7RuleEntry()
                rule._deserialize(entry)
                self.Rules.append(rule)
        self.Total = params.get("Total")
        raw_healths = params.get("Healths")
        if raw_healths is not None:
            self.Healths = []
            for entry in raw_healths:
                health = L7RuleHealth()
                health._deserialize(entry)
                self.Healths.append(health)
        self.RequestId = params.get("RequestId")
class DescribleNewL7RulesRequest(AbstractModel):
    """Request structure for DescribleNewL7Rules."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced)
        :type Business: str
        :param Limit: Page size; 0 disables pagination
        :type Limit: int
        :param Offset: Page start offset, computed as (page number - 1) * page size
        :type Offset: int
        :param Domain: Optional domain filter
        :type Domain: str
        :param ProtocolList: Optional forwarding-protocol filter, values in [http, https, http/https]
        :type ProtocolList: list of str
        :param StatusList: Optional status filter, values in [0 (configured), 1 (configuring),
            2 (configuration failed), 3 (deleting), 5 (deletion failed), 6 (awaiting configuration),
            7 (awaiting deletion), 8 (awaiting certificate configuration)]
        :type StatusList: list of int non-negative
        :param Ip: Optional IP filter
        :type Ip: str
        """
        self.Business = None
        self.Limit = None
        self.Offset = None
        self.Domain = None
        self.ProtocolList = None
        self.StatusList = None
        self.Ip = None


    def _deserialize(self, params):
        """Populate this request from *params*, warning about unrecognized keys."""
        self.Business = params.get("Business")
        self.Limit = params.get("Limit")
        self.Offset = params.get("Offset")
        self.Domain = params.get("Domain")
        self.ProtocolList = params.get("ProtocolList")
        self.StatusList = params.get("StatusList")
        self.Ip = params.get("Ip")
        member_set = set(params.keys()) - set(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribleNewL7RulesResponse(AbstractModel):
    """Response structure for DescribleNewL7Rules."""

    def __init__(self):
        r"""
        :param Rules: Forwarding rule list
        :type Rules: list of NewL7RuleEntry
        :param Total: Total number of rules
        :type Total: int
        :param Healths: Health check configurations
        :type Healths: list of L7RuleHealth
        :param RequestId: Unique request ID, returned with every request; provide it when reporting issues
        :type RequestId: str
        """
        self.Rules = None
        self.Total = None
        self.Healths = None
        self.RequestId = None


    def _deserialize(self, params):
        raw_rules = params.get("Rules")
        if raw_rules is not None:
            self.Rules = []
            for entry in raw_rules:
                rule = NewL7RuleEntry()
                rule._deserialize(entry)
                self.Rules.append(rule)
        self.Total = params.get("Total")
        raw_healths = params.get("Healths")
        if raw_healths is not None:
            self.Healths = []
            for entry in raw_healths:
                health = L7RuleHealth()
                health._deserialize(entry)
                self.Healths.append(health)
        self.RequestId = params.get("RequestId")
class DescribleRegionCountRequest(AbstractModel):
    """Request structure for DescribleRegionCount."""

    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; bgp: dedicated package; bgp-multip: shared package)
        :type Business: str
        :param LineList: Group statistics by line, values in [1 (BGP), 2 (Nanjing Telecom),
            3 (Nanjing Unicom), 99 (third-party line)]; only meaningful for Anti-DDoS Advanced,
            ignored for other products
        :type LineList: list of int non-negative
        """
        self.Business = None
        self.LineList = None


    def _deserialize(self, params):
        """Populate this request from *params*, warning about unrecognized keys."""
        self.Business = params.get("Business")
        self.LineList = params.get("LineList")
        member_set = set(params.keys()) - set(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribleRegionCountResponse(AbstractModel):
    """Response structure for DescribleRegionCount."""

    def __init__(self):
        r"""
        :param RegionList: Number of resource instances per region
        :type RegionList: list of RegionInstanceCount
        :param RequestId: Unique request ID, returned with every request; provide it when reporting issues
        :type RequestId: str
        """
        self.RegionList = None
        self.RequestId = None


    def _deserialize(self, params):
        raw_regions = params.get("RegionList")
        if raw_regions is not None:
            parsed = []
            for entry in raw_regions:
                count = RegionInstanceCount()
                count._deserialize(entry)
                parsed.append(count)
            self.RegionList = parsed
        self.RequestId = params.get("RequestId")
class HttpStatusMap(AbstractModel):
    """Aggregated HTTP status code series for business traffic."""

    def __init__(self):
        r"""
        :param Http2xx: HTTP 2xx status code series
        :type Http2xx: list of float
        :param Http3xx: HTTP 3xx status code series
        :type Http3xx: list of float
        :param Http404: HTTP 404 status code series
        :type Http404: list of float
        :param Http4xx: HTTP 4xx status code series
        :type Http4xx: list of float
        :param Http5xx: HTTP 5xx status code series
        :type Http5xx: list of float
        :param SourceHttp2xx: HTTP 2xx origin-pull status code series
        :type SourceHttp2xx: list of float
        :param SourceHttp3xx: HTTP 3xx origin-pull status code series
        :type SourceHttp3xx: list of float
        :param SourceHttp404: HTTP 404 origin-pull status code series
        :type SourceHttp404: list of float
        :param SourceHttp4xx: HTTP 4xx origin-pull status code series
        :type SourceHttp4xx: list of float
        :param SourceHttp5xx: HTTP 5xx origin-pull status code series
        :type SourceHttp5xx: list of float
        """
        self.Http2xx = None
        self.Http3xx = None
        self.Http404 = None
        self.Http4xx = None
        self.Http5xx = None
        self.SourceHttp2xx = None
        self.SourceHttp3xx = None
        self.SourceHttp404 = None
        self.SourceHttp4xx = None
        self.SourceHttp5xx = None


    def _deserialize(self, params):
        """Populate this model from *params*, warning about unrecognized keys."""
        self.Http2xx = params.get("Http2xx")
        self.Http3xx = params.get("Http3xx")
        self.Http404 = params.get("Http404")
        self.Http4xx = params.get("Http4xx")
        self.Http5xx = params.get("Http5xx")
        self.SourceHttp2xx = params.get("SourceHttp2xx")
        self.SourceHttp3xx = params.get("SourceHttp3xx")
        self.SourceHttp404 = params.get("SourceHttp404")
        self.SourceHttp4xx = params.get("SourceHttp4xx")
        self.SourceHttp5xx = params.get("SourceHttp5xx")
        member_set = set(params.keys()) - set(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class IpBlackWhite(AbstractModel):
    """A single black/white list IP entry.

    :param Ip: IP address.
    :type Ip: str
    :param Type: List type, one of [black, white].
    :type Type: str
    """

    def __init__(self):
        self.Ip = None
        self.Type = None

    def _deserialize(self, params):
        for field in ("Ip", "Type"):
            setattr(self, field, params.get(field))
        # Warn about payload keys this model has no matching attribute for.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class IpBlockData(AbstractModel):
    """An IP blocking record.

    :param Ip: Blocked IP.
    :type Ip: str
    :param Status: State, one of [Blocked, UnBlocking, UnBlockFailed].
    :type Status: str
    :param BlockTime: Time the IP was blocked.
    :type BlockTime: str
    :param UnBlockTime: (Estimated) unblock time.
    :type UnBlockTime: str
    :param ActionType: Unblock type: user (self-service), auto, update (upgrade), bind (Anti-DDoS Pro binding).
    :type ActionType: str
    """

    def __init__(self):
        for field in ("Ip", "Status", "BlockTime", "UnBlockTime", "ActionType"):
            setattr(self, field, None)

    def _deserialize(self, params):
        for field in ("Ip", "Status", "BlockTime", "UnBlockTime", "ActionType"):
            setattr(self, field, params.get(field))
        # Warn about payload keys this model has no matching attribute for.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class IpUnBlockData(AbstractModel):
    """An IP unblocking record.

    :param Ip: IP that was unblocked.
    :type Ip: str
    :param BlockTime: Time the IP was blocked.
    :type BlockTime: str
    :param UnBlockTime: Actual unblock time.
    :type UnBlockTime: str
    :param ActionType: Unblock type: user (self-service), auto, update (upgrade), bind (Anti-DDoS Pro binding).
    :type ActionType: str
    """

    def __init__(self):
        for field in ("Ip", "BlockTime", "UnBlockTime", "ActionType"):
            setattr(self, field, None)

    def _deserialize(self, params):
        for field in ("Ip", "BlockTime", "UnBlockTime", "ActionType"):
            setattr(self, field, params.get(field))
        # Warn about payload keys this model has no matching attribute for.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class KeyValue(AbstractModel):
    """A generic field in key-value form.

    :param Key: Field name.
    :type Key: str
    :param Value: Field value.
    :type Value: str
    """

    def __init__(self):
        self.Key = None
        self.Value = None

    def _deserialize(self, params):
        for field in ("Key", "Value"):
            setattr(self, field, params.get(field))
        # Warn about payload keys this model has no matching attribute for.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class KeyValueRecord(AbstractModel):
    """One record expressed as an array of key-value fields.

    :param Record: The record's Key-Value array.
    :type Record: list of KeyValue
    """

    def __init__(self):
        self.Record = None

    def _deserialize(self, params):
        # Turn each raw entry into a KeyValue model instance.
        def _build(entry):
            pair = KeyValue()
            pair._deserialize(entry)
            return pair

        raw = params.get("Record")
        if raw is not None:
            self.Record = [_build(entry) for entry in raw]
        # Warn about payload keys this model has no matching attribute for.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class L4DelRule(AbstractModel):
    """Payload for deleting layer-4 forwarding rules.

    :param Id: Resource ID.
    :type Id: str
    :param Ip: Resource IP.
    :type Ip: str
    :param RuleIdList: IDs of the rules to delete.
    :type RuleIdList: list of str
    """

    def __init__(self):
        self.Id = None
        self.Ip = None
        self.RuleIdList = None

    def _deserialize(self, params):
        for field in ("Id", "Ip", "RuleIdList"):
            setattr(self, field, params.get(field))
        # Warn about payload keys this model has no matching attribute for.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class L4HealthConfig(AbstractModel):
    """Layer-4 health-check configuration.

    :param Protocol: Forwarding protocol, one of [TCP, UDP].
    :type Protocol: str
    :param VirtualPort: Forwarding port.
    :type VirtualPort: int
    :param Enable: 1 = enabled, 0 = disabled.
    :type Enable: int
    :param TimeOut: Response timeout, seconds.
    :type TimeOut: int
    :param Interval: Probe interval, seconds.
    :type Interval: int
    :param KickNum: Unhealthy threshold, in probes.
    :type KickNum: int
    :param AliveNum: Healthy threshold, in probes.
    :type AliveNum: int
    :param KeepTime: Session persistence time, seconds.
    :type KeepTime: int
    """

    def __init__(self):
        for field in ("Protocol", "VirtualPort", "Enable", "TimeOut",
                      "Interval", "KickNum", "AliveNum", "KeepTime"):
            setattr(self, field, None)

    def _deserialize(self, params):
        for field in ("Protocol", "VirtualPort", "Enable", "TimeOut",
                      "Interval", "KickNum", "AliveNum", "KeepTime"):
            setattr(self, field, params.get(field))
        # Warn about payload keys this model has no matching attribute for.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class L4RuleEntry(AbstractModel):
    """A layer-4 forwarding rule.

    :param Protocol: Forwarding protocol, one of [TCP, UDP].
    :type Protocol: str
    :param VirtualPort: Forwarding port.
    :type VirtualPort: int
    :param SourcePort: Origin (real server) port.
    :type SourcePort: int
    :param SourceType: Origin-pull mode: 1 = by domain, 2 = by IP.
    :type SourceType: int
    :param KeepTime: Session persistence time, seconds.
    :type KeepTime: int
    :param SourceList: Origin list.
    :type SourceList: list of L4RuleSource
    :param LbType: Load-balancing mode: 1 = weighted round robin, 2 = source-IP hash.
    :type LbType: int
    :param KeepEnable: Session persistence switch: 0 = off, 1 = on.
    :type KeepEnable: int
    :param RuleId: Rule ID.
    :type RuleId: str
    :param RuleName: Rule description.
    :type RuleName: str
    :param RemoveSwitch: Watermark-removal state: 0 = off, 1 = on.
    :type RemoveSwitch: int
    """

    # Fields copied verbatim from the payload (SourceList is handled separately).
    _PLAIN = ("Protocol", "VirtualPort", "SourcePort", "SourceType",
              "KeepTime", "LbType", "KeepEnable", "RuleId", "RuleName",
              "RemoveSwitch")

    def __init__(self):
        for field in ("Protocol", "VirtualPort", "SourcePort", "SourceType",
                      "KeepTime", "SourceList", "LbType", "KeepEnable",
                      "RuleId", "RuleName", "RemoveSwitch"):
            setattr(self, field, None)

    def _deserialize(self, params):
        def _build(entry):
            source = L4RuleSource()
            source._deserialize(entry)
            return source

        raw = params.get("SourceList")
        if raw is not None:
            self.SourceList = [_build(entry) for entry in raw]
        for field in self._PLAIN:
            setattr(self, field, params.get(field))
        # Warn about payload keys this model has no matching attribute for.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class L4RuleHealth(AbstractModel):
    """Health-check parameters of a single layer-4 rule.

    :param RuleId: Rule ID.
    :type RuleId: str
    :param Enable: 1 = enabled, 0 = disabled.
    :type Enable: int
    :param TimeOut: Response timeout, seconds.
    :type TimeOut: int
    :param Interval: Probe interval, seconds; must exceed the response timeout.
    :type Interval: int
    :param KickNum: Unhealthy threshold, in probes.
    :type KickNum: int
    :param AliveNum: Healthy threshold, in probes.
    :type AliveNum: int
    """

    def __init__(self):
        for field in ("RuleId", "Enable", "TimeOut", "Interval",
                      "KickNum", "AliveNum"):
            setattr(self, field, None)

    def _deserialize(self, params):
        for field in ("RuleId", "Enable", "TimeOut", "Interval",
                      "KickNum", "AliveNum"):
            setattr(self, field, params.get(field))
        # Warn about payload keys this model has no matching attribute for.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class L4RuleSource(AbstractModel):
    """One origin entry of a layer-4 rule.

    :param Source: Origin IP or domain name.
    :type Source: str
    :param Weight: Weight, range [0, 100].
    :type Weight: int
    """

    def __init__(self):
        self.Source = None
        self.Weight = None

    def _deserialize(self, params):
        for field in ("Source", "Weight"):
            setattr(self, field, params.get(field))
        # Warn about payload keys this model has no matching attribute for.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class L7HealthConfig(AbstractModel):
    """Layer-7 health-check configuration.

    :param Protocol: Forwarding protocol, one of [http, https, http/https].
    :type Protocol: str
    :param Domain: Forwarding domain.
    :type Domain: str
    :param Enable: 1 = enabled, 0 = disabled.
    :type Enable: int
    :param Interval: Probe interval, seconds.
    :type Interval: int
    :param KickNum: Unhealthy threshold, in probes.
    :type KickNum: int
    :param AliveNum: Healthy threshold, in probes.
    :type AliveNum: int
    :param Method: Probe method, HEAD or GET; defaults to HEAD.
    :type Method: str
    :param StatusCode: Healthy status-code bitmask: 1xx=1, 2xx=2, 3xx=4, 4xx=8, 5xx=16, summed.
    :type StatusCode: int
    :param Url: URL path to probe; defaults to /.
    :type Url: str
    """

    def __init__(self):
        for field in ("Protocol", "Domain", "Enable", "Interval", "KickNum",
                      "AliveNum", "Method", "StatusCode", "Url"):
            setattr(self, field, None)

    def _deserialize(self, params):
        for field in ("Protocol", "Domain", "Enable", "Interval", "KickNum",
                      "AliveNum", "Method", "StatusCode", "Url"):
            setattr(self, field, params.get(field))
        # Warn about payload keys this model has no matching attribute for.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class L7RuleEntry(AbstractModel):
    """A layer-7 forwarding rule.

    :param Protocol: Forwarding protocol, one of [http, https].
    :type Protocol: str
    :param Domain: Forwarding domain.
    :type Domain: str
    :param SourceType: Origin-pull mode: 1 = by domain, 2 = by IP.
    :type SourceType: int
    :param KeepTime: Session persistence time, seconds.
    :type KeepTime: int
    :param SourceList: Origin list.
    :type SourceList: list of L4RuleSource
    :param LbType: Load-balancing mode: 1 = weighted round robin.
    :type LbType: int
    :param KeepEnable: Session persistence switch: 0 = off, 1 = on.
    :type KeepEnable: int
    :param RuleId: Rule ID; optional when adding, required when modifying or deleting.
    :type RuleId: str
    :param CertType: Certificate source; required for https, 2 = Tencent Cloud hosted certificate (may be 0 for http).
    :type CertType: int
    :param SSLId: Hosted certificate ID; required when CertType indicates a hosted certificate.
    :type SSLId: str
    :param Cert: Certificate content (self-owned certificates are no longer supported; deprecated, leave empty).
    :type Cert: str
    :param PrivateKey: Certificate key (self-owned certificates are no longer supported; deprecated, leave empty).
    :type PrivateKey: str
    :param RuleName: Rule description.
    :type RuleName: str
    :param Status: Rule state: 0 configured, 1 configuring, 2 configure failed, 3 deleting, 5 delete failed, 6 awaiting configuration, 7 awaiting deletion, 8 awaiting certificate.
    :type Status: int
    :param CCStatus: CC protection state: 0 = off, 1 = on.
    :type CCStatus: int
    :param CCEnable: CC protection state for HTTPS: 0 = off, 1 = on.
    :type CCEnable: int
    :param CCThreshold: CC protection threshold for HTTPS.
    :type CCThreshold: int
    :param CCLevel: CC protection level for HTTPS.
    :type CCLevel: str
    :param HttpsToHttpEnable: HTTPS-to-HTTP origin-pull switch: 0 = off (default), 1 = on. May be null.
    :type HttpsToHttpEnable: int
    :param VirtualPort: Access port. May be null.
    :type VirtualPort: int
    """

    # Fields copied verbatim from the payload (SourceList is handled separately).
    _PLAIN = ("Protocol", "Domain", "SourceType", "KeepTime", "LbType",
              "KeepEnable", "RuleId", "CertType", "SSLId", "Cert",
              "PrivateKey", "RuleName", "Status", "CCStatus", "CCEnable",
              "CCThreshold", "CCLevel", "HttpsToHttpEnable", "VirtualPort")

    def __init__(self):
        self.SourceList = None
        for field in self._PLAIN:
            setattr(self, field, None)

    def _deserialize(self, params):
        def _build(entry):
            source = L4RuleSource()
            source._deserialize(entry)
            return source

        raw = params.get("SourceList")
        if raw is not None:
            self.SourceList = [_build(entry) for entry in raw]
        for field in self._PLAIN:
            setattr(self, field, params.get(field))
        # Warn about payload keys this model has no matching attribute for.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class L7RuleHealth(AbstractModel):
    """Health-check parameters of a single layer-7 rule.

    :param RuleId: Rule ID.
    :type RuleId: str
    :param Enable: 1 = enabled, 0 = disabled.
    :type Enable: int
    :param Interval: Probe interval, seconds.
    :type Interval: int
    :param KickNum: Unhealthy threshold, in probes.
    :type KickNum: int
    :param AliveNum: Healthy threshold, in probes.
    :type AliveNum: int
    :param Method: HTTP probe method, one of [HEAD, GET].
    :type Method: str
    :param StatusCode: Healthy status-code bitmask: 1xx=1, 2xx=2, 3xx=4, 4xx=8, 5xx=16, summed.
    :type StatusCode: int
    :param Url: URL path to probe; defaults to /.
    :type Url: str
    :param Status: Configuration state: 0 normal, 1 configuring, 2 failed.
    :type Status: int
    """

    def __init__(self):
        for field in ("RuleId", "Enable", "Interval", "KickNum", "AliveNum",
                      "Method", "StatusCode", "Url", "Status"):
            setattr(self, field, None)

    def _deserialize(self, params):
        for field in ("RuleId", "Enable", "Interval", "KickNum", "AliveNum",
                      "Method", "StatusCode", "Url", "Status"):
            setattr(self, field, params.get(field))
        # Warn about payload keys this model has no matching attribute for.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class ModifyCCAlarmThresholdRequest(AbstractModel):
    """ModifyCCAlarmThreshold request body.

    :param Business: Product code (shield = chess/gaming shield; bgpip = Anti-DDoS Advanced; bgp = Anti-DDoS Pro; bgp-multip = multi-IP Anti-DDoS Pro; net = Anti-DDoS Ultimate).
    :type Business: str
    :param RsId: Resource ID.
    :type RsId: str
    :param AlarmThreshold: Alarm threshold, > 0; server-side default is 1000.
    :type AlarmThreshold: int
    :param IpList: IPs bound to the resource; empty array when an Anti-DDoS Pro package is unbound, multiple IPs for Anti-DDoS Ultimate.
    :type IpList: list of str
    """

    def __init__(self):
        for field in ("Business", "RsId", "AlarmThreshold", "IpList"):
            setattr(self, field, None)

    def _deserialize(self, params):
        for field in ("Business", "RsId", "AlarmThreshold", "IpList"):
            setattr(self, field, params.get(field))
        # Warn about payload keys this model has no matching attribute for.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class ModifyCCAlarmThresholdResponse(AbstractModel):
    """ModifyCCAlarmThreshold response body.

    :param Success: Success code.
    :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
    :param RequestId: Unique request ID, returned with every response; quote it when reporting issues.
    :type RequestId: str
    """

    def __init__(self):
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        raw = params.get("Success")
        if raw is not None:
            code = SuccessCode()
            code._deserialize(raw)
            self.Success = code
        self.RequestId = params.get("RequestId")
class ModifyCCFrequencyRulesRequest(AbstractModel):
    """ModifyCCFrequencyRules request body.

    :param Business: Product code (bgpip = Anti-DDoS Advanced; net = Anti-DDoS Ultimate).
    :type Business: str
    :param CCFrequencyRuleId: CC access-frequency rule ID.
    :type CCFrequencyRuleId: str
    :param Mode: Match mode, one of ["include" (prefix match), "equal" (exact match)].
    :type Mode: str
    :param Period: Statistics period, seconds, one of [10, 30, 60].
    :type Period: int
    :param ReqNumber: Access count, range [1-10000].
    :type ReqNumber: int
    :param Act: Action, one of ["alg" (human verification), "drop" (block)].
    :type Act: str
    :param ExeDuration: Action duration, seconds, range [1-900].
    :type ExeDuration: int
    :param Uri: URI string starting with /, e.g. /abc/a.php, at most 31 chars; when Uri is /, only prefix match is allowed.
    :type Uri: str
    :param UserAgent: User-Agent string, at most 80 chars.
    :type UserAgent: str
    :param Cookie: Cookie string, at most 40 chars.
    :type Cookie: str
    """

    def __init__(self):
        for field in ("Business", "CCFrequencyRuleId", "Mode", "Period",
                      "ReqNumber", "Act", "ExeDuration", "Uri",
                      "UserAgent", "Cookie"):
            setattr(self, field, None)

    def _deserialize(self, params):
        for field in ("Business", "CCFrequencyRuleId", "Mode", "Period",
                      "ReqNumber", "Act", "ExeDuration", "Uri",
                      "UserAgent", "Cookie"):
            setattr(self, field, params.get(field))
        # Warn about payload keys this model has no matching attribute for.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class ModifyCCFrequencyRulesResponse(AbstractModel):
    """ModifyCCFrequencyRules response body.

    :param Success: Success code.
    :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
    :param RequestId: Unique request ID, returned with every response; quote it when reporting issues.
    :type RequestId: str
    """

    def __init__(self):
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        raw = params.get("Success")
        if raw is not None:
            code = SuccessCode()
            code._deserialize(raw)
            self.Success = code
        self.RequestId = params.get("RequestId")
class ModifyCCFrequencyRulesStatusRequest(AbstractModel):
    """ModifyCCFrequencyRulesStatus request body.

    :param Business: Product code (bgpip = Anti-DDoS Advanced; net = Anti-DDoS Ultimate).
    :type Business: str
    :param Id: Resource ID.
    :type Id: str
    :param RuleId: Layer-7 forwarding rule ID (from the layer-7 rule listing API).
    :type RuleId: str
    :param Method: Switch, one of ["on", "off"].
    :type Method: str
    """

    def __init__(self):
        for field in ("Business", "Id", "RuleId", "Method"):
            setattr(self, field, None)

    def _deserialize(self, params):
        for field in ("Business", "Id", "RuleId", "Method"):
            setattr(self, field, params.get(field))
        # Warn about payload keys this model has no matching attribute for.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class ModifyCCFrequencyRulesStatusResponse(AbstractModel):
    """ModifyCCFrequencyRulesStatus response body.

    :param Success: Success code.
    :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
    :param RequestId: Unique request ID, returned with every response; quote it when reporting issues.
    :type RequestId: str
    """

    def __init__(self):
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        raw = params.get("Success")
        if raw is not None:
            code = SuccessCode()
            code._deserialize(raw)
            self.Success = code
        self.RequestId = params.get("RequestId")
class ModifyCCHostProtectionRequest(AbstractModel):
    """ModifyCCHostProtection request body.

    :param Business: Product code (bgpip = Anti-DDoS Advanced; net = Anti-DDoS Ultimate).
    :type Business: str
    :param Id: Resource ID.
    :type Id: str
    :param RuleId: Rule ID.
    :type RuleId: str
    :param Method: Enable/disable domain-level CC protection, one of [open, close].
    :type Method: str
    """

    def __init__(self):
        for field in ("Business", "Id", "RuleId", "Method"):
            setattr(self, field, None)

    def _deserialize(self, params):
        for field in ("Business", "Id", "RuleId", "Method"):
            setattr(self, field, params.get(field))
        # Warn about payload keys this model has no matching attribute for.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class ModifyCCHostProtectionResponse(AbstractModel):
    """ModifyCCHostProtection response body.

    :param Success: Success code.
    :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
    :param RequestId: Unique request ID, returned with every response; quote it when reporting issues.
    :type RequestId: str
    """

    def __init__(self):
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        raw = params.get("Success")
        if raw is not None:
            code = SuccessCode()
            code._deserialize(raw)
            self.Success = code
        self.RequestId = params.get("RequestId")
class ModifyCCIpAllowDenyRequest(AbstractModel):
    """ModifyCCIpAllowDeny request body.

    :param Business: Product code (bgpip = Anti-DDoS Advanced; bgp = dedicated package; bgp-multip = shared package; net = Anti-DDoS Ultimate).
    :type Business: str
    :param Id: Resource ID.
    :type Id: str
    :param Method: add to insert, delete to remove.
    :type Method: str
    :param Type: List type, one of [white (allowlist), black (blocklist)].
    :type Type: str
    :param IpList: IPs for the black/white list.
    :type IpList: list of str
    :param Protocol: Optional CC protection type, one of [http, https]; defaults to http. When https, Domain and RuleId are also required.
    :type Protocol: str
    :param Domain: Optional; layer-7 rule domain (from the layer-7 rule listing API), required only when Protocol is https.
    :type Domain: str
    :param RuleId: Optional; layer-7 rule ID for https. Not needed when Method is delete.
    :type RuleId: str
    """

    def __init__(self):
        for field in ("Business", "Id", "Method", "Type", "IpList",
                      "Protocol", "Domain", "RuleId"):
            setattr(self, field, None)

    def _deserialize(self, params):
        for field in ("Business", "Id", "Method", "Type", "IpList",
                      "Protocol", "Domain", "RuleId"):
            setattr(self, field, params.get(field))
        # Warn about payload keys this model has no matching attribute for.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class ModifyCCIpAllowDenyResponse(AbstractModel):
    """ModifyCCIpAllowDeny response body.

    :param Success: Success code.
    :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
    :param RequestId: Unique request ID, returned with every response; quote it when reporting issues.
    :type RequestId: str
    """

    def __init__(self):
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        raw = params.get("Success")
        if raw is not None:
            code = SuccessCode()
            code._deserialize(raw)
            self.Success = code
        self.RequestId = params.get("RequestId")
class ModifyCCLevelRequest(AbstractModel):
    """ModifyCCLevel request body.

    :param Business: Product code (bgpip = Anti-DDoS Advanced; net = Anti-DDoS Ultimate).
    :type Business: str
    :param Id: Resource ID.
    :type Id: str
    :param Level: CC protection level, one of [default (normal), loose, strict].
    :type Level: str
    :param Protocol: Optional CC protection type, one of [http, https]; defaults to http. When https, RuleId is also required.
    :type Protocol: str
    :param RuleId: Layer-7 forwarding rule ID (from the layer-7 rule listing API).
    :type RuleId: str
    """

    def __init__(self):
        for field in ("Business", "Id", "Level", "Protocol", "RuleId"):
            setattr(self, field, None)

    def _deserialize(self, params):
        for field in ("Business", "Id", "Level", "Protocol", "RuleId"):
            setattr(self, field, params.get(field))
        # Warn about payload keys this model has no matching attribute for.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class ModifyCCLevelResponse(AbstractModel):
    """ModifyCCLevel response body.

    :param Success: Success code.
    :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
    :param RequestId: Unique request ID, returned with every response; quote it when reporting issues.
    :type RequestId: str
    """

    def __init__(self):
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        raw = params.get("Success")
        if raw is not None:
            code = SuccessCode()
            code._deserialize(raw)
            self.Success = code
        self.RequestId = params.get("RequestId")
class ModifyCCPolicySwitchRequest(AbstractModel):
    """ModifyCCPolicySwitch request body.

    :param Business: Product code (bgpip = Anti-DDoS Advanced; bgp = dedicated package; bgp-multip = shared package; net = Anti-DDoS Ultimate).
    :type Business: str
    :param Id: Resource ID.
    :type Id: str
    :param SetId: Policy ID.
    :type SetId: str
    :param Switch: Switch state.
    :type Switch: int
    """

    def __init__(self):
        for field in ("Business", "Id", "SetId", "Switch"):
            setattr(self, field, None)

    def _deserialize(self, params):
        for field in ("Business", "Id", "SetId", "Switch"):
            setattr(self, field, params.get(field))
        # Warn about payload keys this model has no matching attribute for.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class ModifyCCPolicySwitchResponse(AbstractModel):
    """ModifyCCPolicySwitch response body.

    :param Success: Success code.
    :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
    :param RequestId: Unique request ID, returned with every response; quote it when reporting issues.
    :type RequestId: str
    """

    def __init__(self):
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        raw = params.get("Success")
        if raw is not None:
            code = SuccessCode()
            code._deserialize(raw)
            self.Success = code
        self.RequestId = params.get("RequestId")
class ModifyCCSelfDefinePolicyRequest(AbstractModel):
    """ModifyCCSelfDefinePolicy request body.

    :param Business: Product code (bgpip = Anti-DDoS Advanced; bgp = dedicated package; bgp-multip = shared package; net = Anti-DDoS Ultimate).
    :type Business: str
    :param Id: Resource ID.
    :type Id: str
    :param SetId: Policy ID.
    :type SetId: str
    :param Policy: CC policy description.
    :type Policy: :class:`tencentcloud.dayu.v20180709.models.CCPolicy`
    """

    def __init__(self):
        for field in ("Business", "Id", "SetId", "Policy"):
            setattr(self, field, None)

    def _deserialize(self, params):
        for field in ("Business", "Id", "SetId"):
            setattr(self, field, params.get(field))
        raw = params.get("Policy")
        if raw is not None:
            policy = CCPolicy()
            policy._deserialize(raw)
            self.Policy = policy
        # Warn about payload keys this model has no matching attribute for.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class ModifyCCSelfDefinePolicyResponse(AbstractModel):
    """ModifyCCSelfDefinePolicy response body.

    :param Success: Success code.
    :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
    :param RequestId: Unique request ID, returned with every response; quote it when reporting issues.
    :type RequestId: str
    """

    def __init__(self):
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        raw = params.get("Success")
        if raw is not None:
            code = SuccessCode()
            code._deserialize(raw)
            self.Success = code
        self.RequestId = params.get("RequestId")
class ModifyCCThresholdRequest(AbstractModel):
    """ModifyCCThreshold request body.

    :param Business: Product code (bgpip = Anti-DDoS Advanced; bgp = dedicated package; bgp-multip = shared package; net = Anti-DDoS Ultimate; basic = basic protection).
    :type Business: str
    :param Threshold: CC protection threshold, one of (0 100 150 240 350 480 550 700 850 1000 1500 2000 3000 5000 10000 20000).
        For Anti-DDoS Advanced/Ultimate the maximum depends on the resource's base protection bandwidth:
        10 Gbps -> 20000, 20 -> 40000, 30 -> 70000, 40 -> 100000,
        50 -> 150000, 60 -> 200000, 80 -> 250000, 100 -> 300000.
    :type Threshold: int
    :param Id: Resource ID.
    :type Id: str
    :param Protocol: Optional CC protection type, one of [http, https]; defaults to http. When https, RuleId is also required.
    :type Protocol: str
    :param RuleId: Optional; layer-7 rule ID for https (from the layer-7 rule listing API). Required when Protocol=https.
    :type RuleId: str
    :param BasicIp: Queried IP (basic protection only), e.g. 1.1.1.1.
    :type BasicIp: str
    :param BasicRegion: Region of the queried IP (basic protection only), e.g. gz, bj, sh, hk.
    :type BasicRegion: str
    :param BasicBizType: Zone type (basic protection only): public, bm (BM zone), nat (NAT server zone), channel (internet channel).
    :type BasicBizType: str
    :param BasicDeviceType: Device type (basic protection only): cvm, clb, lb (BM load balancer), nat, channel.
    :type BasicDeviceType: str
    :param BasicIpInstance: Basic protection only; optional NAT gateway IPInstance (required when the device type is a NAT server; obtained via the NAT resource query API).
    :type BasicIpInstance: str
    :param BasicIspCode: Basic protection only; optional ISP line (pass 5 when the device type is a NAT server).
    :type BasicIspCode: int
    :param Domain: Optional; required when the protocol is HTTPS.
    :type Domain: str
    """

    def __init__(self):
        for field in ("Business", "Threshold", "Id", "Protocol", "RuleId",
                      "BasicIp", "BasicRegion", "BasicBizType",
                      "BasicDeviceType", "BasicIpInstance", "BasicIspCode",
                      "Domain"):
            setattr(self, field, None)

    def _deserialize(self, params):
        for field in ("Business", "Threshold", "Id", "Protocol", "RuleId",
                      "BasicIp", "BasicRegion", "BasicBizType",
                      "BasicDeviceType", "BasicIpInstance", "BasicIspCode",
                      "Domain"):
            setattr(self, field, params.get(field))
        # Warn about payload keys this model has no matching attribute for.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class ModifyCCThresholdResponse(AbstractModel):
    """ModifyCCThreshold response body.

    :param Success: Success code.
    :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
    :param RequestId: Unique request ID, returned with every response; quote it when reporting issues.
    :type RequestId: str
    """

    def __init__(self):
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        raw = params.get("Success")
        if raw is not None:
            code = SuccessCode()
            code._deserialize(raw)
            self.Success = code
        self.RequestId = params.get("RequestId")
class ModifyCCUrlAllowRequest(AbstractModel):
    """ModifyCCUrlAllow request body.

    :param Business: Product code (bgpip = Anti-DDoS Advanced; bgp = dedicated package; bgp-multip = shared package; net = Anti-DDoS Ultimate).
    :type Business: str
    :param Id: Resource ID.
    :type Id: str
    :param Method: add to insert, delete to remove.
    :type Method: str
    :param Type: List type, one of [white (allowlist)].
    :type Type: str
    :param UrlList: URLs, in the form http://domain/cgi or https://domain/cgi.
    :type UrlList: list of str
    :param Protocol: Optional CC protection type, one of [http, https]; defaults to http. When https, Domain and RuleId are also required.
    :type Protocol: str
    :param Domain: Optional; layer-7 rule domain (from the layer-7 rule listing API), required only when Protocol is https.
    :type Domain: str
    :param RuleId: Optional; layer-7 rule ID, required when adding with Protocol=https. May be omitted when Method is delete.
    :type RuleId: str
    """

    def __init__(self):
        for field in ("Business", "Id", "Method", "Type", "UrlList",
                      "Protocol", "Domain", "RuleId"):
            setattr(self, field, None)

    def _deserialize(self, params):
        for field in ("Business", "Id", "Method", "Type", "UrlList",
                      "Protocol", "Domain", "RuleId"):
            setattr(self, field, params.get(field))
        # Warn about payload keys this model has no matching attribute for.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class ModifyCCUrlAllowResponse(AbstractModel):
    """ModifyCCUrlAllow response body.

    :param Success: Success code.
    :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
    :param RequestId: Unique request ID, returned with every response; quote it when reporting issues.
    :type RequestId: str
    """

    def __init__(self):
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        raw = params.get("Success")
        if raw is not None:
            code = SuccessCode()
            code._deserialize(raw)
            self.Success = code
        self.RequestId = params.get("RequestId")
class ModifyDDoSAIStatusRequest(AbstractModel):
    """ModifyDDoSAIStatus request body.

    :param Business: Product code (bgpip = Anti-DDoS Advanced; bgp = dedicated package; bgp-multip = shared package; net = Anti-DDoS Ultimate).
    :type Business: str
    :param Id: Resource ID.
    :type Id: str
    :param Method: get to read the AI protection state; set to change it.
    :type Method: str
    :param DDoSAI: AI protection state, one of [on, off]; required when Method=set.
    :type DDoSAI: str
    """

    def __init__(self):
        for field in ("Business", "Id", "Method", "DDoSAI"):
            setattr(self, field, None)

    def _deserialize(self, params):
        for field in ("Business", "Id", "Method", "DDoSAI"):
            setattr(self, field, params.get(field))
        # Warn about payload keys this model has no matching attribute for.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class ModifyDDoSAIStatusResponse(AbstractModel):
    """ModifyDDoSAIStatus response structure.
    """
    def __init__(self):
        r"""
        :param DDoSAI: AI protection status, in [on, off]
        :type DDoSAI: str
        :param Id: Resource ID
        :type Id: str
        :param RequestId: Unique request ID returned with every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.DDoSAI = None
        self.Id = None
        self.RequestId = None
    def _deserialize(self, params):
        self.DDoSAI = params.get("DDoSAI")
        self.Id = params.get("Id")
        self.RequestId = params.get("RequestId")
class ModifyDDoSAlarmThresholdRequest(AbstractModel):
    """ModifyDDoSAlarmThreshold request structure.
    """
    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (shield: Chess Shield; bgpip: Anti-DDoS Advanced; bgp: Anti-DDoS Pro (single IP); bgp-multip: Anti-DDoS Pro (multi-IP); net: Anti-DDoS Ultimate)
        :type Business: str
        :param RsId: Resource ID, string type
        :type RsId: str
        :param AlarmType: Alarm threshold type: 0 - not set, 1 - inbound traffic, 2 - cleaned (scrubbed) traffic
        :type AlarmType: int
        :param AlarmThreshold: Alarm threshold; must be greater than 0 (currently a provisional value)
        :type AlarmThreshold: int
        :param IpList: IPs bound to the resource; pass an empty array when an Anti-DDoS Pro instance has no bound IP, and multiple IPs for Anti-DDoS Ultimate
        :type IpList: list of str
        """
        self.Business = None
        self.RsId = None
        self.AlarmType = None
        self.AlarmThreshold = None
        self.IpList = None
    def _deserialize(self, params):
        """Populate this model from *params*; warn about unknown keys."""
        self.Business = params.get("Business")
        self.RsId = params.get("RsId")
        self.AlarmType = params.get("AlarmType")
        self.AlarmThreshold = params.get("AlarmThreshold")
        self.IpList = params.get("IpList")
        # Warn about keys with no declared attribute; message typo fixed.
        extra_fields = set(params.keys()) - set(vars(self).keys())
        if extra_fields:
            warnings.warn("%s fields are useless." % ",".join(extra_fields))
class ModifyDDoSAlarmThresholdResponse(AbstractModel):
    """ModifyDDoSAlarmThreshold response structure.
    """
    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned with every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None
    def _deserialize(self, params):
        # Success is a nested model; deserialize it only when present.
        if params.get("Success") is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(params.get("Success"))
        self.RequestId = params.get("RequestId")
class ModifyDDoSDefendStatusRequest(AbstractModel):
    """ModifyDDoSDefendStatus request structure.
    """
    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgp: Anti-DDoS Pro (single IP); bgp-multip: Anti-DDoS Pro (multi-IP); bgpip: Anti-DDoS Advanced; net: Anti-DDoS Ultimate; basic: Anti-DDoS Basic)
        :type Business: str
        :param Status: Protection status, in [0 (disabled), 1 (enabled)]
        :type Status: int
        :param Hour: Disablement duration in hours, in [0,1,2,3,4,5,6]; when Status=0 (disabling), Hour must be greater than 0
        :type Hour: int
        :param Id: Resource ID; required whenever Business is not Anti-DDoS Basic
        :type Id: str
        :param Ip: Anti-DDoS Basic IP; required only when Business is Anti-DDoS Basic
        :type Ip: str
        :param BizType: Required only when Business is Anti-DDoS Basic. Product type the IP belongs to, in [public (CVM), bm (BM), eni (ENI), vpngw (VPN gateway), natgw (NAT gateway), waf (WAF), fpc (finance product), gaap (GAAP), other (hosted IP)]
        :type BizType: str
        :param DeviceType: Required only when Business is Anti-DDoS Basic. Product subtype the IP belongs to, in [cvm (CVM), lb (CLB), eni (ENI), vpngw (VPN), natgw (NAT), waf (WAF), fpc (finance), gaap (GAAP), other (hosted IP), eip (BM EIP)]
        :type DeviceType: str
        :param InstanceId: Required only when Business is Anti-DDoS Basic. Instance ID of the resource the IP belongs to; required when binding a new IP; e.g. for an ENI IP pass the ENI ID (eni-*)
        :type InstanceId: str
        :param IPRegion: Required only when Business is Anti-DDoS Basic. Region of the IP, valid values:
"bj": North China (Beijing)
"cd": Southwest China (Chengdu)
"cq": Southwest China (Chongqing)
"gz": South China (Guangzhou)
"gzopen": South China (Guangzhou Open)
"hk": Hong Kong (China)
"kr": Southeast Asia (Seoul)
"sh": East China (Shanghai)
"shjr": East China (Shanghai Finance)
"szjr": South China (Shenzhen Finance)
"sg": Southeast Asia (Singapore)
"th": Southeast Asia (Thailand)
"de": Europe (Germany)
"usw": Western US (Silicon Valley)
"ca": North America (Toronto)
"jp": Japan
"hzec": Hangzhou
"in": India
"use": Eastern US (Virginia)
"ru": Russia
"tpe": Taiwan (China)
"nj": Nanjing
        :type IPRegion: str
        """
        self.Business = None
        self.Status = None
        self.Hour = None
        self.Id = None
        self.Ip = None
        self.BizType = None
        self.DeviceType = None
        self.InstanceId = None
        self.IPRegion = None
    def _deserialize(self, params):
        """Populate this model from *params*; warn about unknown keys."""
        self.Business = params.get("Business")
        self.Status = params.get("Status")
        self.Hour = params.get("Hour")
        self.Id = params.get("Id")
        self.Ip = params.get("Ip")
        self.BizType = params.get("BizType")
        self.DeviceType = params.get("DeviceType")
        self.InstanceId = params.get("InstanceId")
        self.IPRegion = params.get("IPRegion")
        # Warn about keys with no declared attribute; message typo fixed.
        extra_fields = set(params.keys()) - set(vars(self).keys())
        if extra_fields:
            warnings.warn("%s fields are useless." % ",".join(extra_fields))
class ModifyDDoSDefendStatusResponse(AbstractModel):
    """ModifyDDoSDefendStatus response structure.
    """
    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned with every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None
    def _deserialize(self, params):
        # Success is a nested model; deserialize it only when present.
        if params.get("Success") is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(params.get("Success"))
        self.RequestId = params.get("RequestId")
class ModifyDDoSLevelRequest(AbstractModel):
    """ModifyDDoSLevel request structure.
    """
    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; bgp: Anti-DDoS Pro (single IP); bgp-multip: Anti-DDoS Pro (multi-IP); net: Anti-DDoS Ultimate)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param Method: =get to read the protection level; =set to modify the protection level
        :type Method: str
        :param DDoSLevel: Protection level, in [low, middle, high]; required when Method=set
        :type DDoSLevel: str
        """
        self.Business = None
        self.Id = None
        self.Method = None
        self.DDoSLevel = None
    def _deserialize(self, params):
        """Populate this model from *params*; warn about unknown keys."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.Method = params.get("Method")
        self.DDoSLevel = params.get("DDoSLevel")
        # Warn about keys with no declared attribute; message typo fixed.
        extra_fields = set(params.keys()) - set(vars(self).keys())
        if extra_fields:
            warnings.warn("%s fields are useless." % ",".join(extra_fields))
class ModifyDDoSLevelResponse(AbstractModel):
    """ModifyDDoSLevel response structure.
    """
    def __init__(self):
        r"""
        :param Id: Resource ID
        :type Id: str
        :param DDoSLevel: Protection level, in [low, middle, high]
        :type DDoSLevel: str
        :param RequestId: Unique request ID returned with every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.Id = None
        self.DDoSLevel = None
        self.RequestId = None
    def _deserialize(self, params):
        self.Id = params.get("Id")
        self.DDoSLevel = params.get("DDoSLevel")
        self.RequestId = params.get("RequestId")
class ModifyDDoSPolicyCaseRequest(AbstractModel):
    """ModifyDDoSPolicyCase request structure.
    """
    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; bgp: Anti-DDoS Pro (single IP); bgp-multip: Anti-DDoS Pro (multi-IP); net: Anti-DDoS Ultimate)
        :type Business: str
        :param SceneId: Policy scenario ID
        :type SceneId: str
        :param PlatformTypes: Development platforms, in [PC (PC client), MOBILE (mobile), TV (TV), SERVER (server)]
        :type PlatformTypes: list of str
        :param AppType: Category, in [WEB (website), GAME (game), APP (application), OTHER (other)]
        :type AppType: str
        :param AppProtocols: Application protocols, in [tcp (TCP), udp (UDP), icmp (ICMP), all (other protocols)]
        :type AppProtocols: list of str
        :param TcpSportStart: TCP service start port, in (0, 65535]
        :type TcpSportStart: str
        :param TcpSportEnd: TCP service end port, in (0, 65535]; must be >= the TCP start port
        :type TcpSportEnd: str
        :param UdpSportStart: UDP service start port, in (0, 65535]
        :type UdpSportStart: str
        :param UdpSportEnd: UDP service end port, in (0, 65535); must be >= the UDP start port
        :type UdpSportEnd: str
        :param HasAbroad: Whether there are overseas users, in [no, yes]
        :type HasAbroad: str
        :param HasInitiateTcp: Whether outbound TCP requests are actively initiated, in [no, yes]
        :type HasInitiateTcp: str
        :param HasInitiateUdp: Whether outbound UDP requests are actively initiated, in [no, yes]
        :type HasInitiateUdp: str
        :param PeerTcpPort: Port used for actively initiated TCP requests, in (0, 65535]
        :type PeerTcpPort: str
        :param PeerUdpPort: Port used for actively initiated UDP requests, in (0, 65535]
        :type PeerUdpPort: str
        :param TcpFootprint: Fixed signature in the TCP payload; string shorter than 512 characters
        :type TcpFootprint: str
        :param UdpFootprint: Fixed signature in the UDP payload; string shorter than 512 characters
        :type UdpFootprint: str
        :param WebApiUrl: API URLs of the web service
        :type WebApiUrl: list of str
        :param MinTcpPackageLen: Minimum TCP packet length, in (0, 1500)
        :type MinTcpPackageLen: str
        :param MaxTcpPackageLen: Maximum TCP packet length, in (0, 1500); must be >= the minimum TCP packet length
        :type MaxTcpPackageLen: str
        :param MinUdpPackageLen: Minimum UDP packet length, in (0, 1500)
        :type MinUdpPackageLen: str
        :param MaxUdpPackageLen: Maximum UDP packet length, in (0, 1500); must be >= the minimum UDP packet length
        :type MaxUdpPackageLen: str
        :param HasVPN: Whether there are VPN services, in [no, yes]
        :type HasVPN: str
        :param TcpPortList: TCP port list; single ports and port ranges, as a string, e.g. 80,443,700-800,53,1000-3000
        :type TcpPortList: str
        :param UdpPortList: UDP port list; single ports and port ranges, as a string, e.g. 80,443,700-800,53,1000-3000
        :type UdpPortList: str
        """
        self.Business = None
        self.SceneId = None
        self.PlatformTypes = None
        self.AppType = None
        self.AppProtocols = None
        self.TcpSportStart = None
        self.TcpSportEnd = None
        self.UdpSportStart = None
        self.UdpSportEnd = None
        self.HasAbroad = None
        self.HasInitiateTcp = None
        self.HasInitiateUdp = None
        self.PeerTcpPort = None
        self.PeerUdpPort = None
        self.TcpFootprint = None
        self.UdpFootprint = None
        self.WebApiUrl = None
        self.MinTcpPackageLen = None
        self.MaxTcpPackageLen = None
        self.MinUdpPackageLen = None
        self.MaxUdpPackageLen = None
        self.HasVPN = None
        self.TcpPortList = None
        self.UdpPortList = None
    def _deserialize(self, params):
        """Populate this model from *params*; warn about unknown keys."""
        self.Business = params.get("Business")
        self.SceneId = params.get("SceneId")
        self.PlatformTypes = params.get("PlatformTypes")
        self.AppType = params.get("AppType")
        self.AppProtocols = params.get("AppProtocols")
        self.TcpSportStart = params.get("TcpSportStart")
        self.TcpSportEnd = params.get("TcpSportEnd")
        self.UdpSportStart = params.get("UdpSportStart")
        self.UdpSportEnd = params.get("UdpSportEnd")
        self.HasAbroad = params.get("HasAbroad")
        self.HasInitiateTcp = params.get("HasInitiateTcp")
        self.HasInitiateUdp = params.get("HasInitiateUdp")
        self.PeerTcpPort = params.get("PeerTcpPort")
        self.PeerUdpPort = params.get("PeerUdpPort")
        self.TcpFootprint = params.get("TcpFootprint")
        self.UdpFootprint = params.get("UdpFootprint")
        self.WebApiUrl = params.get("WebApiUrl")
        self.MinTcpPackageLen = params.get("MinTcpPackageLen")
        self.MaxTcpPackageLen = params.get("MaxTcpPackageLen")
        self.MinUdpPackageLen = params.get("MinUdpPackageLen")
        self.MaxUdpPackageLen = params.get("MaxUdpPackageLen")
        self.HasVPN = params.get("HasVPN")
        self.TcpPortList = params.get("TcpPortList")
        self.UdpPortList = params.get("UdpPortList")
        # Warn about keys with no declared attribute; message typo fixed.
        extra_fields = set(params.keys()) - set(vars(self).keys())
        if extra_fields:
            warnings.warn("%s fields are useless." % ",".join(extra_fields))
class ModifyDDoSPolicyCaseResponse(AbstractModel):
    """ModifyDDoSPolicyCase response structure.
    """
    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned with every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None
    def _deserialize(self, params):
        # Success is a nested model; deserialize it only when present.
        if params.get("Success") is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(params.get("Success"))
        self.RequestId = params.get("RequestId")
class ModifyDDoSPolicyNameRequest(AbstractModel):
    """ModifyDDoSPolicyName request structure.
    """
    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; bgp: Anti-DDoS Pro (single IP); bgp-multip: Anti-DDoS Pro (multi-IP); net: Anti-DDoS Ultimate)
        :type Business: str
        :param PolicyId: Policy ID
        :type PolicyId: str
        :param Name: Policy name
        :type Name: str
        """
        self.Business = None
        self.PolicyId = None
        self.Name = None
    def _deserialize(self, params):
        """Populate this model from *params*; warn about unknown keys."""
        self.Business = params.get("Business")
        self.PolicyId = params.get("PolicyId")
        self.Name = params.get("Name")
        # Warn about keys with no declared attribute; message typo fixed.
        extra_fields = set(params.keys()) - set(vars(self).keys())
        if extra_fields:
            warnings.warn("%s fields are useless." % ",".join(extra_fields))
class ModifyDDoSPolicyNameResponse(AbstractModel):
    """ModifyDDoSPolicyName response structure.
    """
    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned with every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None
    def _deserialize(self, params):
        # Success is a nested model; deserialize it only when present.
        if params.get("Success") is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(params.get("Success"))
        self.RequestId = params.get("RequestId")
class ModifyDDoSPolicyRequest(AbstractModel):
    """ModifyDDoSPolicy request structure.
    """
    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; bgp: Anti-DDoS Pro (single IP); bgp-multip: Anti-DDoS Pro (multi-IP); net: Anti-DDoS Ultimate)
        :type Business: str
        :param PolicyId: Policy ID
        :type PolicyId: str
        :param DropOptions: Protocol blocking; required, and the array length must be 1
        :type DropOptions: list of DDoSPolicyDropOption
        :param PortLimits: Port blocking; pass an empty array when no port is blocked
        :type PortLimits: list of DDoSPolicyPortLimit
        :param IpAllowDenys: IP blocklist/allowlist; pass an empty array when there is none
        :type IpAllowDenys: list of IpBlackWhite
        :param PacketFilters: Packet filters; pass an empty array when there are none
        :type PacketFilters: list of DDoSPolicyPacketFilter
        :param WaterPrint: Watermark policy parameters; pass an empty array when watermarking is disabled; at most one watermark policy may be passed (array size <= 1)
        :type WaterPrint: list of WaterPrintPolicy
        """
        self.Business = None
        self.PolicyId = None
        self.DropOptions = None
        self.PortLimits = None
        self.IpAllowDenys = None
        self.PacketFilters = None
        self.WaterPrint = None
    def _deserialize(self, params):
        """Populate this model from *params*, building nested model lists;
        warn about unknown keys."""
        self.Business = params.get("Business")
        self.PolicyId = params.get("PolicyId")
        if params.get("DropOptions") is not None:
            self.DropOptions = []
            for item in params.get("DropOptions"):
                obj = DDoSPolicyDropOption()
                obj._deserialize(item)
                self.DropOptions.append(obj)
        if params.get("PortLimits") is not None:
            self.PortLimits = []
            for item in params.get("PortLimits"):
                obj = DDoSPolicyPortLimit()
                obj._deserialize(item)
                self.PortLimits.append(obj)
        if params.get("IpAllowDenys") is not None:
            self.IpAllowDenys = []
            for item in params.get("IpAllowDenys"):
                obj = IpBlackWhite()
                obj._deserialize(item)
                self.IpAllowDenys.append(obj)
        if params.get("PacketFilters") is not None:
            self.PacketFilters = []
            for item in params.get("PacketFilters"):
                obj = DDoSPolicyPacketFilter()
                obj._deserialize(item)
                self.PacketFilters.append(obj)
        if params.get("WaterPrint") is not None:
            self.WaterPrint = []
            for item in params.get("WaterPrint"):
                obj = WaterPrintPolicy()
                obj._deserialize(item)
                self.WaterPrint.append(obj)
        # Warn about keys with no declared attribute; message typo fixed.
        extra_fields = set(params.keys()) - set(vars(self).keys())
        if extra_fields:
            warnings.warn("%s fields are useless." % ",".join(extra_fields))
class ModifyDDoSPolicyResponse(AbstractModel):
    """ModifyDDoSPolicy response structure.
    """
    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned with every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None
    def _deserialize(self, params):
        # Success is a nested model; deserialize it only when present.
        if params.get("Success") is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(params.get("Success"))
        self.RequestId = params.get("RequestId")
class ModifyDDoSSwitchRequest(AbstractModel):
    """ModifyDDoSSwitch request structure.
    """
    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (basic: Anti-DDoS Basic)
        :type Business: str
        :param Method: =get to read the DDoS protection status; =set to modify the DDoS protection status
        :type Method: str
        :param Ip: Anti-DDoS Basic IP; required only when Business is Anti-DDoS Basic
        :type Ip: str
        :param BizType: Required only when Business is Anti-DDoS Basic. Product type the IP belongs to, in [public (CVM), bm (BM), eni (ENI), vpngw (VPN gateway), natgw (NAT gateway), waf (WAF), fpc (finance product), gaap (GAAP), other (hosted IP)]
        :type BizType: str
        :param DeviceType: Required only when Business is Anti-DDoS Basic. Product subtype the IP belongs to, in [cvm (CVM), lb (CLB), eni (ENI), vpngw (VPN), natgw (NAT), waf (WAF), fpc (finance), gaap (GAAP), other (hosted IP), eip (BM EIP)]
        :type DeviceType: str
        :param InstanceId: Required only when Business is Anti-DDoS Basic. Instance ID of the resource the IP belongs to; required when binding a new IP; e.g. for an ENI IP pass the ENI ID (eni-*)
        :type InstanceId: str
        :param IPRegion: Required only when Business is Anti-DDoS Basic. Region of the IP, valid values:
"bj": North China (Beijing)
"cd": Southwest China (Chengdu)
"cq": Southwest China (Chongqing)
"gz": South China (Guangzhou)
"gzopen": South China (Guangzhou Open)
"hk": Hong Kong (China)
"kr": Southeast Asia (Seoul)
"sh": East China (Shanghai)
"shjr": East China (Shanghai Finance)
"szjr": South China (Shenzhen Finance)
"sg": Southeast Asia (Singapore)
"th": Southeast Asia (Thailand)
"de": Europe (Germany)
"usw": Western US (Silicon Valley)
"ca": North America (Toronto)
"jp": Japan
"hzec": Hangzhou
"in": India
"use": Eastern US (Virginia)
"ru": Russia
"tpe": Taiwan (China)
"nj": Nanjing
        :type IPRegion: str
        :param Status: Optional. Protection status, in [0 (disabled), 1 (enabled)]; may be omitted when Method is get
        :type Status: int
        """
        self.Business = None
        self.Method = None
        self.Ip = None
        self.BizType = None
        self.DeviceType = None
        self.InstanceId = None
        self.IPRegion = None
        self.Status = None
    def _deserialize(self, params):
        """Populate this model from *params*; warn about unknown keys."""
        self.Business = params.get("Business")
        self.Method = params.get("Method")
        self.Ip = params.get("Ip")
        self.BizType = params.get("BizType")
        self.DeviceType = params.get("DeviceType")
        self.InstanceId = params.get("InstanceId")
        self.IPRegion = params.get("IPRegion")
        self.Status = params.get("Status")
        # Warn about keys with no declared attribute; message typo fixed.
        extra_fields = set(params.keys()) - set(vars(self).keys())
        if extra_fields:
            warnings.warn("%s fields are useless." % ",".join(extra_fields))
class ModifyDDoSSwitchResponse(AbstractModel):
    """ModifyDDoSSwitch response structure.
    """
    def __init__(self):
        r"""
        :param Status: Current protection status, in [0 (disabled), 1 (enabled)]
        :type Status: int
        :param RequestId: Unique request ID returned with every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.Status = None
        self.RequestId = None
    def _deserialize(self, params):
        self.Status = params.get("Status")
        self.RequestId = params.get("RequestId")
class ModifyDDoSThresholdRequest(AbstractModel):
    """ModifyDDoSThreshold request structure.
    """
    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; bgp: Anti-DDoS Pro (single IP); bgp-multip: Anti-DDoS Pro (multi-IP); net: Anti-DDoS Ultimate)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param Threshold: DDoS cleansing threshold, in [0, 60, 80, 100, 150, 200, 250, 300, 400, 500, 700, 1000];
0 means the default value is used.
        :type Threshold: int
        """
        self.Business = None
        self.Id = None
        self.Threshold = None
    def _deserialize(self, params):
        """Populate this model from *params*; warn about unknown keys."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.Threshold = params.get("Threshold")
        # Warn about keys with no declared attribute; message typo fixed.
        extra_fields = set(params.keys()) - set(vars(self).keys())
        if extra_fields:
            warnings.warn("%s fields are useless." % ",".join(extra_fields))
class ModifyDDoSThresholdResponse(AbstractModel):
    """ModifyDDoSThreshold response structure.
    """
    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned with every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None
    def _deserialize(self, params):
        # Success is a nested model; deserialize it only when present.
        if params.get("Success") is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(params.get("Success"))
        self.RequestId = params.get("RequestId")
class ModifyDDoSWaterKeyRequest(AbstractModel):
    """ModifyDDoSWaterKey request structure.
    """
    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; bgp: Anti-DDoS Pro (single IP); bgp-multip: Anti-DDoS Pro (multi-IP); net: Anti-DDoS Ultimate)
        :type Business: str
        :param PolicyId: Policy ID
        :type PolicyId: str
        :param Method: Key operation, in [add (add), delete (delete), open (enable), close (disable), get (fetch key)]
        :type Method: str
        :param KeyId: Key ID; may be omitted or 0 for the add operation, required for all others
        :type KeyId: int
        """
        self.Business = None
        self.PolicyId = None
        self.Method = None
        self.KeyId = None
    def _deserialize(self, params):
        """Populate this model from *params*; warn about unknown keys."""
        self.Business = params.get("Business")
        self.PolicyId = params.get("PolicyId")
        self.Method = params.get("Method")
        self.KeyId = params.get("KeyId")
        # Warn about keys with no declared attribute; message typo fixed.
        extra_fields = set(params.keys()) - set(vars(self).keys())
        if extra_fields:
            warnings.warn("%s fields are useless." % ",".join(extra_fields))
class ModifyDDoSWaterKeyResponse(AbstractModel):
    """ModifyDDoSWaterKey response structure.
    """
    def __init__(self):
        r"""
        :param KeyList: Watermark key list
        :type KeyList: list of WaterPrintKey
        :param RequestId: Unique request ID returned with every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.KeyList = None
        self.RequestId = None
    def _deserialize(self, params):
        # KeyList is a list of nested models; build one object per item.
        if params.get("KeyList") is not None:
            self.KeyList = []
            for item in params.get("KeyList"):
                obj = WaterPrintKey()
                obj._deserialize(item)
                self.KeyList.append(obj)
        self.RequestId = params.get("RequestId")
class ModifyElasticLimitRequest(AbstractModel):
    """ModifyElasticLimit request structure.
    """
    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; bgp: Anti-DDoS Pro (single IP); bgp-multip: Anti-DDoS Pro (multi-IP); net: Anti-DDoS Ultimate)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param Limit: Elastic protection threshold, in [0 10000 20000 30000 40000 50000 60000 70000 80000 90000 100000 120000 150000 200000 250000 300000 400000 600000 800000 220000 310000 110000 270000 610000]
        :type Limit: int
        """
        self.Business = None
        self.Id = None
        self.Limit = None
    def _deserialize(self, params):
        """Populate this model from *params*; warn about unknown keys."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.Limit = params.get("Limit")
        # Warn about keys with no declared attribute; message typo fixed.
        extra_fields = set(params.keys()) - set(vars(self).keys())
        if extra_fields:
            warnings.warn("%s fields are useless." % ",".join(extra_fields))
class ModifyElasticLimitResponse(AbstractModel):
    """ModifyElasticLimit response structure.
    """
    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned with every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None
    def _deserialize(self, params):
        # Success is a nested model; deserialize it only when present.
        if params.get("Success") is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(params.get("Success"))
        self.RequestId = params.get("RequestId")
class ModifyL4HealthRequest(AbstractModel):
    """ModifyL4Health request structure.
    """
    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; net: Anti-DDoS Ultimate)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param Healths: Health check parameter array
        :type Healths: list of L4RuleHealth
        """
        self.Business = None
        self.Id = None
        self.Healths = None
    def _deserialize(self, params):
        """Populate this model from *params*, building nested L4RuleHealth
        objects; warn about unknown keys."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        if params.get("Healths") is not None:
            self.Healths = []
            for item in params.get("Healths"):
                obj = L4RuleHealth()
                obj._deserialize(item)
                self.Healths.append(obj)
        # Warn about keys with no declared attribute; message typo fixed.
        extra_fields = set(params.keys()) - set(vars(self).keys())
        if extra_fields:
            warnings.warn("%s fields are useless." % ",".join(extra_fields))
class ModifyL4HealthResponse(AbstractModel):
    """ModifyL4Health response structure.
    """
    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned with every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None
    def _deserialize(self, params):
        # Success is a nested model; deserialize it only when present.
        if params.get("Success") is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(params.get("Success"))
        self.RequestId = params.get("RequestId")
class ModifyL4KeepTimeRequest(AbstractModel):
    """ModifyL4KeepTime request structure.
    """
    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; net: Anti-DDoS Ultimate)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param RuleId: Rule ID
        :type RuleId: str
        :param KeepEnable: Session persistence switch, in [0 (disabled), 1 (enabled)]
        :type KeepEnable: int
        :param KeepTime: Session persistence duration, in seconds
        :type KeepTime: int
        """
        self.Business = None
        self.Id = None
        self.RuleId = None
        self.KeepEnable = None
        self.KeepTime = None
    def _deserialize(self, params):
        """Populate this model from *params*; warn about unknown keys."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.RuleId = params.get("RuleId")
        self.KeepEnable = params.get("KeepEnable")
        self.KeepTime = params.get("KeepTime")
        # Warn about keys with no declared attribute; message typo fixed.
        extra_fields = set(params.keys()) - set(vars(self).keys())
        if extra_fields:
            warnings.warn("%s fields are useless." % ",".join(extra_fields))
class ModifyL4KeepTimeResponse(AbstractModel):
    """ModifyL4KeepTime response structure.
    """
    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned with every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None
    def _deserialize(self, params):
        # Success is a nested model; deserialize it only when present.
        if params.get("Success") is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(params.get("Success"))
        self.RequestId = params.get("RequestId")
class ModifyL4RulesRequest(AbstractModel):
    """ModifyL4Rules request structure.
    """
    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; net: Anti-DDoS Ultimate)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param Rule: Rule
        :type Rule: :class:`tencentcloud.dayu.v20180709.models.L4RuleEntry`
        """
        self.Business = None
        self.Id = None
        self.Rule = None
    def _deserialize(self, params):
        """Populate this model from *params*; warn about unknown keys."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        # Rule is a nested model; deserialize it only when present.
        if params.get("Rule") is not None:
            self.Rule = L4RuleEntry()
            self.Rule._deserialize(params.get("Rule"))
        # Warn about keys with no declared attribute; message typo fixed.
        extra_fields = set(params.keys()) - set(vars(self).keys())
        if extra_fields:
            warnings.warn("%s fields are useless." % ",".join(extra_fields))
class ModifyL4RulesResponse(AbstractModel):
    """ModifyL4Rules response structure.
    """
    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned with every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None
    def _deserialize(self, params):
        # Success is a nested model; deserialize it only when present.
        if params.get("Success") is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(params.get("Success"))
        self.RequestId = params.get("RequestId")
class ModifyL7RulesRequest(AbstractModel):
    """ModifyL7Rules request structure.
    """
    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; net: Anti-DDoS Ultimate)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param Rule: Rule
        :type Rule: :class:`tencentcloud.dayu.v20180709.models.L7RuleEntry`
        """
        self.Business = None
        self.Id = None
        self.Rule = None
    def _deserialize(self, params):
        """Populate this model from *params*; warn about unknown keys."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        # Rule is a nested model; deserialize it only when present.
        if params.get("Rule") is not None:
            self.Rule = L7RuleEntry()
            self.Rule._deserialize(params.get("Rule"))
        # Warn about keys with no declared attribute; message typo fixed.
        extra_fields = set(params.keys()) - set(vars(self).keys())
        if extra_fields:
            warnings.warn("%s fields are useless." % ",".join(extra_fields))
class ModifyL7RulesResponse(AbstractModel):
    """ModifyL7Rules response structure.
    """
    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned with every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None
    def _deserialize(self, params):
        # Success is a nested model; deserialize it only when present.
        if params.get("Success") is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(params.get("Success"))
        self.RequestId = params.get("RequestId")
class ModifyNetReturnSwitchRequest(AbstractModel):
    """ModifyNetReturnSwitch request structure.
    """
    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (net: Anti-DDoS Ultimate)
        :type Business: str
        :param Id: Resource instance ID
        :type Id: str
        :param Status: Switchback switch, 0: off, 1: on
        :type Status: int
        :param Hour: Switchback duration in hours, in [0,1,2,3,4,5,6]; when Status=1, Hour must be greater than 0
        :type Hour: int
        """
        self.Business = None
        self.Id = None
        self.Status = None
        self.Hour = None
    def _deserialize(self, params):
        """Populate this model from *params*; warn about unknown keys."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.Status = params.get("Status")
        self.Hour = params.get("Hour")
        # Warn about keys with no declared attribute; message typo fixed.
        extra_fields = set(params.keys()) - set(vars(self).keys())
        if extra_fields:
            warnings.warn("%s fields are useless." % ",".join(extra_fields))
class ModifyNetReturnSwitchResponse(AbstractModel):
    """ModifyNetReturnSwitch response structure.
    """
    def __init__(self):
        r"""
        :param RequestId: Unique request ID returned with every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None
    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class ModifyNewDomainRulesRequest(AbstractModel):
    """ModifyNewDomainRules request structure.
    """
    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param Rule: Domain forwarding rule
        :type Rule: :class:`tencentcloud.dayu.v20180709.models.NewL7RuleEntry`
        """
        self.Business = None
        self.Id = None
        self.Rule = None
    def _deserialize(self, params):
        """Populate this model from *params*; warn about unknown keys."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        # Rule is a nested model; deserialize it only when present.
        if params.get("Rule") is not None:
            self.Rule = NewL7RuleEntry()
            self.Rule._deserialize(params.get("Rule"))
        # Warn about keys with no declared attribute; message typo fixed.
        extra_fields = set(params.keys()) - set(vars(self).keys())
        if extra_fields:
            warnings.warn("%s fields are useless." % ",".join(extra_fields))
class ModifyNewDomainRulesResponse(AbstractModel):
    """ModifyNewDomainRules response structure.
    """
    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned with every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None
    def _deserialize(self, params):
        # Success is a nested model; deserialize it only when present.
        if params.get("Success") is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(params.get("Success"))
        self.RequestId = params.get("RequestId")
class ModifyNewL4RuleRequest(AbstractModel):
    """ModifyNewL4Rule request structure.
    """
    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param Rule: Forwarding rule
        :type Rule: :class:`tencentcloud.dayu.v20180709.models.L4RuleEntry`
        """
        self.Business = None
        self.Id = None
        self.Rule = None
    def _deserialize(self, params):
        """Populate this model from *params*; warn about unknown keys."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        # Rule is a nested model; deserialize it only when present.
        if params.get("Rule") is not None:
            self.Rule = L4RuleEntry()
            self.Rule._deserialize(params.get("Rule"))
        # Warn about keys with no declared attribute; message typo fixed.
        extra_fields = set(params.keys()) - set(vars(self).keys())
        if extra_fields:
            warnings.warn("%s fields are useless." % ",".join(extra_fields))
class ModifyNewL4RuleResponse(AbstractModel):
    """ModifyNewL4Rule response structure.
    """
    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned with every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None
    def _deserialize(self, params):
        # Success is a nested model; deserialize it only when present.
        if params.get("Success") is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(params.get("Success"))
        self.RequestId = params.get("RequestId")
class ModifyResBindDDoSPolicyRequest(AbstractModel):
    """ModifyResBindDDoSPolicy request structure.
    """
    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; bgp: Anti-DDoS Pro (single IP); bgp-multip: Anti-DDoS Pro (multi-IP); net: Anti-DDoS Ultimate)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param PolicyId: Policy ID
        :type PolicyId: str
        :param Method: Bind or unbind; bind binds the policy, unbind unbinds it
        :type Method: str
        """
        self.Business = None
        self.Id = None
        self.PolicyId = None
        self.Method = None
    def _deserialize(self, params):
        """Populate this model from *params*; warn about unknown keys."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.PolicyId = params.get("PolicyId")
        self.Method = params.get("Method")
        # Warn about keys with no declared attribute; message typo fixed.
        extra_fields = set(params.keys()) - set(vars(self).keys())
        if extra_fields:
            warnings.warn("%s fields are useless." % ",".join(extra_fields))
class ModifyResBindDDoSPolicyResponse(AbstractModel):
    """ModifyResBindDDoSPolicy response structure.
    """
    def __init__(self):
        r"""
        :param Success: Success code
        :type Success: :class:`tencentcloud.dayu.v20180709.models.SuccessCode`
        :param RequestId: Unique request ID returned with every request; needed when reporting an issue.
        :type RequestId: str
        """
        self.Success = None
        self.RequestId = None
    def _deserialize(self, params):
        # Success is a nested model; deserialize it only when present.
        if params.get("Success") is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(params.get("Success"))
        self.RequestId = params.get("RequestId")
class ModifyResourceRenewFlagRequest(AbstractModel):
    """ModifyResourceRenewFlag request structure.
    """
    def __init__(self):
        r"""
        :param Business: Dayu sub-product code (bgpip: Anti-DDoS Advanced; net: Anti-DDoS Ultimate; shield: Chess Shield; bgp: Anti-DDoS Pro (single IP); bgp-multip: Anti-DDoS Pro (multi-IP); insurance: guarantee package; staticpack: three-network static package)
        :type Business: str
        :param Id: Resource ID
        :type Id: str
        :param RenewFlag: Auto-renewal flag (0: manual renewal; 1: auto-renewal; 2: no renewal upon expiration)
        :type RenewFlag: int
        """
        self.Business = None
        self.Id = None
        self.RenewFlag = None
    def _deserialize(self, params):
        """Populate this model from *params*; warn about unknown keys."""
        self.Business = params.get("Business")
        self.Id = params.get("Id")
        self.RenewFlag = params.get("RenewFlag")
        # Warn about keys with no declared attribute; message typo fixed.
        extra_fields = set(params.keys()) - set(vars(self).keys())
        if extra_fields:
            warnings.warn("%s fields are useless." % ",".join(extra_fields))
class ModifyResourceRenewFlagResponse(AbstractModel):
    """Response model for ModifyResourceRenewFlag.

    Attributes:
        Success: SuccessCode describing the outcome.
        RequestId: unique request ID, returned with every response.
    """

    def __init__(self):
        self.Success = None
        self.RequestId = None

    def _deserialize(self, params):
        success = params.get("Success")
        if success is not None:
            self.Success = SuccessCode()
            self.Success._deserialize(success)
        self.RequestId = params.get("RequestId")
class NewL4RuleEntry(AbstractModel):
    """Layer-4 forwarding rule.

    Attributes:
        Protocol: forwarding protocol, "TCP" or "UDP".
        VirtualPort: forwarding (listener) port.
        SourcePort: origin-server port.
        KeepTime: session keep-alive time in seconds.
        SourceList: origin servers, a list of L4RuleSource.
        LbType: load-balancing mode (1 weighted round-robin, 2 source-IP hash).
        KeepEnable: session keep-alive switch (0 off, 1 on).
        SourceType: origin type (1 domain, 2 IP).
        RuleId: rule ID.
        RuleName: rule description.
        RemoveSwitch: watermark-strip state (0 off, 1 on).
        ModifyTime: last modification time.
        Region: region code.
        Ip: bound resource IP.
        Id: bound resource ID.
    """

    # Every wire field except SourceList is copied verbatim.
    _SCALAR_KEYS = (
        "Protocol", "VirtualPort", "SourcePort", "KeepTime", "LbType",
        "KeepEnable", "SourceType", "RuleId", "RuleName", "RemoveSwitch",
        "ModifyTime", "Region", "Ip", "Id",
    )

    def __init__(self):
        for key in self._SCALAR_KEYS:
            setattr(self, key, None)
        self.SourceList = None

    def _deserialize(self, params):
        for key in self._SCALAR_KEYS:
            setattr(self, key, params.get(key))
        # SourceList is a list of nested L4RuleSource objects.
        if params.get("SourceList") is not None:
            self.SourceList = []
            for item in params.get("SourceList"):
                obj = L4RuleSource()
                obj._deserialize(item)
                self.SourceList.append(obj)
        # Warn about request keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class NewL7RuleEntry(AbstractModel):
    """Layer-7 (HTTP/HTTPS) forwarding rule.

    Attributes:
        Protocol: forwarding protocol, "http" or "https".
        Domain: forwarding domain.
        SourceType: origin type (1 domain, 2 IP).
        KeepTime: session keep-alive time in seconds.
        SourceList: origin servers, a list of L4RuleSource.
        LbType: load-balancing mode (1 weighted round-robin).
        KeepEnable: session keep-alive switch (0 off, 1 on).
        RuleId: rule ID; optional when creating, required to modify/delete.
        CertType: certificate source; must be 2 (Tencent-hosted) for https,
            may be 0 for http.
        SSLId: hosted-certificate ID (required when CertType is 2).
        Cert: deprecated — own-certificate content is no longer supported.
        PrivateKey: deprecated — own-certificate key is no longer supported.
        RuleName: rule description.
        Status: rule state (0 configured, 1 configuring, 2 config failed,
            3 deleting, 5 delete failed, 6 pending config, 7 pending delete,
            8 awaiting certificate).
        CCStatus: CC protection state (0 off, 1 on).
        CCEnable: CC protection state for HTTPS (0 off, 1 on).
        CCThreshold: CC protection threshold for HTTPS.
        CCLevel: CC protection level for HTTPS.
        Region: region code.
        Id: resource ID.
        Ip: resource IP.
        ModifyTime: last modification time.
        HttpsToHttpEnable: use HTTP back-to-origin for HTTPS (0 off, 1 on;
            defaults to off).
        VirtualPort: access port; may be None.
    """

    # Every wire field except SourceList is copied verbatim.
    _SCALAR_KEYS = (
        "Protocol", "Domain", "SourceType", "KeepTime", "LbType",
        "KeepEnable", "RuleId", "CertType", "SSLId", "Cert", "PrivateKey",
        "RuleName", "Status", "CCStatus", "CCEnable", "CCThreshold",
        "CCLevel", "Region", "Id", "Ip", "ModifyTime", "HttpsToHttpEnable",
        "VirtualPort",
    )

    def __init__(self):
        for key in self._SCALAR_KEYS:
            setattr(self, key, None)
        self.SourceList = None

    def _deserialize(self, params):
        for key in self._SCALAR_KEYS:
            setattr(self, key, params.get(key))
        # SourceList is a list of nested L4RuleSource objects.
        if params.get("SourceList") is not None:
            self.SourceList = []
            for item in params.get("SourceList"):
                obj = L4RuleSource()
                obj._deserialize(item)
                self.SourceList.append(obj)
        # Warn about request keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class OrderBy(AbstractModel):
    """Sort specification for list queries.

    Attributes:
        Field: field to sort by, one of "bandwidth" or "overloadCount".
        Order: direction, one of asc/ASC/desc/DESC.
    """

    def __init__(self):
        self.Field = None
        self.Order = None

    def _deserialize(self, params):
        for key in ("Field", "Order"):
            setattr(self, key, params.get(key))
        # Warn about request keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class Paging(AbstractModel):
    """Pagination window.

    Attributes:
        Offset: starting position.
        Limit: number of items to return.
    """

    def __init__(self):
        self.Offset = None
        self.Limit = None

    def _deserialize(self, params):
        for key in ("Offset", "Limit"):
            setattr(self, key, params.get(key))
        # Warn about request keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class ProtocolPort(AbstractModel):
    """A (protocol, port) pair.

    Attributes:
        Protocol: "tcp" or "udp".
        Port: port number.
    """

    def __init__(self):
        self.Protocol = None
        self.Port = None

    def _deserialize(self, params):
        for key in ("Protocol", "Port"):
            setattr(self, key, params.get(key))
        # Warn about request keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class RegionInstanceCount(AbstractModel):
    """Resource-instance count per region.

    Attributes:
        Region: region code.
        RegionV3: region code (new naming scheme).
        Count: number of resource instances in the region.
    """

    def __init__(self):
        for key in ("Region", "RegionV3", "Count"):
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in ("Region", "RegionV3", "Count"):
            setattr(self, key, params.get(key))
        # Warn about request keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class ResourceIp(AbstractModel):
    """IP list belonging to one resource.

    Attributes:
        Id: resource ID.
        IpList: list of IP strings for the resource.
    """

    def __init__(self):
        self.Id = None
        self.IpList = None

    def _deserialize(self, params):
        for key in ("Id", "IpList"):
            setattr(self, key, params.get(key))
        # Warn about request keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class SchedulingDomain(AbstractModel):
    """Scheduling-domain information.

    Attributes:
        Domain: scheduling domain name.
        BGPIpList: BGP-line IP list.
        CTCCIpList: China Telecom line IP list.
        CUCCIpList: China Unicom line IP list.
        CMCCIpList: China Mobile line IP list.
        OverseaIpList: overseas line IP list.
        Method: scheduling method; only "priority" is supported.
        CreateTime: creation time.
        TTL: DNS TTL.
        Status: status; may be None.
        ModifyTime: last modification time; may be None.
    """

    _KEYS = (
        "Domain", "BGPIpList", "CTCCIpList", "CUCCIpList", "CMCCIpList",
        "OverseaIpList", "Method", "CreateTime", "TTL", "Status",
        "ModifyTime",
    )

    def __init__(self):
        for key in self._KEYS:
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in self._KEYS:
            setattr(self, key, params.get(key))
        # Warn about request keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class SuccessCode(AbstractModel):
    """Operation result code; only used for the success case.

    Attributes:
        Code: success/error code.
        Message: human-readable description.
    """

    def __init__(self):
        self.Code = None
        self.Message = None

    def _deserialize(self, params):
        for key in ("Code", "Message"):
            setattr(self, key, params.get(key))
        # Warn about request keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class WaterPrintKey(AbstractModel):
    """Watermark key.

    Attributes:
        KeyId: watermark key ID.
        KeyContent: watermark key value.
        KeyVersion: key version number.
        OpenStatus: enabled flag (0 disabled, 1 enabled).
        CreateTime: key generation time.
    """

    _KEYS = ("KeyId", "KeyContent", "KeyVersion", "OpenStatus", "CreateTime")

    def __init__(self):
        for key in self._KEYS:
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in self._KEYS:
            setattr(self, key, params.get(key))
        # Warn about request keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class WaterPrintPolicy(AbstractModel):
    """Watermark policy parameters.

    Attributes:
        TcpPortList: TCP port ranges, e.g. ["2000-3000", "3500-4000"].
        UdpPortList: UDP port ranges, e.g. ["2000-3000", "3500-4000"].
        Offset: watermark offset, in [0, 100).
        RemoveSwitch: auto-strip flag (0 off, 1 on).
        OpenStatus: enabled flag (0 disabled, 1 enabled).
    """

    _KEYS = ("TcpPortList", "UdpPortList", "Offset", "RemoveSwitch",
             "OpenStatus")

    def __init__(self):
        for key in self._KEYS:
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in self._KEYS:
            setattr(self, key, params.get(key))
        # Warn about request keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
| StarcoderdataPython |
6538882 | # Faça um programa que abra e reproduza o áudio de um arquivo MP3 (em Python).
# Open an MP3 file and play its audio (pygame mixer).
import pygame

pygame.mixer.init()
pygame.mixer.music.load('desafio021.mp3')
pygame.mixer.music.play()
while pygame.mixer.music.get_busy():
    # BUG FIX: the original spun with `continue`, pinning a CPU core for the
    # whole duration of the track. Sleeping between polls keeps playback
    # intact at negligible CPU cost.
    pygame.time.wait(100)
| StarcoderdataPython |
11254620 | <filename>WorkInProgress/bin/sendtorifa_v2.py
# Standard library
import csv
import datetime
import json
import logging
import os, time, stat
import os
import shutil
import sys  # needed by sendmail() for sys.exc_info()/sys.exit()
# Third-party
import requests
# Local / custom modules
import zabbix_pbi
import class_toolbox
import class_sendmail
#___________________________________________________________________
## Variable initialisation
csvligne = ''
## Path definitions (working tree rooted at the RIFA/PBI application folder)
pathFolder = "/mycloud/apps/rifa/pbi" #os.getcwd()
pathtemp = pathFolder + os.sep + "temp" + os.sep
pathjson = pathFolder + os.sep + "json" + os.sep
pathconfig = pathFolder + os.sep + "config" + os.sep
pathlogs = pathFolder + os.sep + "logs" + os.sep
# Output file produced by zabbix_pbi.get_host_list()
pbiserver = pathlogs + "infoserveurZabbix.txt"
## Zabbix host groups holding the Power BI servers (prod + non-prod)
zhostgroup = 'prd-middleware_powerbi_prod,prd-middleware_powerbi_horsprod'
## Recipients of the end-of-run report mail
to = ["<NAME> <<EMAIL>>"]
cc = [""]
bcc = [""]
###-----------------------------------------------------------------
### END : Variable definitions
####################################################################
####################################################################
### BEGIN : Logger setup
###-----------------------------------------------------------------
dt = class_toolbox.c_date()
# One log file per run, prefixed with the run timestamp
newlogfile = pathlogs + str(dt.get_datetime()) + '_get-sendtorifa_v2.log'
logging.basicConfig(filename=newlogfile,
                    filemode='w',
                    level=logging.INFO,
                    format='%(asctime)s %(message)s',
                    datefmt='%m/%d/%Y %I:%M:%S %p')
logging.info('______Send To Rifa ________')
logging.info('________ Définition des chemins ________')
logging.info('______________ Répertoire Parent: ' + pathFolder)
logging.info('______________ Répertoire Temporaire: ' + pathtemp)
logging.info('______________ Répertoire des Json: ' + pathjson)
logging.info('______________ Répertoire de Config: ' + pathconfig)
logging.info('______________ Répertoire de Logs: ' + pathlogs)
###-----------------------------------------------------------------
### END : Logger setup
####################################################################
#___________________________________________________________________
####################################################################
### DEB : Fonctions
###-----------------------------------------------------------------
## The Zabbix server list is produced separately by zabbix_pbi.py
#### into pathFolder/logs/infoserveurZabbix.txt
# Read the pre-registered PBI servers from the JSON config and dump their names.
def readpbiservers():
    """Write one server name per line to pathlogs/j-pbi-servers_logs.txt."""
    logging.info('________ Parcours serverspbi.json ________')
    cfg = pathconfig + "serverspbi.json"
    logging.info('______________ Répertoire Parent: ' + cfg)
    with open(cfg) as jsoncfg:
        # Load the whole config as a dict of server entries.
        data = json.load(jsoncfg)
        with open(pathlogs + 'j-pbi-servers_logs.txt', 'w') as file:
            for s in data:
                srv = data[s]['srv']
                file.write(srv + "\n")
def readsettings(srv):
    """Return [iua, env, srv, url, version] for one entry of serverspbi.json.

    *srv* is the key of the server entry in the config file.
    """
    logging.info('________ Parcours serverspbi.json ________')
    cfg = pathconfig + "serverspbi.json"
    logging.info('______________ Répertoire Parent: ' + cfg)
    with open(cfg) as jsoncfg:
        data = json.load(jsoncfg)
        # BUG FIX: the original re-assigned `srv = data[srv]['srv']` and then
        # used the *new* value as the lookup key for the remaining fields,
        # which only worked when the JSON key happened to equal its own
        # 'srv' field. Look the entry up once instead.
        entry = data[srv]
        return [entry['iua'], entry['env'], entry['srv'], entry['url'],
                entry['version']]
# Derive environment info from a server name.
def get_env(srv):
    """Return [iua_app, env, srv] derived from the server name *srv*.

    env is the third character of the name; iua_app is the slice between
    position 6 and the last three characters (naming convention of the
    hosting platform).
    """
    env = srv[2]
    iua_app = srv[6:-3]
    logging.info('________ Get Env ________')
    logging.info('______________ Param 1: ' + srv)
    logging.info('______________ Serveur: ' + env)
    logging.info('______________ IUA APP: ' + iua_app)
    # BUG FIX: the original logged fields of an undefined name `info`,
    # raising NameError on every call. Log only the values we actually have.
    logging.info('______________ Retour Get Env: ' + iua_app + ' - ' + env + ' - ' + srv)
    return [iua_app, env, srv]
def compare_z_to_j():
    """Keep the Zabbix servers that are also in the JSON dump.

    Returns a list of single-element lists, each holding a server name
    (newline stripped) present in both files. Zabbix servers missing from
    the JSON side are logged as ANOMALIE for later follow-up.
    """
    z = pathlogs + "infoserveurZabbix.txt"
    j = pathlogs + "j-pbi-servers_logs.txt"
    arrpbisrv = []
    # PERF FIX: the original re-opened and re-scanned the JSON file for every
    # Zabbix line (O(n*m) and n file opens). Read it once into a set; this
    # also appends each match only once even if the JSON file has duplicates.
    with open(j) as jfile:
        jlines = set(jfile.readlines())
    with open(z) as zfile:
        for zrow in zfile.readlines():
            if zrow in jlines:
                arrpbisrv.append([zrow.replace("\n", "")])
            else:
                logging.info('ANOMALIE:Z to J:JSON:VIDE:ZBBX:' + zrow.rstrip('\n\r'))
    return arrpbisrv
def compare_j_to_z():
    """Log, for every server of the JSON dump, whether Zabbix also knows it.

    Mirror of compare_z_to_j (JSON -> Zabbix direction); it only logs and
    prints, and returns nothing.
    """
    z = pathlogs + "infoserveurZabbix.txt"
    j = pathlogs + "j-pbi-servers_logs.txt"
    with open(j) as jfile:
        for jrow in jfile.readlines():
            # NOTE(review): the Zabbix file is re-opened and re-scanned for
            # every JSON line (O(n*m)); consider reading it once into a set.
            with open(z) as zfile:
                bln = False
                for zrow in zfile.readlines():
                    if zrow == jrow:
                        print('----------------------------')
                        print('JSON: ' + jrow.rstrip('\n\r'))
                        print('ZBBX: ' + zrow.rstrip('\n\r'))
                        logging.info('COK:J to Z:JSON:' + jrow.rstrip('\n\r') + ':' + 'ZBBX:' + zrow.rstrip('\n\r'))
                        bln = True
                # No match at all: the JSON server is unknown to Zabbix.
                if bln == False: logging.info('ANO:J to Z:JSON:' + jrow.rstrip('\n\r') + ':ZBBX:VIDE')
def createjson(env):
    """Build the RIFA JSON payload for one server and write it to pathjson.

    *env* is the list returned by readsettings: [iua, env, srv, url, version].
    Returns the path of the file written.
    """
    logging.info('________ Create Json ________')
    cpt = 0
    for p in env:
        cpt = cpt + 1
        c = str(cpt)
        # NOTE(review): the 5th element is deliberately not logged. In an
        # earlier layout the 5th field was a password; today it is the
        # version -- confirm whether the skip is still wanted.
        if cpt != 5 :
            logging.info('______________ Param ' + c + ' : ' + p)
    # Payload layout expected by RIFA:
    # {"data": [{"application_iua": env[0], "environment": env[1],
    #            "server": env[2], "technical_iua": "PBI",
    #            "technical_version": env[4], "application_version": "",
    #            "technical_version_details": ""}]}
    data = {}
    data["data"] = []
    data["data"].append({
        "application_iua": env[0],
        "environment": env[1],
        "server": env[2],
        "technical_iua": "PBI",
        "technical_version": env[4],
        "application_version": "",
        "technical_version_details": ""
    })
    logging.info(data)
    # The JSON could be posted directly without touching disk; it is kept
    # on disk for traceability.
    file = pathjson + "PBI_" + env[0] + "_" + env[2] + "_" + env[1] + ".json"
    with open(file,'w') as outfile:
        json.dump(data,outfile)
    logging.info('______________ Retour json: ' + file)
    return file
def get_info(pbisrvtab):
    """For each matched server, build its JSON payload and push it to RIFA.

    *pbisrvtab* is the list of single-element lists returned by
    compare_z_to_j().
    """
    logging.info('________ Get Info ________')
    for srv in pbisrvtab:
        # Entries arrive as single-element lists rendered via str();
        # strip the list punctuation to recover the bare server name.
        srv = str(srv).replace("[", "").replace("]", "").replace("'", "")
        infoserver = readsettings(srv)
        file = createjson(infoserver)
        # BUG FIX: the original logged the literal text
        # '===> JSON File created : " + file' because the concatenation was
        # inside the string literal (mismatched quotes).
        logging.info("===> JSON File created : " + file)
        # Push the generated json file to RIFA.
        sendtorifa(file)
        logging.info('________ Sended to RIFA ________')
        print("The file: " + file + " is sended to Rifa")
        logging.info('')
# Clear the scratch files that do not need to be kept between runs.
def removefiles():
    """Delete every file in the temp and json working directories."""
    logging.info('________ Suppression des fichiers précédemment utilisés ________')
    for folder, label in ((pathtemp, '______________ Répertoire temporaire: '),
                          (pathjson, '______________ Répertoire Json: ')):
        for name in os.listdir(folder):
            target = folder + name
            os.remove(target)
            logging.info(label + target)
# Push one generated file to RIFA.
def sendtorifa(file):
    """Upload *file* to RIFA through class_toolbox.c_sendtorifa and return
    the sender's result."""
    logging.info('________ Envoie du fichier à RIFA ' + file + ' ________')
    logging.info('______________ Appel à la classe c_sendtorifa: ')
    sender = class_toolbox.c_sendtorifa(pathlogs)
    return sender.sendtorifa(file)
# Delete old log files past the retention window.
# NOTE(review): the original comment said 7-day retention but the code keeps
# 13 days (s = 13*jour) -- confirm which value is intended.
def removeoldlogfiles():
    """Remove files in pathlogs older than the retention window."""
    heure = 60*60                     # seconds per hour
    jour=24*heure                     # seconds per day
    s = 13*jour                       # retention window in seconds
    sup_one_week = time.time() - s    # cutoff timestamp
    os.chdir(pathlogs)                # NOTE(review): changes process CWD as a side effect
    for somefile in os.listdir('.'):
        mtime=os.path.getmtime(somefile)
        if mtime < sup_one_week:
            os.unlink(somefile)
def sendmail(to, cc, bcc, logfile):
    """Mail an anomaly report extracted from *logfile*.

    Lines containing "ANOMALIE" become the mail body (or "Pas d'anomalie"
    when none are found); the log file itself is attached.
    """
    # SMTP relay and envelope settings.
    serveur = class_sendmail.ServeurSMTP("application-qua.emea.smtp.cib.net", 25, "", "")
    exped = "mbx-middleware <<EMAIL>>"
    # NOTE(review): the subject starts with a stray apostrophe -- probably a
    # typo, kept as-is to avoid changing the user-visible subject silently.
    sujet = "'RIFA: Anomalie.s sur serveur Power BI"
    if logfile == '':
        pjointes = []
    else:
        pjointes = [logfile]
    codage = 'ISO-8859-15'
    typetexte = 'plain'
    # Collect anomaly lines from the attached log file(s).
    tabano = ''
    corps = ''
    chaine = "ANOMALIE"
    for f in pjointes:
        print(os.path.basename(f),)
        with open(f,"r") as fichier:
            for ligne in fichier:
                if chaine in ligne:
                    tabano = tabano + ligne
                    print(ligne.rstrip())
    if tabano == '':
        corps = "Pas d'anomalie"
    else:
        corps = tabano
    # Build the properly formatted and encoded mail (headers + body).
    try:
        message = class_sendmail.MessageSMTP(exped, to, cc, bcc, sujet, corps, pjointes, codage, typetexte)
    # BUG FIX: the original used a bare `except:` which also swallows
    # SystemExit/KeyboardInterrupt; catch Exception only. (`sys` is imported
    # at the top of the file.)
    except Exception:
        print(u"%s" % sys.exc_info()[1])
        sys.exit()
    # Send the mail and show the result.
    rep = class_sendmail.envoieSMTP(message, serveur)
    print(rep)
###-----------------------------------------------------------------
### END : Functions
####################################################################
#___________________________________________________________________
####################################################################
### BEGIN : MAIN
###-----------------------------------------------------------------
# Reset the ./temp and ./json working directories
removefiles()
# Dump the PBI server list from Zabbix into pbiserver
z_pbisrv = pbiserver
zabbix_pbi.get_host_list(zhostgroup,z_pbisrv)
# Dump the pre-registered PBI servers from the JSON config
readpbiservers()
# Keep only the servers known on both sides; mismatches are logged as
# anomalies for later follow-up
pbisrvtab = compare_z_to_j()
# Build one json per server and push it to RIFA
get_info(pbisrvtab)
# Remove log files older than the retention window
removeoldlogfiles()
# Mail the end-of-run report
sendmail(to, cc, bcc, newlogfile)
###-----------------------------------------------------------------
### END : MAIN
####################################################################
| StarcoderdataPython |
1926552 | <reponame>eriktews/space-status-indicator
#!/usr/bin/env python
from gevent import monkey; monkey.patch_all()
from geventwebsocket.handler import WebSocketHandler
import gevent
import argparse
import re
import datetime
import time
import logging
from gevent import subprocess
from gevent import Greenlet
import socketio
import sqlite3
import json
class storage:
    """Tracks MAC addresses seen on the network in fixed time intervals and
    derives the number of distinct "visitor" clients currently online.

    MACs online for more than 80% of the tracked intervals are classified as
    permanent infrastructure ("alwaysOn"), persisted in SQLite and excluded
    from the visitor count. Count changes are broadcast over socket.io on
    the /online namespace.
    """

    def __init__(self, server, file):
        self.iLen = 180                                # interval length, seconds
        self.intervals = (24*60*60)//self.iLen         # intervals per day
        self.alwaysOn = set()      # MACs considered permanent infrastructure
        self.seen = set()          # MACs seen during the current interval
        self.intervalData = []     # history of [interval_index, mac_set]
        self.current = None        # current interval index (timestamp // iLen)
        self.clients = 0           # last published visitor count
        self.server = server       # socket.io server used for broadcasts
        self.conn = sqlite3.connect(file)
        self.conn.isolation_level = None               # autocommit mode
        self.cur = self.conn.cursor()
        self.init_db()
        server.on('connect', namespace='/online', handler=self.connect)

    def connect(self, sid, environ):
        """Send the current client count to a freshly connected client."""
        self.server.emit("log", room=sid, data=self.clients, namespace="/online")

    def init_db(self):
        """Create the alwaysOn table if needed and load persisted MACs."""
        # BUG FIX: the original wrapped CREATE TABLE in a bare `except: pass`,
        # silently swallowing *any* database error. Use the idiomatic
        # IF NOT EXISTS clause instead.
        self.cur.execute("CREATE TABLE IF NOT EXISTS alwaysOn (mac text primary key)")
        self.cur.execute("SELECT mac from alwaysOn")
        for i in self.cur.fetchall():
            self.alwaysOn.add(i[0])

    def clearLast(self):
        """Drop the oldest interval once more than a day of history is kept."""
        if len(self.intervalData) > self.intervals:
            self.intervalData.pop(0)

    def addCurrentToLast(self):
        """Archive the current interval's MAC set into the history."""
        self.intervalData.append([self.current, self.seen])

    def addNewSet(self, targetInterval):
        """Advance from the current interval up to *targetInterval*."""
        logging.info("current number online " + str(self.clients))
        self.updateAlwaysOn()
        while (targetInterval > self.current):
            self.clearLast()
            self.addCurrentToLast()
            self.current = self.current + 1
            self.seen = set()

    def addMac(self, x):
        """Record one sighting; *x* is [epoch_seconds, mac_string]."""
        # Renamed the local from `time` to avoid shadowing the time module.
        [timestamp, mac] = x
        targetInterval = timestamp // self.iLen
        if self.current is None:
            self.current = targetInterval
        if targetInterval > self.current:
            self.addNewSet(targetInterval)
        if mac not in self.seen:
            logging.debug("adding mac " + mac + " to set " + str(self.current))
            self.seen.add(mac)
            self.countClients()

    def countClients(self):
        """Recompute the visitor count and broadcast it when it changed."""
        # Consider the current interval plus the most recent archived one,
        # so a client is not dropped at the interval boundary.
        allMacs = self.seen
        if len(self.intervalData) > 0:
            allMacs = allMacs | self.intervalData[-1][1]
        clients = allMacs - self.alwaysOn
        numClients = len(clients)
        if numClients != self.clients:
            self.updateClients(numClients)
            self.clients = numClients

    def updateAlwaysOn(self):
        """Promote MACs online >80% of a full day of history to alwaysOn."""
        if len(self.intervalData) < self.intervals:
            return
        s = self.stats()
        for i in s:
            if (s[i] > 80) and (i not in self.alwaysOn):
                self.alwaysOn.add(i)
                self.cur.execute("INSERT INTO alwaysOn(mac) values (?)", (i,))

    def stats(self):
        """Return {mac: presence percentage over the archived intervals}."""
        s = {}
        for i in self.intervalData:
            [d, macs] = i
            for m in macs:
                if m in s:
                    s[m] = s[m]+1
                else:
                    s[m] = 1
        for i in s:
            s[i] = (s[i]*100)//len(self.intervalData)
        return s

    def printStats(self):
        """Debug helper: print the presence percentage of every MAC."""
        s = self.stats()
        for i in s:
            print(str(i) + " " + str(s[i]) + "%")

    def updateClients(self, c):
        """Broadcast the new visitor count *c* on the /online namespace."""
        self.server.emit("log", data=c, namespace="/online")
class commandProcessor:
    """Runs a capture command and feeds (timestamp, MAC) pairs into storage."""

    # Matches "<epoch>.<frac> <mac> " at line start, as produced by
    # `tcpdump -tt -e`. Raw string avoids invalid-escape warnings.
    remac = re.compile(r'^(\d+)\.\d+ ([0-9a-f\:]+) ', re.M)

    def __init__(self, cmd, storage):
        self.cmd = cmd          # shell command whose stdout is parsed
        self.storage = storage  # storage instance receiving MAC sightings

    def process(self, line):
        """Return [epoch_seconds, mac] parsed from *line*, or None."""
        result = self.remac.search(line)
        if result:
            return [int(result.group(1)), result.group(2)]
        return None

    def run(self):
        """Spawn the capture command and stream its stdout into storage."""
        p = subprocess.Popen(self.cmd, stdout=subprocess.PIPE, shell=True,
                             universal_newlines=True)
        for line in p.stdout:
            logging.debug("read line " + line)
            # BUG FIX: the original unpacked self.process(line) unconditionally,
            # raising TypeError on any non-matching line (process returns None).
            parsed = self.process(line)
            if parsed:
                ts, mac = parsed
                logging.debug(mac)
                self.storage.addMac([ts, mac])
        p.poll()
class spaceAPI:
    """Minimal WSGI app exposing the hackerspace status (SpaceAPI) endpoint."""

    def __init__(self, template, s):
        self.template = template  # path to the SpaceAPI JSON template
        self.s = s                # storage instance providing .clients

    def application(self, environ, start_response):
        """WSGI entry point: serve /api/status, 404 for everything else."""
        path = environ.get('PATH_INFO', '').lstrip('/')
        if path == 'api/status':
            # Re-read the template on every request so edits take effect
            # without a restart; `with` guarantees the handle is closed.
            with open(self.template) as f:
                j = json.load(f)
            # The space counts as open whenever at least one client is online.
            j['state']['open'] = self.s.clients != 0
            start_response('200 OK', [('Content-Type', 'application/json'),
                                      ('Access-Control-Allow-Origin', '*'),
                                      ('Cache-Control', 'no-cache')])
            return [json.dumps(j).encode("utf-8")]
        start_response('404 NOT FOUND', [('Content-Type', 'text/plain')])
        # BUG FIX: WSGI response bodies must be bytes; the original returned
        # a str, which breaks under Python 3 WSGI servers (PEP 3333).
        return [b'Not Found']
def main(args):
    """Wire everything together and serve forever.

    Starts the socket.io/WSGI server on port 8080 and a greenlet running the
    traffic-capture command; blocks until the capture greenlet exits.
    """
    sio = socketio.Server(async_mode='gevent')
    from gevent import pywsgi
    s = storage(sio, args.database)
    sAPI = spaceAPI(args.apifile, s)
    # socket.io handles its own endpoints and delegates the rest to spaceAPI.
    app = socketio.Middleware(sio, sAPI.application)
    server = pywsgi.WSGIServer(('', 8080), app, handler_class=WebSocketHandler)
    cP = commandProcessor(args.cmd, s)
    g = Greenlet(cP.run)
    g.start()
    server.start()
    logging.info("server started")
    # Block on the capture greenlet; the WSGI server runs in the background.
    g.join()
if __name__ == "__main__":
    # Execute only when run as a script.
    logging.basicConfig(level=logging.WARNING)
    parser = argparse.ArgumentParser(description="Monitor the status of a hackerspace")
    parser.add_argument("--database", help="File used to store the MACs that are almost permanently online", default="/var/lib/space-status-indicator/alwaysOn.db")
    parser.add_argument("--cmd", help="The command that is executed to monitor the traffic on the netwrok", default="tcpdump -tt -e -n -q -i eth0 broadcast or multicast")
    parser.add_argument("--apifile", help="The JSON file that is used as a template for the hackerspace API", default="/var/lib/space-status-indicator/hackerspace.json")
    args = parser.parse_args()
    main(args)
| StarcoderdataPython |
1852119 | """
this application divides a video into segments when it finds motion specified by thresh value
it works with use of OPENCV to detect motion and uses FFMPEG to create an output file.
"""
import sys
from PyQt5 import QtGui
from PyQt5.QtWidgets import (QMainWindow, QLabel, QLineEdit, QPushButton,
QProgressBar, QStatusBar, QFileDialog, QApplication)
from vea.controller import Controller
class Window(QMainWindow):
    def __init__(self):
        """Build the fixed-size main window: input section, output section,
        options/status section, and wire controller signals to the UI slots."""
        super(Window, self).__init__()
        self._controller = Controller()
        self.setGeometry(100, 100, 500, 600)
        self.setFixedSize(500, 600)
        self.setWindowTitle("Video Editing Automation")
        self.setWindowIcon(QtGui.QIcon('./assets/icon.png'))  # application window icon
        # --- Input section: source-file picker + video info labels ---
        inputDetailsFileLabel = QLabel(self)
        inputDetailsFileLabel.setText("Input Details ")
        inputDetailsFileLabel.setFont(QtGui.QFont('Arial', 20, QtGui.QFont.Bold))
        inputDetailsFileLabel.resize(200, 25)
        inputDetailsFileLabel.move(20, 10)
        self.selectFileLabel = QLabel(self)
        self.selectFileLabel.setText("Select the file to edit")
        self.selectFileLabel.resize(200, 27)
        self.selectFileLabel.move(20, 50)
        self.selectFileTextbox = QLineEdit(self)
        self.selectFileTextbox.move(20, 80)
        self.selectFileTextbox.resize(380, 27)
        self.selectFileTextbox.setPlaceholderText('File Path')
        # Labels updated asynchronously by the controller signals.
        self.totalFramesLabel = QLabel(self)
        self.totalFramesLabel.setStyleSheet('color: red')
        self.totalFramesLabel.move(20, 110)
        self.videoFps = QLabel(self)
        self.videoFps.setStyleSheet('color: red')
        self.videoFps.move(20, 125)
        btn = QPushButton("Browse", self)
        btn.setStatusTip('Select the file to edit')
        btn.clicked.connect(self.browseFiles)
        btn.resize(btn.sizeHint())
        btn.move(400, 80)
        tip1 = QLabel(self)
        tip1.setText("Tip : Select a video of your favourite formats, we will \n make sure that we find best motion "
                     "content \n and provide you the output files. ")
        tip1.setFont(QtGui.QFont('Courier', 10))
        tip1.resize(tip1.sizeHint())
        tip1.move(20, 150)
        # --- Output section: destination-folder picker ---
        outputDetailsFileLabel = QLabel(self)
        outputDetailsFileLabel.setText("Output Details ")
        outputDetailsFileLabel.setFont(QtGui.QFont('Arial', 20, QtGui.QFont.Bold))
        outputDetailsFileLabel.resize(200, 25)
        outputDetailsFileLabel.move(20, 210)
        self.destinationFileLabel = QLabel(self)
        self.destinationFileLabel.setText("Select the destination folder")
        self.destinationFileLabel.resize(200, 27)
        self.destinationFileLabel.move(20, 260)
        self.destinationFileTextbox = QLineEdit(self)
        self.destinationFileTextbox.move(20, 290)
        self.destinationFileTextbox.resize(380, 27)
        self.destinationFileTextbox.setPlaceholderText('Folder Path')
        self.videoPercentCut = QLabel(self)
        self.videoPercentCut.setStyleSheet('color: red')
        self.videoPercentCut.move(20, 320)
        btnDestination = QPushButton("Browse", self)
        btnDestination.setStatusTip('Select the folder to store')
        btnDestination.clicked.connect(self.browseFolders)
        btnDestination.resize(btn.sizeHint())
        btnDestination.move(400, 290)
        tip1 = QLabel(self)
        tip1.setText("Tip : We will create number of clips where, we find best \n motion "
                     "content and provide you the output files. ")
        tip1.setFont(QtGui.QFont('Courier', 10))
        tip1.resize(tip1.sizeHint())
        tip1.move(20, 340)
        # --- Options & status section: threshold, progress bar, actions ---
        outputDetailsFileLabel = QLabel(self)
        outputDetailsFileLabel.setText("Options & Status")
        outputDetailsFileLabel.setFont(QtGui.QFont('Arial', 20, QtGui.QFont.Bold))
        outputDetailsFileLabel.resize(250, 25)
        outputDetailsFileLabel.move(20, 410)
        self.progress = QProgressBar(self)
        self.progress.setGeometry(20, 450, 460, 20)
        destinationFileLabel = QLabel(self)
        destinationFileLabel.setText("Enter a Threshold Value")
        destinationFileLabel.resize(200, 27)
        destinationFileLabel.move(20, 500)
        self.thresholdTextbox = QLineEdit(self)
        self.thresholdTextbox.move(20, 520)
        self.thresholdTextbox.resize(130, 27)
        self.thresholdTextbox.setPlaceholderText('ex. 25')
        self.btnPlayContours = QPushButton("Play Live", self)
        self.btnPlayContours.setStatusTip('Click to play your files with Motion Changes')
        self.btnPlayContours.clicked.connect(self.playContours)
        self.btnPlayContours.resize(120, 27)
        self.btnPlayContours.move(200, 520)
        self.btnCalculate = QPushButton("Create", self)
        self.btnCalculate.setStatusTip('Click to create your files')
        self.btnCalculate.clicked.connect(self.callMotionDetection)
        self.btnCalculate.resize(120, 27)
        self.btnCalculate.move(350, 520)
        self.statusBar = QStatusBar(self)
        self.setStatusBar(self.statusBar)
        # Controller -> UI signal wiring (updates arrive from a worker thread).
        self._controller.progress.connect(self.setProgress)
        self._controller.fps.connect(self.setVideoFpsLabel)
        self._controller.frames.connect(self.setTotalFramesLabel)
# All Custom Methods
# select a input file
def browseFiles(self):
    """Slot: open a file dialog for supported video formats and put the chosen path in the input textbox."""
    name = QFileDialog.getOpenFileName(None, "Open File", "~",
                                       "Video Files (*.mp4 *.flv *.avi *.mov *.mpg *.mxf)")
    # getOpenFileName returns (path, selected_filter); only the path is kept.
    self.selectFileTextbox.setText(str(name[0]))
# select the output folder
def browseFolders(self):
    """Slot: open a directory dialog and put the chosen output folder in the destination textbox."""
    name = QFileDialog.getExistingDirectory(None, "Select Directory")
    self.destinationFileTextbox.setText(name)
# set progress to the progress bar
def setProgress(self, value):
    """Slot: update the progress bar with the controller's reported percentage."""
    self.progress.setValue(value)
# set status to the status bar
def setStatusTipText(self, value):
    """Slot: show a transient message in the status bar (10 ms timeout as coded -- presumably meant to be longer; TODO confirm)."""
    self.statusBar.showMessage(value, 10)
# set total number of frames on the window
def setTotalFramesLabel(self, value):
    """Slot: display the total number of frames reported by the controller."""
    self.totalFramesLabel.setText("Total Frames :- " + str(value))
    self.totalFramesLabel.resize(self.totalFramesLabel.sizeHint())
# set video fps on the window
def setVideoFpsLabel(self, value):
    """Slot: display the input video's frames-per-second."""
    self.videoFps.setText("Video FPS :- " + str(value))
    self.videoFps.resize(self.videoFps.sizeHint())
# set percentage of video output to the input on the window
def setVideoPercentCuts(self, value):
    """Slot: display what percentage of the input video was cut out of the output."""
    self.videoPercentCut.setText("Percentage of video cut out :- " + str(value) + "%")
    self.videoPercentCut.resize(self.videoPercentCut.sizeHint())
# play the video with motion algorithm applied
def playContours(self):
    """Preview the motion-detection overlay live using the current threshold/input/output settings.

    Does nothing (silently) when any of the three fields is empty.
    """
    # Disable the button while the preview is being started so it cannot be
    # re-triggered; re-enable in a finally so an exception from the
    # controller cannot leave the button permanently disabled (the original
    # only re-enabled on the happy paths).
    self.btnPlayContours.setEnabled(False)
    try:
        threshold = self.thresholdTextbox.text()
        inputFile = self.selectFileTextbox.text()
        outputFile = self.destinationFileTextbox.text()
        if threshold and inputFile and outputFile:
            self._controller.set_threshold(threshold)
            self._controller.set_input_file(inputFile)
            self._controller.set_output_fol(outputFile)
            self._controller.start_display(threshold)
    finally:
        self.btnPlayContours.setEnabled(True)
# process the video and create output files
def callMotionDetection(self):
    """Start processing the input video and creating the output clip files.

    Does nothing (silently) when any of the three fields is empty.
    """
    # Same guard pattern as playContours: re-enable the button even if the
    # controller raises (the original only re-enabled on the happy paths).
    self.btnCalculate.setEnabled(False)
    try:
        threshold = self.thresholdTextbox.text()
        inputFile = self.selectFileTextbox.text()
        outputFile = self.destinationFileTextbox.text()
        if threshold and inputFile and outputFile:
            self._controller.set_threshold(threshold)
            self._controller.set_input_file(inputFile)
            self._controller.set_output_fol(outputFile)
            self._controller.start_processing()
    finally:
        self.btnCalculate.setEnabled(True)
def main():
    """Application entry point: create the Qt application, show the main window and run the event loop."""
    app = QApplication(sys.argv)
    window = Window()
    window.show()
    # exec_() blocks until the window closes; its status code is propagated.
    sys.exit(app.exec_())
# Run the GUI only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
4976260 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class NetVLAD(nn.Module):
    """Trainable NetVLAD aggregation layer.

    Turns a dense map of local descriptors into a single L2-normalised VLAD
    vector of size ``num_clusters * dim`` using soft cluster assignment.
    """

    def __init__(self, dim, num_clusters=64):
        """
        Args:
            dim : int
                Dimension of the input descriptors.
            num_clusters : int
                Number of VLAD clusters.
        """
        super(NetVLAD, self).__init__()
        self.num_clusters = num_clusters
        # 1x1 convolution yields the (unnormalised) assignment score of every
        # spatial descriptor to every cluster.
        self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1), bias=False)
        self.centroids = nn.Parameter(torch.rand(num_clusters, dim))

    def init_params(self, clsts, traindescs):
        """Initialise centroids and the assignment conv from k-means clusters and training descriptors."""
        unit_clsts = clsts / np.linalg.norm(clsts, axis=1, keepdims=True)
        similarity = np.dot(unit_clsts, traindescs.T)
        similarity.sort(0)
        similarity = similarity[::-1, :]  # descending similarity per descriptor
        # Sharpness chosen so the best/second-best assignment ratio is ~100:1.
        alpha = (-np.log(0.01) / np.mean(similarity[0, :] - similarity[1, :])).item()
        self.centroids = nn.Parameter(torch.from_numpy(clsts))
        self.conv.weight = nn.Parameter(
            torch.from_numpy(alpha * unit_clsts).unsqueeze(2).unsqueeze(3)
        )
        self.conv.bias = None

    def forward(self, x, crm=None):
        """Return the flattened, doubly L2-normalised VLAD descriptor of x (N, C, H, W)."""
        batch, channels = x.shape[:2]

        # Soft assignment of every spatial location to every cluster.
        weights = F.softmax(
            self.conv(x).view(batch, self.num_clusters, -1), dim=1
        )

        if crm is not None:
            # Optional per-pixel relevance map reweights the assignments.
            assert (
                crm.shape[0] == batch and crm.shape[1] == 1 and crm.shape[2:] == x.shape[2:]
            )
            weights = torch.mul(weights, crm.view(batch, 1, -1))

        descriptors = x.view(batch, channels, -1)

        vlad = torch.zeros(
            (batch, self.num_clusters, channels),
            dtype=x.dtype, layout=x.layout, device=x.device
        )
        # Per-cluster loop: slower than a fully vectorised version but far
        # lighter on memory.
        for k in range(self.num_clusters):
            diff = descriptors.unsqueeze(0).permute(1, 0, 2, 3) - \
                self.centroids[k : k + 1, :].expand(descriptors.size(-1), -1, -1).permute(1, 2, 0).unsqueeze(0)
            diff *= weights[:, k : k + 1, :].unsqueeze(2)
            vlad[:, k : k + 1, :] = diff.sum(dim=-1)

        vlad = F.normalize(vlad, p=2, dim=2)  # intra-normalisation
        vlad = F.normalize(vlad.view(batch, -1), p=2, dim=1)  # flatten + global L2
        return vlad
| StarcoderdataPython |
1863255 | import RPi.GPIO as GPIO
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
import csv
import time
import math
import threading
import numpy as np
from picamera import PiCamera
from lib_utils import *
from lib_camera import Camera
from lib_blob import Blob
from lib_fin import Fin
from lib_leds import LEDS
status = ['home', 'orbit', 'terminate']
def initialize():
    """Start all fin driver threads, reset the status log file, and flash the LEDs once as a ready cue."""
    threading.Thread(target=caudal.run).start()
    threading.Thread(target=dorsal.run).start()
    threading.Thread(target=pectol.run).start()
    threading.Thread(target=pector.run).start()

    # logger instance for overall status: truncate and write the header row
    with open('{}.log'.format(U_FILENAME), 'w') as f:
        f.truncate()
        f.write('t_passed :: t_loop :: t_observe_r :: #blob_r_pix :: #blob_l_pix\n')

    # Visual cue that initialisation finished.
    leds.on()
    time.sleep(1)
    leds.off()
def terminate():
    """Stop all fins, flash the LEDs as a shutdown cue, and release the GPIO pins."""
    caudal.terminate()
    dorsal.terminate()
    pectol.terminate()
    pector.terminate()

    leds.on()
    time.sleep(1)
    leds.off()
    GPIO.cleanup()
def log_status(t_passed, t_loop, t_observe_r, blob_r_size, blob_l_size, status):
    """Append one formatted status line (loop timings, per-side blob sizes, mode) to the run log."""
    with open('{}.log'.format(U_FILENAME), 'a') as f:
        f.write(
            ' {:6.3f} :: {:6.3f} :: {:6.3f} :: {:6} :: {:6} :: {}\n'.format(
                t_passed, t_loop, t_observe_r, blob_r_size, blob_l_size, status
            )
        )
def log_blobs(t_passed, blobs, side):
    """Append blob coordinates for one camera side to a per-side CSV row.

    Rows are padded to a fixed width with U_CAM_YRES so every row has the
    same number of columns regardless of how many blobs were detected.
    """
    #print(blobs)
    #print(blobs.shape)
    blob_list = U_CAM_YRES * np.ones((10, 2))  # max 10 blobs, remaining values U_CAM_YRES
    if blobs.size:
        blob_list[:blobs.shape[0], :blobs.shape[1]] = blobs
    blob_list = blob_list.reshape((1, blob_list.size))

    with open('{}_{}.csv'.format(U_FILENAME, side), 'a') as f:
        writer = csv.writer(f, delimiter=',')
        row = []
        row.append(t_passed)
        #for i in range(blob_list.size):
        # NOTE(review): only the first 10 of the 20 padded values (i.e. 5
        # blobs) are written, not all 10 blobs -- confirm intended.
        for i in range(10):
            row.append(blob_list[0, i])
        writer.writerow(row)
def home(blobs_right, blobs_left, total_blob_pixels):
    """Steer the robot toward the detected blob (homing behaviour).

    blobs_right/blobs_left hold per-side blob coordinates; total_blob_pixels
    is the combined blob area used to decide when the target fills enough of
    the field of view to switch to orbiting.
    """
    # control
    thresh_orbit = 0.4  # [%], blob_pixels / total_no_pixels
    total_no_pixels = (U_CAM_YRES / 2)**2 * math.pi  # pixels in spherical FOV

    # blob in front: swim forward, trimming with the pectoral fins
    if blobs_right.size and blobs_left.size:
        caudal.on()
        # keep centered
        if (blobs_right[0, 0] > blobs_left[0, 0] + 5):
            pector.set_frequency(2.5)
            pector.on()
            pectol.off()
            print('move fwd and ccw')
        elif (blobs_left[0, 0] > blobs_right[0, 0] + 5):
            pectol.set_frequency(2.5)
            pectol.on()
            pector.off()
            print('move fwd and cw')
        else:
            print('move fwd')
            pector.off()
            pectol.off()
        # initialize orbiting?
        if orbit:
            blob_ratio = total_blob_pixels / (2 * total_no_pixels)
            if blob_ratio > thresh_orbit:
                # NOTE(review): this binds a *local* name only; without a
                # `global status` declaration the module-level mode never
                # actually changes to 'orbit'.
                status = 'orbit'

    # blob to the right: turn clockwise, faster the further off-centre it is
    elif blobs_right.size:
        freq_l = 2 + 6 * (U_CAM_YRES/2 - blobs_right[0, 0]) / U_CAM_YRES
        pectol.set_frequency(abs(freq_l))
        #print('freq_l is {}'.format(freq_l))
        print('turn cw')
        pectol.on()
        pector.off()
        if (blobs_right[0, 0] > 55):
            caudal.on()
        else:
            caudal.off()

    # blob to the left: turn counter-clockwise
    elif blobs_left.size:
        freq_r = 2 + 8 * (U_CAM_YRES/2 - blobs_left[0, 0]) / U_CAM_YRES
        pector.set_frequency(abs(freq_r))
        #print('freq_r is {}'.format(freq_r))
        print('turn ccw')
        pector.on()
        pectol.off()
        if (blobs_left[0, 0] > 55):
            caudal.on()
        else:
            caudal.off()

    # blob behind or lost: stop all fins and wait
    else:
        print('lost blob, wait')
        pector.off()
        pectol.off()
        caudal.off()
def orbit(blobs_right):
    """Orbit the target using the right camera's top blob (actuation currently commented out).

    NOTE(review): the module-level assignment ``orbit = False`` further down
    rebinds this name, so calling ``orbit(...)`` after import would fail.
    """
    thresh_heading = 0.2
    horizontal_offset = blobs_right[0, 0] / (U_CAM_YRES / 2)

    if horizontal_offset > thresh_heading:
        print('turn ccw')
        #pector.on()
        #pectol.off()
        #caudal.off()
    # NOTE(review): `< thresh_heading` (rather than `< -thresh_heading`)
    # makes the final else reachable only at exact equality -- confirm.
    elif horizontal_offset < thresh_heading:
        print('turn cw')
        #pectol.on()
        #pector.off()
        #caudal.off()
    else:
        print('move fwd')
        #caudal.on()
        #pector.off()
        #pectol.off()
def depth_ctrl_from_cam(blobs_right, blobs_left):
    """Drive the dorsal fin to match depth with the target blob seen by the cameras."""
    # no blob on either side: surface (fin off) to try to reacquire
    if not blobs_right.size and not blobs_left.size:
        print('move up')
        dorsal.off()
        return

    # mirror the visible side so the average below always has two values
    if not blobs_right.size:
        blobs_right = blobs_left
    elif not blobs_left.size:
        blobs_left = blobs_right

    # assumes blob y-coordinates are centred on 0 (negative = target deeper) -- TODO confirm sign convention
    if ((blobs_right[0, 1] + blobs_left[0, 1]) / 2) < 0:
        print('move down')
        dorsal.on()
    else:
        print('move up')
        dorsal.off()
def main(run_time=60):
    """Main sense/act loop: observe both cameras, steer, and log until run_time seconds elapse."""
    # loop
    t_start = time.time()
    t_loop_prev = time.time()
    while time.time() - t_start < run_time:
        # observe right side of environment and measure time for logging
        t_observe_r = time.time()
        img = camera.capture('right')
        blobs_right = Blob(img, 'right', 40)
        blobs_right.blob_detect()
        blobs_r = blobs_right.blobs
        t_observe_r = time.time() - t_observe_r

        # observe left side of environment
        img = camera.capture('left')
        blobs_left = Blob(img, 'left', 40)
        blobs_left.blob_detect()
        blobs_l = blobs_left.blobs
        total_blob_pixels = blobs_left.blob_size + blobs_right.blob_size

        # discard blobs that are reflected on the surface
        # (keep only the row matching the minimum y value on each side)
        if blobs_r.size:
            blobs_r_ind = np.where(blobs_r == min(blobs_r[:, 1]))
            blobs_r = blobs_r[blobs_r_ind[0], :]
        if blobs_l.size:
            blobs_l_ind = np.where(blobs_l == min(blobs_l[:, 1]))
            blobs_l = blobs_l[blobs_l_ind[0], :]

        # act based on status
        if status == 'home':
            home(blobs_r, blobs_l, total_blob_pixels)
        elif status == 'orbit':
            # NOTE(review): `orbit` is rebound to a bool at module level, so
            # this call would raise TypeError if this branch were reached.
            orbit(blobs_r)
        elif status == 'terminate':
            terminate()

        # ctrl depth
        if depth_ctrl:
            depth_ctrl_from_cam(blobs_r, blobs_l)

        # log status and blobs
        t_now = time.time()
        t_passed = t_now - t_start
        t_loop = t_now - t_loop_prev
        t_loop_prev = time.time()
        log_status(t_passed, t_loop, t_observe_r, blobs_right.blob_size, blobs_left.blob_size, status)
        log_blobs(round(t_passed, 3), blobs_r, 'right')
        log_blobs(round(t_passed, 3), blobs_l, 'left')

        # delete class instances to avoid memory overload
        del blobs_right
        del blobs_left

    terminate()
# homing plus orbiting, 2D or 3D -- behaviour switches for this run
status = 'home'
# NOTE(review): this rebinds the name of the orbit() function defined above;
# calling orbit(...) afterwards would fail. Consider renaming the flag.
orbit = False
depth_ctrl = True

# Fin(...) arguments look like GPIO pin numbers plus a rate -- defined in lib_fin; TODO confirm.
caudal = Fin(20, 21, 5)
dorsal = Fin(19, 26, 5)
pectol = Fin(18, 23, 6)
pector = Fin(4, 17, 6)
camera = Camera()
leds = LEDS()

time.sleep(5)  # allow hardware to settle before starting

initialize()
main(120)
| StarcoderdataPython |
392836 | import csv
import re
from collections import namedtuple
from datetime import datetime
from decimal import Decimal
from enum import unique
from functools import reduce
from pathlib import Path
from typing import (
Dict,
Iterable,
List,
Mapping,
NamedTuple,
Optional,
Sequence,
Set,
Tuple,
)
from bankroll.broker import AccountData, configuration, csvsectionslicer, parsetools
from bankroll.model import (
AccountBalance,
Activity,
Bond,
Cash,
CashPayment,
Currency,
Instrument,
Option,
Position,
Stock,
Trade,
TradeFlags,
)
@unique
class Settings(configuration.Settings):
    """Configuration keys understood by the Vanguard account provider."""
    STATEMENT = "Statement"

    @property
    def help(self) -> str:
        """Human-readable help text for this setting (empty for unknown keys)."""
        if self == self.STATEMENT:
            return "A local path to an exported statement CSV of Vanguard positions and trades."
        else:
            return ""

    @classmethod
    def sectionName(cls) -> str:
        """Name of the configuration section these settings live under."""
        return "Vanguard"
class PositionsAndActivity(NamedTuple):
    """Parsed statement contents: open positions plus the activity used to derive them."""
    positions: List[Position]
    activity: List[Activity]
class _VanguardPosition(NamedTuple):
    """One raw row of the positions section of the statement CSV (all fields are unparsed strings)."""
    investmentName: str
    symbol: str
    shares: str
    sharePrice: str
    totalValue: str
class _VanguardPositionAndActivity(NamedTuple):
    """A raw position paired with the full account activity (input shape for lenientParse)."""
    position: _VanguardPosition
    activity: List[Activity]
def _guessInstrumentForInvestmentName(name: str) -> Instrument:
    """Guess the instrument type from a Vanguard investment name.

    Names containing " % " (a coupon rate) are treated as bonds; everything
    else is assumed to be a USD stock/fund.
    """
    instrument: Instrument
    if re.match(r"^.+\s\%\s.+$", name):
        # TODO: Determine valid CUSIP for bonds
        instrument = Bond(name, currency=Currency.USD, validateSymbol=False)
    else:
        instrument = Stock(name, currency=Currency.USD)

    return instrument
def _parseVanguardPositionAndActivity(vpb: _VanguardPositionAndActivity) -> Position:
    """Unpack the pair and delegate to _parseVanguardPosition (shape expected by lenientParse)."""
    return _parseVanguardPosition(vpb.position, vpb.activity)
def _activityAffectsSymbol(activity: Activity, symbol: str) -> bool:
    """Return True when `activity` concerns `symbol`, directly or as an option underlying."""
    if isinstance(activity, CashPayment):
        instrument = activity.instrument
        return instrument is not None and instrument.symbol == symbol
    if not isinstance(activity, Trade):
        return False
    traded = activity.instrument
    if isinstance(traded, Option) and traded.underlying == symbol:
        return True
    return traded.symbol == symbol
def _realizedBasisForSymbol(
    symbol: str, activity: Iterable[Activity]
) -> Optional[Cash]:
    """Accumulate the realized cost basis of `symbol` from its activity.

    The basis is the negated sum of proceeds over all relevant cash payments
    and trades; returns None when no activity touches the symbol at all.
    """
    def f(basis: Optional[Cash], activity: Activity) -> Optional[Cash]:
        # NOTE(review): the two isinstance branches are identical; they could
        # be collapsed -- or were meant to differ. Confirm against upstream.
        if isinstance(activity, CashPayment):
            return basis - activity.proceeds if basis else -activity.proceeds
        elif isinstance(activity, Trade):
            return basis - activity.proceeds if basis else -activity.proceeds
        else:
            raise ValueError(f"Unexpected type of activity: {activity}")

    return reduce(f, (t for t in activity if _activityAffectsSymbol(t, symbol)), None)
def _parseVanguardPosition(p: _VanguardPosition, activity: List[Activity]) -> Position:
    """Build a Position from a raw statement row, deriving its cost basis from activity."""
    instrument: Instrument
    if len(p.symbol) > 0:
        instrument = Stock(p.symbol, currency=Currency.USD)
    else:
        # No ticker: fall back to guessing bond vs stock from the name.
        instrument = _guessInstrumentForInvestmentName(p.investmentName)

    qty = Decimal(p.shares)

    realizedBasis = _realizedBasisForSymbol(instrument.symbol, activity)
    assert realizedBasis, "Invalid realizedBasis: %s for %s" % (
        realizedBasis,
        instrument,
    )

    return Position(instrument=instrument, quantity=qty, costBasis=realizedBasis)
def _parsePositions(
    path: Path, activity: List[Activity], lenient: bool = False
) -> List[Position]:
    """Parse the positions section of the statement CSV at `path`.

    `activity` supplies the transactions needed to compute each position's
    cost basis; `lenient` skips malformed rows instead of raising.
    """
    with open(path, newline="") as csvfile:
        # The positions section starts at the "Account Number" header row.
        criterion = csvsectionslicer.CSVSectionCriterion(
            startSectionRowMatch=["Account Number"],
            endSectionRowMatch=[],
            rowFilter=lambda r: r[1:6],
        )
        sections = csvsectionslicer.parseSectionsForCSV(csvfile, [criterion])

        if len(sections) == 0:
            return []

        vanPositions = (_VanguardPosition._make(r) for r in sections[0].rows)
        vanPosAndBases = list(
            map(lambda pos: _VanguardPositionAndActivity(pos, activity), vanPositions)
        )

        return list(
            parsetools.lenientParse(
                vanPosAndBases,
                transform=_parseVanguardPositionAndActivity,
                lenient=lenient,
            )
        )
def _parsePositionsAndActivity(
    path: Path, lenient: bool = False
) -> PositionsAndActivity:
    """Parse the whole statement: activity first, then positions (which need activity for cost basis)."""
    activity = _parseTransactions(path, lenient=lenient)
    positions = _parsePositions(path, activity=activity, lenient=lenient)
    return PositionsAndActivity(positions, activity)
class _VanguardTransaction(NamedTuple):
    """One raw row of the transactions section of the statement CSV (all fields are unparsed strings)."""
    tradeDate: str
    settlementDate: str
    transactionType: str
    transactionDescription: str
    investmentName: str
    symbol: str
    shares: str
    sharePrice: str
    principalAmount: str
    commissionFees: str
    netAmount: str
    accruedInterest: str
    accountType: str
def _parseVanguardTransactionDate(datestr: str) -> datetime:
return datetime.strptime(datestr, "%m/%d/%Y")
def _forceParseVanguardTransaction(
    t: _VanguardTransaction, flags: TradeFlags
) -> Optional[Trade]:
    """Build a Trade from a raw row, assuming the row *is* a trade (flags precomputed by the caller)."""
    instrument: Instrument
    if len(t.symbol) > 0:
        instrument = Stock(t.symbol, currency=Currency.USD)
    else:
        instrument = _guessInstrumentForInvestmentName(t.investmentName)

    totalFees = Decimal(t.commissionFees)
    amount = Decimal(t.principalAmount)

    # Redemptions report positive share counts but reduce the position.
    if t.transactionDescription == "Redemption":
        shares = Decimal(t.shares) * (-1)
    else:
        shares = Decimal(t.shares)

    return Trade(
        date=_parseVanguardTransactionDate(t.tradeDate),
        instrument=instrument,
        quantity=shares,
        amount=Cash(currency=Currency.USD, quantity=amount),
        fees=Cash(currency=Currency.USD, quantity=totalFees),
        flags=flags,
    )
def _parseVanguardTransaction(t: _VanguardTransaction) -> Optional[Activity]:
    """Convert one raw statement row into an Activity, or None when irrelevant.

    Dividends become CashPayments; buys, sells, reinvestments, redemptions
    and outgoing transfers become Trades with the appropriate flags. Any
    other row type is dropped.
    """
    if t.transactionType == "Dividend":
        return CashPayment(
            date=_parseVanguardTransactionDate(t.tradeDate),
            instrument=Stock(
                t.symbol if t.symbol else t.investmentName, currency=Currency.USD
            ),
            proceeds=Cash(currency=Currency.USD, quantity=Decimal(t.netAmount)),
        )

    # The original code built a separate `set([...])` of valid types on every
    # call that just duplicated this mapping's keys; a single dict lookup
    # both validates the type and yields the flags.
    flagsByTransactionType = {
        "Buy": TradeFlags.OPEN,
        "Sell": TradeFlags.CLOSE,
        "Reinvestment": TradeFlags.OPEN | TradeFlags.DRIP,
        "Corp Action (Redemption)": TradeFlags.CLOSE,
        "Transfer (outgoing)": TradeFlags.CLOSE,
    }

    flags = flagsByTransactionType.get(t.transactionType)
    if flags is None:
        return None

    return _forceParseVanguardTransaction(t, flags=flags)
# Transactions will be ordered from newest to oldest
def _parseTransactions(path: Path, lenient: bool = False) -> List[Activity]:
    """Parse the transactions section of the statement CSV at `path`.

    Rows that do not map to a supported Activity are dropped (the
    filter(None, ...) removes _parseVanguardTransaction's None results).
    """
    with open(path, newline="") as csvfile:
        transactionsCriterion = csvsectionslicer.CSVSectionCriterion(
            startSectionRowMatch=["Account Number", "Trade Date"],
            endSectionRowMatch=[],
            rowFilter=lambda r: r[1:-1],
        )

        sections = csvsectionslicer.parseSectionsForCSV(
            csvfile, [transactionsCriterion]
        )

        if len(sections) == 0:
            return []

        return list(
            filter(
                None,
                parsetools.lenientParse(
                    (_VanguardTransaction._make(r) for r in sections[0].rows),
                    transform=_parseVanguardTransaction,
                    lenient=lenient,
                ),
            )
        )
class VanguardAccount(AccountData):
    """AccountData implementation backed by a locally exported Vanguard statement CSV."""

    # Lazily populated cache of the parsed statement.
    _positionsAndActivity: Optional[PositionsAndActivity] = None

    @classmethod
    def fromSettings(
        cls, settings: Mapping[configuration.Settings, str], lenient: bool
    ) -> "VanguardAccount":
        """Create an account from the configured statement path (which may be absent)."""
        statement = settings.get(Settings.STATEMENT)
        return cls(statement=Path(statement) if statement else None, lenient=lenient)

    def __init__(self, statement: Optional[Path] = None, lenient: bool = False):
        self._statement = statement
        self._lenient = lenient
        super().__init__()

    def positionsAndActivity(self) -> Optional[PositionsAndActivity]:
        """Parse the statement on first use and cache the result; None without a statement."""
        if not self._statement:
            return None

        if not self._positionsAndActivity:
            self._positionsAndActivity = _parsePositionsAndActivity(
                self._statement, lenient=self._lenient
            )

        return self._positionsAndActivity

    def positions(self) -> Iterable[Position]:
        paa = self.positionsAndActivity()
        return paa.positions if paa else []

    def activity(self) -> Iterable[Activity]:
        paa = self.positionsAndActivity()
        return paa.activity if paa else []

    def balance(self) -> AccountBalance:
        # Vanguard puts all cash into money market funds, which will show up to
        # us as Positions, not uninvested cash.
        return AccountBalance(cash={})
| StarcoderdataPython |
6610680 | <gh_stars>10-100
#**********************************************************************************************
# Traffic Emulator for Network Services
# Copyright 2020 VMware, Inc
# The BSD-2 license (the "License") set forth below applies to all parts of
# the Traffic Emulator for Network Services project. You may not use this file
# except in compliance with the License.
#
# BSD-2 License
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE
#**********************************************************************************************
from flask import Flask
from flask_swagger_ui import get_swaggerui_blueprint
import argparse
from flask import request
import os
from TE_WRAP import *
app=Flask(__name__)

# Serve a Swagger UI at /swagger, backed by the static spec file below.
SWAGGER_URL = '/swagger'
API_URL = '/static/setup_te_swagger.json'
SWAGGERUI_BLUEPRINT = get_swaggerui_blueprint(
    SWAGGER_URL,API_URL,
    config={
        'app_name': "Traffic Engine"
    }
)
app.register_blueprint(SWAGGERUI_BLUEPRINT, url_prefix=SWAGGER_URL)
@app.route('/api/setup_te')
def setup_te():
    """Install/launch the TE controller on a remote host and report its Swagger URL.

    Query parameters: te_controller_ip, user, optional passwd, optional
    dockerhub_repo. Returns a JSON-serialisable dict with 'status' and
    'statusmessage'.
    """
    controller_ip = request.args['te_controller_ip']
    user = request.args['user']
    password = request.args.get('passwd', None)
    dockerhub_repo = request.args.get('dockerhub_repo', 'projects.registry.vmware.com/tens/te:v2.0')
    te_controller_obj = {'host': controller_ip, 'user': user, 'passwd': password}

    # Check whether the controller machine is reachable before attempting SSH.
    return_val = os.system("ping -c 5 -w 5 {}".format(controller_ip))
    if return_val != 0:
        return {"status" : False, "statusmessage" : "TE controller IP not reachable"}

    # SSH in only to verify python is available on the controller (required
    # to run GET_AND_RUN_DOCKER_IMAGE.py); close the connection as soon as
    # the check is done -- the original code leaked it, and TensTE opens its
    # own session anyway.
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    if password:
        ssh.connect(controller_ip, username=user, password=password)
    else:
        ssh.connect(controller_ip, username=user)
    try:
        cmd = "which python"
        stdin, stdout, stderr = ssh.exec_command(cmd)
        out = stdout.readlines()
    finally:
        ssh.close()
    if (not(out)):
        return {"status" : False, "statusmessage" : "python not installed in TE controller"}

    tens_te_obj = TensTE(te_controller_obj)
    response = tens_te_obj.setup_te(dockerhub_repo=dockerhub_repo)
    if response.get('status', False):
        # Surface the controller's Swagger UI URL when the flask port is known.
        flask_port = response.get('statusmessage', {}).get('flask', None)
        if flask_port:
            te_controller_swagger_ui_url = "{}:{}/swagger".format(controller_ip, flask_port)
            response['statusmessage']['te_controller_swagger_ui_url'] = te_controller_swagger_ui_url
        else:
            response['status'] = False
            response['statusmessage'] = "Unable to find Flask port"
    return response
def parse_arguments():
    """Parse the command-line options for the setup service and return the namespace."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-fp', '--flask_port', type=int, required=False, default=4000,
                            help='flask port where swagger UI will run')
    return arg_parser.parse_args()
if __name__ == "__main__":
    # Run the setup service on the requested port (default 4000), listening on all interfaces.
    input_args = parse_arguments()
    flask_port = input_args.flask_port
    app.run(host="0.0.0.0", port=flask_port)
| StarcoderdataPython |
3473718 | <reponame>jjaramillo34/fastapi-mongo
from fastapi import APIRouter, Body
from fastapi.encoders import jsonable_encoder
from apps.server.database import (
add_student,
delete_student,
retrieve_student,
retrieve_students,
update_student,
)
from apps.server.models.student import (
ErrorResponseModel,
ResponseModel,
StudentSchema,
UpdateStudentModel,
)
# Shared router for the student CRUD endpoints; mounted by the FastAPI app.
router = APIRouter()
@router.get("/", response_description="Students retrieved")
async def get_students():
    """Return every student document, or an empty-list payload when none exist."""
    students = await retrieve_students()
    if students:
        return ResponseModel(students, "Students data retrieved successfully")
    return ResponseModel(students, "Empty list returned")
@router.post("/", response_description="Student data added into the database")
async def add_student_data(student: StudentSchema = Body(...)):
    """Insert a new student document and return the stored record."""
    student = jsonable_encoder(student)
    new_student = await add_student(student)
    return ResponseModel(new_student, "Student added successfully.")
@router.get("/{id}", response_description="Student data retrieved")
async def get_student_data(id):
    """Return one student by id, or a 404-style error payload when missing."""
    student = await retrieve_student(id)
    if student:
        return ResponseModel(student, "Student data retrieved successfully")
    return ErrorResponseModel("An error occurred.", 404, "Student doesn't exist.")
@router.put("/{id}")
async def update_student_data(id: str, req: UpdateStudentModel = Body(...)):
    """Partially update a student: only request fields that are not None are applied."""
    req = {k: v for k, v in req.dict().items() if v is not None}
    updated_student = await update_student(id, req)
    if updated_student:
        return ResponseModel(
            "Student with ID: {} name update is successful".format(id),
            "Student name updated successfully",
        )
    return ErrorResponseModel(
        "An error occurred",
        404,
        "There was an error updating the student data.",
    )
@router.delete("/{id}", response_description="Student data deleted from the database")
async def delete_student_data(id: str):
    """Delete one student by id, or return a 404-style error payload when missing."""
    deleted_student = await delete_student(id)
    if deleted_student:
        return ResponseModel(
            "Student with ID: {} removed".format(id), "Student deleted successfully"
        )
    return ErrorResponseModel(
        "An error occurred", 404, "Student with id {0} doesn't exist".format(id)
    )
| StarcoderdataPython |
3569181 | <gh_stars>1-10
"""
This is a utility script to output an alphabetised line-by-line
difference comparison of two files.
"""
#!/usr/bin/env python
import re
FILE_1 = 'output.txt'
FILE_2 = 'YAWL.list'

def _extract_word(line):
    """Return the bare lowercase word from a dictionary line like 'word/flags extra'."""
    return line.strip().lower().split('/')[0].split(' ')[0]

def _is_candidate(word):
    """Accept only purely alphabetic words of length 4-9."""
    return 4 <= len(word) <= 9 and re.fullmatch('[a-zA-Z]*', word) is not None

# Count occurrences per word across both files; words seen in exactly one
# place (count == 1) form the difference. NOTE(review): duplicates within
# FILE_1 stay at count 1 while duplicates within FILE_2 accumulate -- this
# asymmetry is preserved from the original; confirm it is intended.
OUTPUT = {}

with open(FILE_1, 'r') as f:
    for line in f:
        word = _extract_word(line)
        if _is_candidate(word):
            OUTPUT[word] = 1

with open(FILE_2, 'r') as f:
    for line in f:
        word = _extract_word(line)
        if _is_candidate(word):
            OUTPUT[word] = OUTPUT.get(word, 0) + 1

SORTED_OUT = sorted(word for word, count in OUTPUT.items() if count == 1)

# Text mode ("w"), not binary ("wb"): writelines of str objects raises
# TypeError on a binary-mode handle under Python 3.
with open("difference.txt", "w") as output_file:
    output_file.writelines("\n".join(SORTED_OUT))
| StarcoderdataPython |
312053 |
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import sys

# test the python version -- fail fast with a clear message on unsupported interpreters
major, minor = sys.version_info[0:2]
if (major, minor) < (3,6):
    sys.stderr.write('\nPython 3.6 or later is required for this package.\n')
    sys.exit(1)

# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
# Get version from __init__.py
from FCPGtools import __version__

here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding = 'utf-8') as f:
    long_description = f.read()

setup(
    name = "FCPGtools",
    version = __version__,
    author = "<NAME>",
    author_email = "<EMAIL>",
    description = "Tools to make flow-conditioned parameter grids.",
    long_description = long_description,
    long_description_content_type = "text/x-rst",
    url="https://code.usgs.gov/StreamStats/CPGtools",
    packages = find_packages(),
    classifiers = [
        "Programming Language :: Python :: 3",
        "Operating System :: Linux",
        "Development Status :: 5 - Production/Stable",
        "License :: OSI Approved :: MIT License",
    ],
    python_requires = '>=3.6',
)
| StarcoderdataPython |
1957628 | from os import listdir, getcwd, system
from os.path import isfile, isdir
from sys import stderr
def convert_ui(*args):
    """
    Helper function for the PyQt5 package to convert .ui files to .py files.
    :param args: names of the .ui files in the current working directory to convert.
                 If no arguments are passed, all .ui files in the current working
                 directory get converted.
    :return: None
    """
    directory_files = [file for file in listdir(getcwd()) if isfile(file)]
    uifiles = [file for file in directory_files if file[-3:] == '.ui']
    # With no explicit names, convert every .ui file we found; otherwise only
    # the requested ones, validated against what actually exists. (The two
    # branches in the original duplicated the conversion command.)
    targets = uifiles if len(args) == 0 else args
    for file in targets:
        if file in uifiles:
            system(f'pyuic5 {file} -o {file[:-3] + ".py"}')
        else:
            # Typo fix: "fine" -> "find".
            print(f"Can't find {file} in the current working directory.", file=stderr)
class Converter:
    """
    A helper class to make the conversion of .ui PyQt5 Designer's files to .py files easier.
    Make an instance of the class and call the convert_ui method at the entry point of your
    code to convert the files without having to deal with the cmd or terminal (dev tool to
    automate the conversion command). It also helps to separate the .ui files from the .py
    files in separate folders.
    """

    def __init__(self, ui_directory, py_directory=None):
        """
        If you don't pass the py_directory argument, it will be the same as ui_directory.
        :param ui_directory: Absolute path to the folder that contains the .ui files
        :param py_directory: Absolute path to the folder where the output .py files will be created
        """
        # Invalid paths are stored as None and reported when convert_ui is called.
        self.ui_directory = ui_directory if isdir(ui_directory) else None
        if py_directory is None:
            # NOTE: intentionally mirrors the raw argument, even if it is not a valid directory.
            self.py_directory = ui_directory
        else:
            self.py_directory = py_directory if isdir(py_directory) else None

    def convert_ui(self, *args):
        """
        Convert the named .ui files found in ui_directory to .py files in py_directory.
        If no arguments are passed, every .ui file in ui_directory is converted.
        :param args: Names of the .ui files to be converted
        :return: None
        """
        if self.ui_directory is None or self.py_directory is None:
            print("Error in ui path or py path", file=stderr)
            return
        # NOTE(review): '\\' assumes Windows paths -- os.path.join would be portable.
        uifiles = [file for file in listdir(self.ui_directory)
                   if isfile(self.ui_directory + "\\" + file) and file[-3:] == '.ui']
        if len(uifiles) == 0:
            print(f"No current ui files in {self.ui_directory}.", file=stderr)
            return
        # With no explicit names, convert everything we found (the original
        # duplicated the conversion loop in both branches).
        targets = uifiles if len(args) == 0 else args
        for file in targets:
            if file in uifiles:
                self.convert_file(file)
            else:
                # Message fixes: "fine" -> "find", and name the directory we
                # actually searched (ui_directory, not the cwd).
                print(f"Can't find {file} in {self.ui_directory}.", file=stderr)

    def convert_file(self, file):
        """
        Run pyuic5 on a single .ui file, writing the .py output to py_directory.
        :param file: name of a .ui file to be converted
        :return: None
        """
        try:
            absolute_uifile_path = f'"{self.ui_directory}\\{file}"'
            absolute_pyfile_path = f'"{self.py_directory}\\{file[:-3] + ".py"}"'
            system(f'pyuic5 {absolute_uifile_path} -o {absolute_pyfile_path}')
        except Exception as error:
            print(error, file=stderr)
| StarcoderdataPython |
3411337 | <gh_stars>100-1000
# # ⚠ Warning
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# [🥭 Mango Markets](https://mango.markets/) support is available at:
# [Docs](https://docs.mango.markets/)
# [Discord](https://discord.gg/67jySBhxrg)
# [Twitter](https://twitter.com/mangomarkets)
# [Github](https://github.com/blockworks-foundation)
# [Email](mailto:<EMAIL>)
import argparse
import mango
import typing
from decimal import Decimal
from .pairwiseelement import PairwiseElement
from ...modelstate import ModelState
# # 🥭 FixedSpreadElement class
#
# Ignores any input `Order`s (so probably best at the head of the chain). Builds orders using a fixed spread
# value.
#
class FixedSpreadElement(PairwiseElement):
    """Order-chain element that prices each BUY/SELL pair a fixed spread around mid.

    Ignores the incoming orders' prices (so it is best placed at the head of
    the chain) and repositions each pair at mid-price +/- half the configured
    spread for that level.
    """

    def __init__(self, spreads: typing.Sequence[Decimal]) -> None:
        super().__init__()
        # One spread per order level; the last value is reused for deeper levels.
        self.spreads: typing.Sequence[Decimal] = spreads

    @staticmethod
    def add_command_line_parameters(parser: argparse.ArgumentParser) -> None:
        """Register this element's --fixedspread-value CLI parameter (repeatable, one per level)."""
        parser.add_argument("--fixedspread-value", type=Decimal, action="append",
                            help="fixed value to apply to the mid-price to create the BUY and SELL price. Can be specified multiple times for multiple levels of BUYs and SELLs.")

    @staticmethod
    def from_command_line_parameters(args: argparse.Namespace) -> "FixedSpreadElement":
        """Build the element from parsed CLI args; requires at least one spread value."""
        if args.fixedspread_value is None:
            raise Exception("No spread value specified. Try the --fixedspread-value parameter?")

        spreads: typing.Sequence[Decimal] = args.fixedspread_value
        return FixedSpreadElement(spreads)

    def process_order_pair(self, context: mango.Context, model_state: ModelState, index: int, buy: typing.Optional[mango.Order], sell: typing.Optional[mango.Order]) -> typing.Tuple[typing.Optional[mango.Order], typing.Optional[mango.Order]]:
        """Reprice the pair at level `index` symmetrically around the model's mid-price."""
        # If no spread is explicitly specified for this element, just use the last specified spread.
        spread: Decimal = self.spreads[index] if index < len(self.spreads) else self.spreads[-1]
        half_spread: Decimal = spread / 2
        price: mango.Price = model_state.price
        new_buy: typing.Optional[mango.Order] = None
        new_sell: typing.Optional[mango.Order] = None
        if buy is not None:
            new_buy_price: Decimal = price.mid_price - half_spread
            new_buy = buy.with_price(new_buy_price)
            self._logger.debug(f"""Order change - using fixed spread of {spread:,.8f} - new BUY price {new_buy_price:,.8f} is {half_spread:,.8f} from mid price {price.mid_price:,.8f}:
    Old: {buy}
    New: {new_buy}""")

        if sell is not None:
            new_sell_price: Decimal = price.mid_price + half_spread
            new_sell = sell.with_price(new_sell_price)
            self._logger.debug(f"""Order change - using fixed spread of {spread:,.8f} - new SELL price {new_sell_price:,.8f} is {half_spread:,.8f} from mid price {price.mid_price:,.8f}:
    Old: {sell}
    New: {new_sell}""")

        return new_buy, new_sell

    def __str__(self) -> str:
        return f"« FixedSpreadElement using spreads {self.spreads} »"
| StarcoderdataPython |
8171260 | <reponame>VChristiaens/spec_fit
#! /usr/bin/env python
"""
Module for simplex or grid search of best fit spectrum in a template library.
"""
__author__ = '<NAME>'
__all__ = ['best_fit_tmp',
'get_chi']
from datetime import datetime
from multiprocessing import cpu_count
import numpy as np
import os
from scipy.optimize import minimize
from .config import time_ini, timing, time_fin, pool_map, iterable
from .chi import gof_scal
from .model_resampling import resample_model
from .utils_spec import extinction, find_nearest
import warnings
# Silence DeprecationWarnings raised by third-party dependencies so they do
# not clutter the fitting output.
warnings.filterwarnings("ignore", category=DeprecationWarning)
def get_chi(lbda_obs, spec_obs, err_obs, tmp_name, tmp_reader,
            search_mode='simplex', lambda_scal=None, scale_range=(0.1,10,0.01),
            ext_range=None, dlbda_obs=None, instru_corr=None, instru_res=None,
            instru_idx=None, use_weights=True, filter_reader=None,
            simplex_options=None, red_chi2=True, remove_nan=False,
            force_continue=False, min_npts=1, verbose=False, **kwargs):
    """ Routine calculating chi^2, optimal scaling factor and optimal
    extinction for a given template spectrum to match an observed spectrum.

    Parameters
    ----------
    lbda_obs : numpy 1d ndarray or list
        Wavelength of observed spectrum. If several instruments, should be
        ordered per instrument, not necessarily as monotonically increasing
        wavelength. Hereafter, n_ch = len(lbda_obs).
    spec_obs : numpy 1d ndarray or list
        Observed spectrum for each value of lbda_obs.
    err_obs : numpy 1d/2d ndarray or list
        Uncertainties on the observed spectrum. If 2d array, should be [2,n_ch]
        where the first (resp. second) column corresponds to lower (upper)
        uncertainty, and n_ch is the length of lbda_obs and spec_obs.
    tmp_name : str
        Template spectrum filename.
    tmp_reader : python routine
        External routine that reads a model file and returns a 3D numpy array,
        where the first column corresponds to wavelengths, the second
        contains flux values, and the third the uncertainties on the flux.
    search_mode: str, opt {'simplex', 'grid'}
        How is the best fit template found? Simplex or grid search.
    lambda_scal: float, optional
        Wavelength where a first scaling will be performed between template
        and observed spectra. If not provided, the middle wavelength of the
        observed spectra will be considered.
    scale_range: tuple, opt
        If grid search, this parameter should be provided as a tuple of 3
        floats: lower limit, upper limit and step of the grid search for the
        scaling factor to be applied AFTER the first rough scaling (i.e.
        scale_range should always encompass 1).
    ext_range: tuple or None, opt
        If None: differential extinction is not to be considered as a free
        parameter. Elif a tuple of 3 floats is provided, differential extinction
        will be considered, with the floats as lower limit, upper limit and step
        of the grid search.
        Note: if simplex search, the range is still used to set a chi of
        np.inf outside of the range.
    dlbda_obs: numpy 1d ndarray or list, optional
        Spectral channel width for the observed spectrum. It should be provided
        IF one wants to weigh each point based on the spectral
        resolution of the respective instruments (as in Olofsson et al. 2016).
    instru_corr : numpy 2d ndarray or list, optional
        Spectral correlation throughout post-processed images in which the
        spectrum is measured. It is specific to the combination of instrument,
        algorithm and radial separation of the companion from the central star.
        Can be computed using distances.spectral_correlation(). In case of
        a spectrum obtained with different instruments, build it with
        distances.combine_corrs(). If not provided, it will consider the
        uncertainties in each spectral channels are independent. See Greco &
        Brandt (2017) for details.
    instru_res : float or list of floats/strings, optional
        The mean instrumental spectral resolution(s) OR filter names. This is
        used to convolve the model spectrum. If several instruments are used,
        provide a list of spectral resolution values / filter names, one for
        each instrument used.
    instru_idx: numpy 1d array, optional
        1d array containing an index representing each instrument used
        to obtain the spectrum, label them from 0 to n_instru. Zero for points
        that don't correspond to any instru_res provided above, and i in
        [1,n_instru] for points associated to instru_res[i-1]. This parameter
        must be provided if the spectrum consists of points obtained with
        different instruments.
    use_weights: bool, optional
        For the likelihood calculation, whether to weigh each point of the
        spectrum based on the spectral resolution or bandwith of photometric
        filters used. Weights will be proportional to dlbda_obs/lbda_obs if
        dlbda_obs is provided, or set to 1 for all points otherwise.
    filter_reader: python routine, optional
        External routine that reads a filter file and returns a 2D numpy array,
        where the first column corresponds to wavelengths, and the second
        contains transmission values. Important: if not provided, but strings
        are detected in instru_res, the default format assumed for the files:
        - first row containing header
        - starting from 2nd row: 1st column: WL in mu, 2nd column: transmission
        Note: files should all have the same format and wavelength units.
    red_chi2: bool, optional
        Whether to compute the reduced chi square. If False, considers chi^2.
    remove_nan: bool, optional
        Whether to remove NaN values from template spectrum BEFORE resampling
        to the wavelength sampling of the observed spectrum. Whether it is set
        to True or False, a check is made just before chi^2 is calculated
        (after resampling), and only non-NaN values will be considered.
    simplex_options: dict, optional
        The scipy.optimize.minimize simplex (Nelder-Mead) options.
    force_continue: bool, optional
        In case of issue with the fit, whether to continue regardless (this may
        be useful in an uneven spectral library, where some templates have too
        few points for the fit to be performed).
    verbose: bool or int, optional
        Whether to print more information when fit fails.
    min_npts: int or None, optional
        Minimum number of (resampled) points to consider a template spectrum
        valid in the minimization search. np.inf is returned for chi (with NaN
        scaling/extinction) if the condition is not met.
    **kwargs: optional
        Optional arguments to the scipy.optimize.minimize function.

    Returns
    -------
    best_chi: float
        goodness of fit scored by the template
    best_scal:
        best-fit scaling factor for the considered template
    best_ext:
        best-fit optical extinction for the considered template
    n_dof: int
        number of degrees of freedom of the fit
    """
    # read template spectrum
    # NOTE(review): bare except — consider narrowing to (IOError, OSError).
    try:
        lbda_tmp, spec_tmp, spec_tmp_err = tmp_reader(tmp_name,
                                                      verbose=verbose>1)
    except:
        msg = "{} could not be opened. Corrupt file?".format(tmp_name)
        if force_continue:
            if verbose:
                print(msg)
            return np.inf, 1, 0, 1
        else:
            raise ValueError(msg)
    # look for any nan and replace
    if remove_nan:
        if np.isnan(spec_tmp).any() or np.isnan(spec_tmp_err).any():
            # indices of NaN fluxes; those channels are dropped entirely
            bad_idx = np.where(np.isnan(spec_tmp))[0]
            nch = len(lbda_tmp)
            new_lbda = [lbda_tmp[i] for i in range(nch) if i not in bad_idx]
            new_spec = [spec_tmp[i] for i in range(nch) if i not in bad_idx]
            new_err = [spec_tmp_err[i] for i in range(nch) if i not in bad_idx]
            lbda_tmp = np.array(new_lbda)
            spec_tmp = np.array(new_spec)
            spec_tmp_err = np.array(new_err)
    # don't consider template spectra whose range is smaller than observed one
    # NOTE(review): assumes lbda_obs/lbda_tmp first and last entries bound the
    # wavelength range — TODO confirm for non-monotonic multi-instrument input.
    if lbda_obs[0] < lbda_tmp[0] or lbda_obs[-1] > lbda_tmp[-1]:
        msg = "Wavelength range of template {} ({:.2f}, {:.2f})mu too short"
        msg+= " compared to that of observed spectrum ({:.2f}, {:.2f})mu"
        if force_continue:
            if verbose:
                print(msg.format(tmp_name, lbda_tmp[0], lbda_tmp[-1],
                                 lbda_obs[0], lbda_obs[-1]))
            return np.inf, 1, 0, len(lbda_tmp)-2
        else:
            raise ValueError(msg.format(tmp_name, lbda_tmp[0], lbda_tmp[-1],
                                        lbda_obs[0], lbda_obs[-1]))
    # try to resample tmp as observed spectrum - just used to raise error early
    try:
        _, spec_res = resample_model(lbda_obs, lbda_tmp, spec_tmp,
                                     dlbda_obs=dlbda_obs,
                                     instru_res=instru_res,
                                     instru_idx=instru_idx,
                                     filter_reader=filter_reader)
    except:
        msg = "Issue with resampling of template {}. Does the wavelength "
        msg+= "range extend far enough ({:.2f}, {:.2f})mu?"
        if force_continue:
            if verbose:
                print(msg.format(tmp_name, lbda_tmp[0], lbda_tmp[-1]))
            return np.inf, 1, 0, len(lbda_tmp)-2
        else:
            raise ValueError(msg.format(tmp_name, lbda_tmp[0], lbda_tmp[-1]))
    # first rescaling fac: match template flux to observed flux at lambda_scal
    if not lambda_scal:
        lambda_scal = (lbda_obs[0]+lbda_obs[-1])/2
    idx_cen = find_nearest(lbda_obs, lambda_scal)
    idx_tmp = find_nearest(lbda_tmp, lambda_scal)
    scal_fac = spec_obs[idx_cen]/spec_tmp[idx_tmp]
    spec_tmp*=scal_fac
    # NOTE: template uncertainties are deliberately NOT combined with the
    # observed ones — doing so would make the noisiest templates of the
    # library score the best fits.
    # only consider non-zero and non-nan values for chi^2 calculation
    all_conds = np.where(np.isfinite(spec_res))[0]
    ngood_ch = len(all_conds)
    # one dof lost for the scaling factor, one more if extinction is fitted
    n_dof = ngood_ch-1-(ext_range is not None)
    if n_dof <= 0:
        msg = "Not enough dof with remaining points of template spectrum {}"
        if force_continue:
            if verbose:
                print(msg.format(tmp_name))
            return np.inf, 1, 0, n_dof
        else:
            raise ValueError(msg.format(tmp_name))
    best_chi = np.inf
    best_scal = np.nan
    best_ext = np.nan
    if ngood_ch < min_npts:
        msg = "Unsufficient number of good points ({} < {}). Tmp discarded."
        if verbose:
            print(msg.format(ngood_ch, min_npts))
        return best_chi, best_scal, best_ext, n_dof
    # simplex search
    if search_mode == 'simplex':
        if simplex_options is None:
            simplex_options = {'xatol': 1e-6, 'fatol': 1e-6, 'maxiter': 1000,
                               'maxfev': 5000}
        # starting point: unity extra scaling (and mid-range extinction)
        if not ext_range:
            p = (1,)
        else:
            AV_ini = (ext_range[0]+ext_range[1])/2
            p = (1, AV_ini)
        try:
            res = minimize(gof_scal, p, args=(lbda_obs, spec_obs, err_obs,
                                              lbda_tmp, spec_tmp, dlbda_obs,
                                              instru_corr, instru_res,
                                              instru_idx, use_weights,
                                              filter_reader, ext_range),
                           method='Nelder-Mead', options=simplex_options,
                           **kwargs)
        except:
            msg = "Issue with simplex minimization for template {}. "
            msg+= "Try grid search?"
            if force_continue:
                if verbose:
                    print(msg.format(tmp_name))
                return np.inf, 1, 0, n_dof
            else:
                raise ValueError(msg.format(tmp_name))
        best_chi = res.fun
        if not ext_range:
            best_scal = res.x
            best_ext = 0
        else:
            best_scal, best_ext = res.x
        # fold the first rough scaling back into the fitted factor
        if np.isfinite(best_scal):
            best_scal*=scal_fac
    # or grid search
    elif search_mode == 'grid':
        test_scale = np.arange(scale_range[0], scale_range[1], scale_range[2])
        n_test = len(test_scale)
        if ext_range is None:
            test_ext = np.array([0])
            n_ext = 1
        elif isinstance(ext_range, tuple) and len(ext_range) == 3:
            test_ext = np.arange(ext_range[0], ext_range[1], ext_range[2])
            n_ext = len(test_ext)
        else:
            raise TypeError("ext_range can only be None or tuple of length 3")
        # exhaustive evaluation of chi^2 over the (scaling, extinction) grid
        chi = np.zeros([n_test, n_ext])
        for cc, scal in enumerate(test_scale):
            for ee, AV in enumerate(test_ext):
                p = (scal, AV)
                chi[cc, ee] = gof_scal(p, lbda_obs, spec_obs, err_obs, lbda_tmp,
                                       spec_tmp, dlbda_obs=dlbda_obs,
                                       instru_corr=instru_corr,
                                       instru_res=instru_res,
                                       instru_idx=instru_idx,
                                       use_weights=use_weights,
                                       filter_reader=filter_reader,
                                       ext_range=ext_range)
        try:
            best_chi = np.nanmin(chi)
            best_idx = np.nanargmin(chi)
            best_idx = np.unravel_index(best_idx, chi.shape)
            best_scal = test_scale[best_idx[0]]*scal_fac
            best_ext = test_ext[best_idx[1]]
        except:
            # nanargmin raises if the whole grid is NaN
            if force_continue:
                return best_chi, best_scal, best_ext, n_dof
            else:
                msg = "Issue with grid search minimization for template {}. "
                print(msg.format(tmp_name))
                # NOTE(review): interactive debugger left in library code —
                # consider raising instead.
                import pdb
                pdb.set_trace()
    else:
        msg = "Search mode not recognised. Should be 'simplex' or 'grid'."
        raise TypeError(msg)
    if red_chi2:
        best_chi /= n_dof
    return best_chi, best_scal, best_ext, n_dof
def best_fit_tmp(lbda_obs, spec_obs, err_obs, tmp_reader, search_mode='simplex',
                 n_best=1, lambda_scal=None, scale_range=(0.1,10,0.01),
                 ext_range=None, simplex_options=None, dlbda_obs=None,
                 instru_corr=None, instru_res=None, instru_idx=None,
                 filter_reader=None, lib_dir='tmp_lib/', tmp_endswith='.fits',
                 red_chi2=True, remove_nan=False, nproc=1, verbosity=0,
                 force_continue=False, min_npts=1, **kwargs):
    """ Finds the best fit template spectrum to a given observed spectrum,
    within a spectral library. By default, a single free parameter is
    considered: the scaling factor of the spectrum. A first automatic scaling
    is performed by comparing the flux of the observed and template spectra at
    lambda_scal. Then a more refined scaling is performed, either through
    simplex or grid search (within scale_range).
    If fit_extinction is set to True, the extinction is also considered as a
    free parameter.

    Parameters
    ----------
    lbda_obs : numpy 1d ndarray or list
        Wavelength of observed spectrum. If several instruments, should be
        ordered per instrument, not necessarily as monotonically increasing
        wavelength. Hereafter, n_ch = len(lbda_obs).
    spec_obs : numpy 1d ndarray or list
        Observed spectrum for each value of lbda_obs.
    err_obs : numpy 1d/2d ndarray or list
        Uncertainties on the observed spectrum. If 2d array, should be [2,n_ch]
        where the first (resp. second) column corresponds to lower (upper)
        uncertainty, and n_ch is the length of lbda_obs and spec_obs.
    tmp_reader : python routine
        External routine that reads a model file and returns a 3D numpy array,
        where the first column corresponds to wavelengths, the second
        contains flux values, and the third the uncertainties on the flux.
    search_mode: str, optional, {'simplex','grid'}
        How is the best fit template found? Simplex or grid search.
    n_best: int, optional
        Number of best templates to be returned (default: 1)
    lambda_scal: float, optional
        Wavelength where a first scaling will be performed between template
        and observed spectra. If not provided, the middle wavelength of the
        observed spectra will be considered.
    scale_range: tuple, opt
        If grid search, this parameter should be provided as a tuple of 3
        floats: lower limit, upper limit and step of the grid search for the
        scaling factor to be applied AFTER the first rough scaling (i.e.
        scale_range should always encompass 1).
    ext_range: tuple or None, opt
        If None: differential extinction is not to be considered as a free
        parameter. Elif a tuple of 3 floats is provided, differential extinction
        will be considered, with the floats as lower limit, upper limit and step
        of the grid search.
        Note: if simplex search, the range is still used to set a chi of
        np.inf outside of the range.
    dlbda_obs: numpy 1d ndarray or list, optional
        Spectral channel width for the observed spectrum. It should be provided
        IF one wants to weigh each point based on the spectral
        resolution of the respective instruments (as in Olofsson et al. 2016).
    instru_corr : numpy 2d ndarray or list, optional
        Spectral correlation throughout post-processed images in which the
        spectrum is measured. It is specific to the combination of instrument,
        algorithm and radial separation of the companion from the central star.
        Can be computed using distances.spectral_correlation(). In case of
        a spectrum obtained with different instruments, build it with
        distances.combine_corrs(). If not provided, it will consider the
        uncertainties in each spectral channels are independent. See Greco &
        Brandt (2017) for details.
    instru_res : float or list of floats/strings, optional
        The mean instrumental spectral resolution(s) OR filter names. This is
        used to convolve the model spectrum. If several instruments are used,
        provide a list of spectral resolution values / filter names, one for
        each instrument used.
    instru_idx: numpy 1d array, optional
        1d array containing an index representing each instrument used
        to obtain the spectrum, label them from 0 to n_instru. Zero for points
        that don't correspond to any instru_res provided above, and i in
        [1,n_instru] for points associated to instru_res[i-1]. This parameter
        must be provided if the spectrum consists of points obtained with
        different instruments.
    filter_reader: python routine, optional
        External routine that reads a filter file and returns a 2D numpy array,
        where the first column corresponds to wavelengths, and the second
        contains transmission values. Important: if not provided, but strings
        are detected in instru_res, the default file reader will be used.
        It assumes the following format for the files:
        - first row containing header
        - starting from 2nd row: 1st column: wavelength, 2nd col.: transmission
        - Unit of wavelength can be provided in parentheses of first header key
        name: e.g. "WL(AA)" for angstrom, "wavelength(mu)" for micrometer or
        "lambda(nm)" for nanometer. Note: Only what is in parentheses matters.
        Important: filter files should all have the same format and WL units.
    lib_dir: str, optional
        Path to the directory containing the template spectral library.
    tmp_endswith: str, optional
        Filename suffix identifying template spectra within lib_dir.
    simplex_options: dict, optional
        The scipy.optimize.minimize simplex (Nelder-Mead) options.
    red_chi2: bool, optional
        Whether to compute the reduced chi square. If False, considers chi^2.
    remove_nan: bool, optional
        Whether to remove NaN values from template spectrum BEFORE resampling
        to the wavelength sampling of the observed spectrum. Whether it is set
        to True or False, a check is made just before chi^2 is calculated
        (after resampling), and only non-NaN values will be considered.
    nproc: None or int, optional
        The number of processes to use for parallelization. If set to None,
        will automatically use half of the available CPUs on the machine.
    verbosity: 0, 1 or 2, optional
        Verbosity level. 0 for no output and 2 for full information.
    force_continue: bool, optional
        In case of issue with the fit, whether to continue regardless (this may
        be useful in an uneven spectral library, where some templates have too
        few points for the fit to be performed).
    min_npts: int or None, optional
        Minimum number of (resampled) points to consider a
        template spectrum valid in the minimization search.
    **kwargs: optional
        Optional arguments to the scipy.optimize.minimize function

    Returns
    -------
    final_tmpname: tuple of n_best str
        Best-fit template filenames
    final_tmp: tuple of n_best 3D numpy array
        Best-fit template spectra (3D: lbda+spec+spec_err)
    final_chi: 1D numpy array of length n_best
        Best-fit template chi^2
    final_params: 2D numpy array (2xn_best)
        Best-fit parameters (optimal scaling and optical extinction). Note if
        extinction is not fitted, optimal AV will be set to 0.
    final_ndof: 1D numpy array of length n_best
        Number of degrees of freedom associated with each best fit.
    """
    # create list of template filenames
    tmp_filelist = [x for x in os.listdir(lib_dir) if x.endswith(tmp_endswith)]
    n_tmp = len(tmp_filelist)
    if verbosity > 0:
        start_time = time_ini()
        int_time = start_time
        print("{:.0f} template spectra will be tested.".format(n_tmp))
    # per-template results, filled in as each fit completes
    chi = np.ones(n_tmp)
    scal = np.ones_like(chi)
    ext = np.zeros_like(chi)
    n_dof = np.ones_like(chi)
    if nproc is None:
        nproc = cpu_count()//2
        if verbosity:
            print("{:.0f} CPUs will be used".format(nproc))
    if verbosity:
        print("****************************************\n")
    if nproc == 1:
        # sequential fit of each template in turn
        for tt in range(n_tmp):
            if verbosity > 1 and search_mode == 'simplex':
                print('Nelder-Mead minimization is running...')
            res = get_chi(lbda_obs, spec_obs, err_obs, tmp_filelist[tt],
                          tmp_reader, search_mode=search_mode,
                          scale_range=scale_range, ext_range=ext_range,
                          lambda_scal=lambda_scal, dlbda_obs=dlbda_obs,
                          instru_corr=instru_corr, instru_res=instru_res,
                          instru_idx=instru_idx, filter_reader=filter_reader,
                          simplex_options=simplex_options, red_chi2=red_chi2,
                          remove_nan=remove_nan, force_continue=force_continue,
                          verbose=verbosity, min_npts=min_npts, **kwargs)
            chi[tt], scal[tt], ext[tt], n_dof[tt] = res
            shortname = tmp_filelist[tt][:-len(tmp_endswith)]
            if not np.isfinite(chi[tt]):
                if verbosity > 0:
                    msg_err = "{:.0f}/{:.0f} ({}) FAILED"
                    if np.isnan(chi[tt]):
                        msg_err += " (simplex did not converge)"
                    print(msg_err.format(tt+1, n_tmp, tmp_filelist[tt]))
            else:
                if verbosity > 0 and tt == 0:
                    # after the first fit, estimate the total runtime
                    msg = "{:.0f}/{:.0f}: {}, chi_r^2 = {:.1f}, ndof={:.0f}"
                    if verbosity > 1:
                        msg += ", done in {}s."
                        indiv_time = time_fin(start_time)
                        print(msg.format(tt+1, n_tmp, shortname, chi[tt],
                                         n_dof[tt], indiv_time))
                    else:
                        print(msg.format(tt+1, n_tmp, shortname, chi[tt], n_dof[tt]))
                    now = datetime.now()
                    delta_t = now.timestamp()-start_time.timestamp()
                    tot_time = np.ceil(n_tmp*delta_t/60)
                    msg = "Based on the first fit, it may take ~{:.0f}min to"
                    msg += " test the whole library \n"
                    print(msg.format(tot_time))
                    int_time = time_ini(verbose=False)
                elif verbosity > 0:
                    msg = "{:.0f}/{:.0f}: {}, chi_r^2 = {:.1f}, ndof={:.0f}"
                    if verbosity > 1:
                        msg += " done in {}s."
                        indiv_time = time_fin(int_time)
                        int_time = time_ini(verbose=False)
                        print(msg.format(tt+1, n_tmp, shortname, chi[tt],
                                         n_dof[tt], indiv_time))
                    else:
                        print(msg.format(tt+1, n_tmp, shortname, chi[tt],
                                         n_dof[tt]))
    else:
        # parallel fit: one get_chi call per template, farmed out to the pool
        res = pool_map(nproc, get_chi, lbda_obs, spec_obs, err_obs,
                       iterable(tmp_filelist), tmp_reader, search_mode,
                       lambda_scal, scale_range, ext_range, dlbda_obs,
                       instru_corr, instru_res, instru_idx, filter_reader,
                       simplex_options, red_chi2, remove_nan, force_continue,
                       verbosity, min_npts)
        res = np.array(res, dtype=object)
        chi = res[:, 0]
        scal = res[:, 1]
        ext = res[:, 2]
        n_dof = res[:, 3]
    n_success = np.sum(np.isfinite(chi))
    if verbosity > 0:
        print("****************************************\n")
        msg = "{:.0f}/{:.0f} template spectra were fitted. \n"
        print(msg.format(n_success, n_tmp))
        timing(start_time)
    return best_n_tmp(chi, scal, ext, n_dof, tmp_filelist, tmp_reader,
                      n_best=n_best, verbose=verbosity)
def best_n_tmp(chi, scal, ext, n_dof, tmp_filelist, tmp_reader, n_best=1,
               verbose=False):
    """
    Routine returning the n best template spectra.

    Parameters
    ----------
    chi : numpy 1d ndarray
        Goodness of fit (chi^2) obtained for each template of the library.
    scal : numpy 1d ndarray
        Optimal flux scaling factor found for each template.
    ext : numpy 1d ndarray
        Optimal differential extinction (Delta A_V, in mag) found for each
        template.
    n_dof : numpy 1d ndarray
        Number of degrees of freedom of the fit for each template.
    tmp_filelist : list of str
        Template filenames, in the same order as chi/scal/ext/n_dof.
    tmp_reader : python routine
        External routine that reads a template file and returns its
        wavelengths, fluxes and flux uncertainties.
    n_best : int, optional
        Number of best templates to be returned (default: 1).
    verbose : bool or int, optional
        Whether to print the best-fit results.

    Returns
    -------
    final_tmpname: tuple of n_best str
        Best-fit template filenames
    final_tmp: tuple of n_best 3D numpy array
        Best-fit template spectra (3D: lbda+spec+spec_err)
    final_chi: 1D numpy array of length n_best
        Best-fit template chi^2
    final_params: 2D numpy array (2xn_best)
        Best-fit parameters (optimal scaling and optical extinction). Note if
        extinction is not fitted, optimal AV will be set to 0.
    final_ndof: 1D numpy array of length n_best
        Number of degrees of freedom associated with each best fit.
    """
    # sort from best (lowest chi^2) to worst match
    order = np.argsort(chi)
    sort_chi = chi[order]
    sort_scal = scal[order]
    sort_ext = ext[order]
    sort_ndof = n_dof[order]
    sort_filelist = [tmp_filelist[i] for i in order]
    if verbose:
        print("best chi: ", sort_chi[:n_best])
        print("best scale fac: ", sort_scal[:n_best])
        print("n_dof: ", sort_ndof[:n_best])
    # take the n_best first ones
    best_tmp = []
    for n in range(n_best):
        lbda_tmp, spec_tmp, spec_tmp_err = tmp_reader(sort_filelist[n])
        # convert the optimal Delta A_V into a wavelength-dependent flux
        # factor; the sign of the fitted extinction decides whether the
        # template is reddened (divide) or de-reddened (multiply).
        Albdas = extinction(lbda_tmp, abs(sort_ext[n]))
        extinc_fac = np.power(10., -Albdas/2.5)
        if sort_ext[n] > 0:
            final_scal = sort_scal[n]*extinc_fac
        elif sort_ext[n] < 0:
            final_scal = sort_scal[n]/extinc_fac
        else:
            final_scal = sort_scal[n]
        best_tmp.append(np.array([lbda_tmp, spec_tmp*final_scal,
                                  spec_tmp_err*final_scal]))
        if verbose:
            msg = "The best template #{:.0f} is: {} "
            msg += "(Delta A_V={:.1f}mag)\n"
            print(msg.format(n+1, sort_filelist[n], sort_ext[n]))
    best_tmpname = tuple(sort_filelist[:n_best])
    best_tmp = tuple(best_tmp)
    best_params = np.array([sort_scal[:n_best], sort_ext[:n_best]])
    return best_tmpname, best_tmp, sort_chi[:n_best], best_params, \
        sort_ndof[:n_best]
3539615 | <reponame>spradeepv/dive-into-python<gh_stars>0
def count_shifted_intersection(values, k):
    """Return |A ∩ (A + k)| for A = set(values).

    Equivalently: the number of distinct values v such that v + k is also
    present among the values (the two counts are in bijection via x = v + k).
    """
    distinct = set(values)
    return sum(1 for v in distinct if v + k in distinct)


def main():
    """Read 'n k' then n integers from stdin; print the pair count."""
    # n (the declared count) is not needed: we operate on the whole line.
    _n, k = map(int, input().split())
    values = list(map(int, input().split()))
    print(count_shifted_intersection(values, k))


if __name__ == "__main__":
    main()
| StarcoderdataPython |
6403514 | #!/usr/bin/python
# Copyright: (c) 2020, DellEMC
""" Ansible module for managing Filesystem Snapshots on Unity"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Ansible module metadata: maturity ('preview') and support channel
# ('community') advertised to the Ansible tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: dellemc_unity_filesystem_snapshot
short_description: Manage filesystem snapshot on the Unity storage system
description:
- Managing Filesystem Snapshot on the Unity storage system includes
create filesystem snapshot, get filesystem snapshot, modify filesystem
snapshot and delete filesystem snapshot.
version_added: "1.1.0"
extends_documentation_fragment:
- dellemc.unity.dellemc_unity.unity
author:
- <NAME> (@kharer5) <<EMAIL>>
options:
snapshot_name:
description:
- The name of the filesystem snapshot.
- Mandatory parameter for creating a filesystem snapshot.
- For all other operations either snapshot_name or snapshot_id
is required.
type: str
snapshot_id:
description:
- During creation snapshot_id is auto generated.
- For all other operations either snapshot_id or snapshot_name
is required.
type: str
filesystem_name:
description:
- The name of the Filesystem for which snapshot is created.
- For creation of filesystem snapshot either filesystem_name or
filesystem_id is required.
- Not required for other operations.
type: str
filesystem_id:
description:
- The ID of the Filesystem for which snapshot is created.
- For creation of filesystem snapshot either filesystem_id or
filesystem_name is required.
- Not required for other operations.
type: str
nas_server_name:
description:
- The name of the NAS server in which the Filesystem is created.
- For creation of filesystem snapshot either nas_server_name or
nas_server_id is required.
- Not required for other operations.
type: str
nas_server_id:
description:
- The ID of the NAS server in which the Filesystem is created.
- For creation of filesystem snapshot either filesystem_id or
filesystem_name is required.
- Not required for other operations.
type: str
auto_delete:
description:
- This option specifies whether or not the filesystem snapshot will be
automatically deleted.
- If set to true, the filesystem snapshot will expire based on the pool
auto deletion policy.
- If set to false, the filesystem snapshot will not be auto deleted
based on the pool auto deletion policy.
- auto_delete can not be set to True, if expiry_time is specified.
- If during creation neither auto_delete nor expiry_time is mentioned
then the filesystem snapshot will be created keeping auto_delete as
True.
- Once the expiry_time is set, then the filesystem snapshot cannot be
assigned to the auto delete policy.
type: bool
expiry_time:
description:
- This option is for specifying the date and time after which the
filesystem snapshot will expire.
- The time is to be mentioned in UTC timezone.
- The format is "MM/DD/YYYY HH:MM". Year must be in 4 digits.
type: str
description:
description:
- The additional information about the filesystem snapshot can be
provided using this option.
- The description can be removed by passing an empty string.
type: str
fs_access_type:
description:
- Access type of the filesystem snapshot.
- Required only during creation of filesystem snapshot.
- If not given, snapshot's access type will be 'Checkpoint'.
type: str
choices: ['Checkpoint' , 'Protocol']
state:
description:
- The state option is used to mention the existence of the filesystem
snapshot.
type: str
required: True
choices: ['absent', 'present']
notes:
- Filesystem snapshot cannot be deleted, if it has nfs or smb share.
'''
EXAMPLES = r'''
- name: Create Filesystem Snapshot
dellemc_unity_filesystem_snapshot:
unispherehost: "{{unispherehost}}"
username: "{{username}}"
password: "{{password}}"
verifycert: "{{verifycert}}"
snapshot_name: "ansible_test_FS_snap"
filesystem_name: "ansible_test_FS"
nas_server_name: "lglad069"
description: "Created using playbook"
auto_delete: True
fs_access_type: "Protocol"
state: "present"
- name: Create Filesystem Snapshot with expiry time.
dellemc_unity_filesystem_snapshot:
unispherehost: "{{unispherehost}}"
username: "{{username}}"
password: "{{password}}"
verifycert: "{{verifycert}}"
snapshot_name: "ansible_test_FS_snap_1"
filesystem_name: "ansible_test_FS_1"
nas_server_name: "lglad069"
description: "Created using playbook"
expiry_time: "04/15/2021 2:30"
fs_access_type: "Protocol"
state: "present"
- name: Get Filesystem Snapshot Details using Name
dellemc_unity_filesystem_snapshot:
unispherehost: "{{unispherehost}}"
username: "{{username}}"
password: "{{password}}"
verifycert: "{{verifycert}}"
snapshot_name: "ansible_test_FS_snap"
state: "present"
- name: Get Filesystem Snapshot Details using ID
dellemc_unity_filesystem_snapshot:
unispherehost: "{{unispherehost}}"
username: "{{username}}"
password: "{{password}}"
verifycert: "{{verifycert}}"
snapshot_id: "10008000403"
state: "present"
- name: Update Filesystem Snapshot attributes
dellemc_unity_filesystem_snapshot:
unispherehost: "{{unispherehost}}"
username: "{{username}}"
password: "{{password}}"
verifycert: "{{verifycert}}"
snapshot_name: "ansible_test_FS_snap"
description: "Description updated"
auto_delete: False
expiry_time: "04/15/2021 5:30"
state: "present"
- name: Update Filesystem Snapshot attributes using ID
dellemc_unity_filesystem_snapshot:
unispherehost: "{{unispherehost}}"
username: "{{username}}"
password: "{{password}}"
verifycert: "{{verifycert}}"
snapshot_id: "10008000403"
expiry_time: "04/18/2021 8:30"
state: "present"
- name: Delete Filesystem Snapshot using Name
dellemc_unity_filesystem_snapshot:
unispherehost: "{{unispherehost}}"
username: "{{username}}"
password: "{{password}}"
verifycert: "{{verifycert}}"
snapshot_name: "ansible_test_FS_snap"
state: "absent"
- name: Delete Filesystem Snapshot using ID
dellemc_unity_filesystem_snapshot:
unispherehost: "{{unispherehost}}"
username: "{{username}}"
password: "{{password}}"
verifycert: "{{verifycert}}"
snapshot_id: "10008000403"
state: "absent"
'''
RETURN = r'''
changed:
description: Whether or not the resource has changed
returned: always
type: bool
filesystem_snapshot_details:
description: Details of the filesystem snapshot.
returned: When filesystem snapshot exists
type: complex
contains:
access_type:
description: Access type of filesystem snapshot.
type: str
attached_wwn:
description: Attached WWN details.
type: str
creation_time:
description: Creation time of filesystem snapshot.
type: str
creator_schedule:
description: Creator schedule of filesystem snapshot.
type: str
creator_type:
description: Creator type for filesystem snapshot.
type: str
creator_user:
description: Creator user for filesystem snapshot.
type: str
description:
description: Description of the filesystem snapshot.
type: str
expiration_time:
description: Date and time after which the filesystem snapshot
will expire.
type: str
is_auto_delete:
description: Is the filesystem snapshot is auto deleted or not.
type: bool
id:
description: Unique identifier of the filesystem snapshot
instance.
type: str
name:
description: The name of the filesystem snapshot.
type: str
size:
description: Size of the filesystem snapshot.
type: int
filesystem_name:
description: Name of the filesystem for which the snapshot exists.
type: str
filesystem_id:
description: Id of the filesystem for which the snapshot exists.
type: str
nas_server_name:
description: Name of the NAS server on which filesystem exists.
type: str
nas_server_id:
description: Id of the NAS server on which filesystem exists.
type: str
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
import dellemc_ansible_unity_utils as utils
from datetime import datetime
# Module-scoped logger for this Ansible module.
LOG = utils.get_logger('dellemc_unity_filesystem_snapshot')
# Truthy when the required Unity python SDK could be imported.
HAS_UNITY_SDK = utils.get_unity_sdk()
# Result of the SDK minimum-version check; consulted before connecting.
UNITY_SDK_VERSION_CHECK = utils.storops_version_check()
# Client identifier passed along when opening the Unisphere connection.
application_type = "Ansible/1.2.0"
class UnityFilesystemSnapshot(object):
    """Class with Filesystem Snapshot operations"""
    def __init__(self):
        """ Define all parameters required by this module"""
        self.module_params = utils.get_unity_management_host_parameters()
        self.module_params.update(get_unity_snapshot_parameters())
        # A snapshot/filesystem/NAS server may be addressed by name OR by
        # id, never by both at once.
        mutually_exclusive = [['snapshot_name', 'snapshot_id'],
                              ['filesystem_name', 'filesystem_id'],
                              ['nas_server_name', 'nas_server_id']]
        required_one_of = [['snapshot_name', 'snapshot_id']]
        # initialize the ansible module
        self.module = AnsibleModule(argument_spec=self.module_params,
                                    supports_check_mode=False,
                                    mutually_exclusive=mutually_exclusive,
                                    required_one_of=required_one_of)
        # result is a dictionary that contains changed status and
        # filesystem snapshot details
        self.result = {"changed": False,
                       'filesystem_snapshot_details': None}
        if not HAS_UNITY_SDK:
            self.module.fail_json(msg="Ansible modules for Unity require the"
                                      " Unity python library to be"
                                      " installed. Please install the "
                                      "library before using these modules.")
        if UNITY_SDK_VERSION_CHECK and \
                not UNITY_SDK_VERSION_CHECK['supported_version']:
            err_msg = UNITY_SDK_VERSION_CHECK['unsupported_version_message']
            LOG.error(err_msg)
            self.module.fail_json(msg=err_msg)
        self.unity_conn = utils.get_unity_unisphere_connection(
            self.module.params, application_type)
        self.snap_obj = utils.snap.UnitySnap(self.unity_conn)
        LOG.info('Connection established with the Unity Array')
    def validate_expiry_time(self, expiry_time):
        """Validates the specified expiry_time"""
        # Only the format is validated here; whether the time is in the
        # future is checked later against the array's system time.
        try:
            datetime.strptime(expiry_time, '%m/%d/%Y %H:%M')
        except ValueError:
            error_msg = ("expiry_time: %s, not in MM/DD/YYYY HH:MM format." %
                         expiry_time)
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)
    def to_update(self, fs_snapshot, description=None, auto_del=None,
                  expiry_time=None, fs_access_type=None):
        """Determines whether to update the snapshot or not"""
        # Collects only the fields that actually differ; an empty dict
        # means "no modification needed".
        snap_modify_dict = dict()
        # The access type is immutable once the snapshot exists.
        if fs_access_type and fs_access_type != fs_snapshot.access_type:
            error_message = "Modification of access type is not allowed."
            LOG.error(error_message)
            self.module.fail_json(msg=error_message)
        if expiry_time:
            # If the snapshot has is_auto_delete True,
            # Check if auto_delete in the input is either None or True
            if fs_snapshot.is_auto_delete and (auto_del is None or auto_del):
                self.module.fail_json(msg="expiry_time can be assigned when"
                                          " auto delete is False.")
        if auto_del is not None:
            if fs_snapshot.expiration_time:
                error_msg = "expiry_time for filesystem snapshot is set." \
                            " Once it is set then snapshot cannot" \
                            " be assigned to auto_delete policy."
                self.module.fail_json(msg=error_msg)
            if auto_del != fs_snapshot.is_auto_delete:
                snap_modify_dict['is_auto_delete'] = auto_del
        if description is not None and description != fs_snapshot.description:
            snap_modify_dict['description'] = description
        if to_update_expiry_time(fs_snapshot, expiry_time):
            snap_modify_dict['expiry_time'] = expiry_time
        LOG.info("Snapshot modification details: %s", snap_modify_dict)
        return snap_modify_dict
    def update_filesystem_snapshot(self, fs_snapshot, snap_modify_dict):
        """Apply the modifications collected by to_update() to the
        snapshot on the array; fails the module on any SDK error."""
        try:
            duration = None
            if 'expiry_time' in snap_modify_dict \
                    and snap_modify_dict['expiry_time']:
                # The SDK expects a retention duration in seconds relative
                # to the array's current system time, not an absolute time.
                duration = convert_timestamp_to_sec(
                    snap_modify_dict['expiry_time'],
                    self.unity_conn.system_time)
                if duration and duration <= 0:
                    self.module.fail_json(msg="expiry_time should be after"
                                              " the current system time.")
            if 'is_auto_delete' in snap_modify_dict \
                    and snap_modify_dict['is_auto_delete'] is not None:
                auto_delete = snap_modify_dict['is_auto_delete']
            else:
                auto_delete = None
            # An empty string is a valid description (clears it), so the
            # len() == 0 case is deliberately allowed through.
            if 'description' in snap_modify_dict \
                    and (snap_modify_dict['description']
                         or len(snap_modify_dict['description']) == 0):
                description = snap_modify_dict['description']
            else:
                description = None
            fs_snapshot.modify(retentionDuration=duration,
                               isAutoDelete=auto_delete,
                               description=description)
            fs_snapshot.update()
        except Exception as e:
            error_msg = "Failed to modify filesystem snapshot" \
                        " [name: %s , id: %s] with error %s."\
                        % (fs_snapshot.name, fs_snapshot.id, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)
    def create_filesystem_snapshot(self, snap_name, storage_id,
                                   description=None, auto_del=None,
                                   expiry_time=None, fs_access_type=None):
        """Create a snapshot of the given storage resource and return the
        new snapshot object; fails the module on any SDK error."""
        try:
            duration = None
            if expiry_time:
                # Retention is expressed in seconds from the array's
                # current system time.
                duration = convert_timestamp_to_sec(
                    expiry_time, self.unity_conn.system_time)
                if duration <= 0:
                    self.module.fail_json(msg="expiry_time should be after"
                                              " the current system time.")
            fs_snapshot = self.snap_obj.create(
                cli=self.unity_conn._cli, storage_resource=storage_id,
                name=snap_name, description=description,
                is_auto_delete=auto_del, retention_duration=duration,
                fs_access_type=fs_access_type)
            return fs_snapshot
        except Exception as e:
            error_msg = "Failed to create filesystem snapshot" \
                        " %s with error %s" % (snap_name, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)
    def is_snap_has_share(self, fs_snap):
        """Return True when at least one NFS or SMB share has been created
        from this snapshot (such snapshots must not be deleted)."""
        try:
            obj = self.unity_conn.get_nfs_share(snap=fs_snap) or \
                self.unity_conn.get_cifs_share(snap=fs_snap)
            if len(obj) > 0:
                LOG.info("Snapshot has %s nfs/smb share/s", len(obj))
                return True
        except Exception as e:
            msg = "Failed to get nfs/smb share from filesystem snapshot. " \
                  "error: %s" % str(e)
            LOG.error(msg)
            self.module.fail_json(msg=msg)
        return False
    def delete_fs_snapshot(self, fs_snapshot):
        """Delete the snapshot unless a share depends on it; returns None
        so the caller can clear its reference."""
        try:
            # Checking whether nfs/smb share created from fs_snapshot
            if self.is_snap_has_share(fs_snapshot):
                msg = "Filesystem snapshot cannot be deleted because it has " \
                      "nfs/smb share"
                LOG.error(msg)
                self.module.fail_json(msg=msg)
            fs_snapshot.delete()
            return None
        except Exception as e:
            error_msg = "Failed to delete filesystem snapshot" \
                        " [name: %s, id: %s] with error %s." \
                        % (fs_snapshot.name, fs_snapshot.id, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)
    def get_fs_snapshot_obj(self, name=None, id=None):
        """Look up a snapshot by name or id. Returns the snapshot object,
        or None when it does not exist (so 'absent' can be a no-op)."""
        fs_snapshot = id if id else name
        msg = "Failed to get details of filesystem snapshot %s with error %s."
        try:
            fs_snap_obj = self.unity_conn.get_snap(name=name, _id=id)
            if fs_snap_obj and fs_snap_obj.existed:
                LOG.info("Successfully got the filesystem snapshot object "
                         "%s.", fs_snap_obj)
            else:
                fs_snap_obj = None
            return fs_snap_obj
        except utils.HttpError as e:
            if e.http_status == 401:
                cred_err = ("Incorrect username or password , %s" % e.message)
                self.module.fail_json(msg=cred_err)
            else:
                err_msg = msg % (fs_snapshot, str(e))
                LOG.error(err_msg)
                self.module.fail_json(msg=err_msg)
        except utils.UnityResourceNotFoundError as e:
            # Not-found is not fatal: the caller decides what to do.
            err_msg = msg % (fs_snapshot, str(e))
            LOG.error(err_msg)
            return None
        except Exception as e:
            err_msg = msg % (fs_snapshot, str(e))
            LOG.error(err_msg)
            self.module.fail_json(msg=err_msg)
    def get_filesystem_obj(self, nas_server=None, name=None, id=None):
        """Look up a filesystem by name (NAS server required) or by id.
        NOTE(review): lookup by id appears to return an indexable result
        (callers use [0]) while lookup by name is used directly — confirm
        against the storops SDK."""
        filesystem = id if id else name
        try:
            obj_fs = None
            if name:
                if not nas_server:
                    err_msg = "NAS Server is required to get the FileSystem."
                    LOG.error(err_msg)
                    self.module.fail_json(msg=err_msg)
                obj_fs = self.unity_conn.get_filesystem(name=name,
                                                        nas_server=nas_server)
                if obj_fs and obj_fs.existed:
                    LOG.info("Successfully got the filesystem object %s.",
                             obj_fs)
                    return obj_fs
            if id:
                if nas_server:
                    obj_fs = self.unity_conn\
                        .get_filesystem(id=id, nas_server=nas_server)
                else:
                    obj_fs = self.unity_conn.get_filesystem(id=id)
                if obj_fs and obj_fs.existed:
                    LOG.info("Successfully got the filesystem object %s.",
                             obj_fs)
                    return obj_fs
        except Exception as e:
            error_msg = "Failed to get filesystem %s with error %s."\
                        % (filesystem, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)
    def get_nas_server_obj(self, name=None, id=None):
        """Look up a NAS server by name or id; fails the module when the
        server cannot be found."""
        nas_server = id if id else name
        error_msg = ("Failed to get NAS server %s." % nas_server)
        try:
            obj_nas = self.unity_conn.get_nas_server(_id=id, name=name)
            if name and obj_nas.existed:
                LOG.info("Successfully got the NAS server object %s.",
                         obj_nas)
                return obj_nas
            elif id and obj_nas.existed:
                LOG.info("Successfully got the NAS server object %s.",
                         obj_nas)
                return obj_nas
            else:
                LOG.error(error_msg)
                self.module.fail_json(msg=error_msg)
        except Exception as e:
            error_msg = "Failed to get NAS server %s with error %s."\
                        % (nas_server, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)
    def create_fs_snapshot_details_dict(self, fs_snapshot):
        """ Add name and id of storage resource to filesystem snapshot
        details """
        snapshot_dict = fs_snapshot._get_properties()
        # Replace the raw storage_resource blob with friendlier
        # filesystem / NAS server name+id fields.
        del snapshot_dict['storage_resource']
        snapshot_dict['filesystem_name'] = fs_snapshot.storage_resource.name
        snapshot_dict['filesystem_id'] = fs_snapshot.storage_resource.filesystem.id
        obj_fs = self.unity_conn.\
            get_filesystem(id=fs_snapshot.storage_resource.filesystem.id)
        if obj_fs and obj_fs.existed:
            snapshot_dict['nas_server_name'] = obj_fs.nas_server[0].name
            snapshot_dict['nas_server_id'] = obj_fs.nas_server[0].id
        return snapshot_dict
    def perform_module_operation(self):
        """
        Perform different actions on snapshot module based on parameters
        chosen in playbook
        """
        snapshot_name = self.module.params['snapshot_name']
        snapshot_id = self.module.params['snapshot_id']
        filesystem_name = self.module.params['filesystem_name']
        filesystem_id = self.module.params['filesystem_id']
        nas_server_name = self.module.params['nas_server_name']
        nas_server_id = self.module.params['nas_server_id']
        auto_delete = self.module.params['auto_delete']
        expiry_time = self.module.params['expiry_time']
        description = self.module.params['description']
        fs_access_type = self.module.params['fs_access_type']
        state = self.module.params['state']
        nas_server_resource = None
        filesystem_resource = None
        changed = False
        LOG.info("Getting Filesystem Snapshot details.")
        fs_snapshot = self.get_fs_snapshot_obj(name=snapshot_name,
                                               id=snapshot_id)
        msg = "Filesystem Snapshot details: %s." % str(fs_snapshot)
        LOG.info(msg)
        # Get NAS server Object
        if nas_server_name is not None:
            if nas_server_name == "" or nas_server_name.isspace():
                self.module.fail_json(msg="Invalid nas_server_name given,"
                                          " Please provide a valid name.")
            nas_server_resource = self\
                .get_nas_server_obj(name=nas_server_name)
        elif nas_server_id is not None:
            if nas_server_id == "" or nas_server_id.isspace():
                self.module.fail_json(msg="Invalid nas_server_id given,"
                                          " Please provide a valid ID.")
            nas_server_resource = self.get_nas_server_obj(id=nas_server_id)
        # Get Filesystem Object
        # fs_res_id is only bound when a filesystem was supplied; later
        # uses are guarded by checks on filesystem_resource.
        if filesystem_name is not None:
            if filesystem_name == "" or filesystem_name.isspace():
                self.module.fail_json(msg="Invalid filesystem_name given,"
                                          " Please provide a valid name.")
            filesystem_resource = self\
                .get_filesystem_obj(nas_server=nas_server_resource,
                                    name=filesystem_name)
            fs_res_id = filesystem_resource.storage_resource.id
        elif filesystem_id is not None:
            if filesystem_id == "" or filesystem_id.isspace():
                self.module.fail_json(msg="Invalid filesystem_id given,"
                                          " Please provide a valid ID.")
            filesystem_resource = self\
                .get_filesystem_obj(id=filesystem_id)
            fs_res_id = filesystem_resource[0].storage_resource.id
        # Check for error, if user tries to create a filesystem snapshot
        # with the same name.
        # NOTE(review): when the filesystem was looked up by id,
        # filesystem_resource is indexed with [0] above; the direct
        # .storage_resource.name access below may need the same — confirm.
        if fs_snapshot and filesystem_resource and \
                (fs_snapshot.storage_resource.id
                 != fs_res_id):
            self.module.fail_json(
                msg="Snapshot %s is of %s storage resource. Cannot create new"
                    " snapshot with same name for %s storage resource."
                    % (fs_snapshot.name, fs_snapshot.storage_resource.name,
                       filesystem_resource.storage_resource.name))
        # check for valid expiry_time
        if expiry_time is not None and \
                (expiry_time == "" or expiry_time.isspace()):
            self.module.fail_json(msg="Please provide valid expiry_time,"
                                      " empty expiry_time given.")
        if expiry_time:
            self.validate_expiry_time(expiry_time)
        # Check if in input auto_delete is True and expiry_time is not None
        if expiry_time and auto_delete:
            error_msg = "Cannot set expiry_time if auto_delete given as True."
            LOG.info(error_msg)
            self.module.fail_json(msg=error_msg)
        # check for fs_access_type
        if fs_access_type is not None:
            if (fs_access_type == "" or fs_access_type.isspace()):
                self.module.fail_json(msg="Please provide valid "
                                          "fs_access_type, empty "
                                          "fs_access_type given.")
            # Map the playbook string to the SDK enum.
            if fs_access_type == "Checkpoint":
                fs_access_type = utils.FilesystemSnapAccessTypeEnum.CHECKPOINT
            elif fs_access_type == "Protocol":
                fs_access_type = utils.FilesystemSnapAccessTypeEnum.PROTOCOL
        # Check whether to modify the filesystem snapshot or not
        fs_snap_modify_dict = dict()
        if state == 'present' and fs_snapshot:
            fs_snap_modify_dict = self\
                .to_update(fs_snapshot, description=description,
                           auto_del=auto_delete, expiry_time=expiry_time,
                           fs_access_type=fs_access_type)
        # Create Filesystem Snapshot
        if not fs_snapshot and state == "present":
            LOG.info("Creating the filesystem snapshot.")
            if snapshot_id:
                self.module.fail_json(msg="Creation of Filesystem Snapshot is"
                                          " allowed using snapshot_name only,"
                                          " snapshot_id given.")
            if snapshot_name == "" or snapshot_name.isspace():
                self.module.fail_json(msg="snapshot_name is required for"
                                          " creation of the filesystem"
                                          " snapshot, empty snapshot_name"
                                          " given.")
            if not filesystem_resource:
                self.module.fail_json(msg="filesystem_name or filesystem_id"
                                          " required to create a snapshot.")
            fs_snapshot = self.create_filesystem_snapshot(
                snapshot_name,
                fs_res_id,
                description,
                auto_delete,
                expiry_time,
                fs_access_type)
            changed = True
        # Update the Snapshot
        if fs_snapshot and state == "present" and fs_snap_modify_dict:
            LOG.info("Updating the Filesystem Snapshot.")
            self.update_filesystem_snapshot(fs_snapshot, fs_snap_modify_dict)
            changed = True
        # Delete the Filesystem Snapshot
        if state == "absent" and fs_snapshot:
            fs_snapshot = self.delete_fs_snapshot(fs_snapshot)
            changed = True
        # Add filesystem snapshot details to the result.
        if fs_snapshot:
            fs_snapshot.update()
            self.result["filesystem_snapshot_details"] = \
                self.create_fs_snapshot_details_dict(fs_snapshot)
        else:
            self.result["filesystem_snapshot_details"] = {}
        self.result["changed"] = changed
        self.module.exit_json(**self.result)
def to_update_expiry_time(fs_snapshot, expiry_time=None):
    """Return True when *expiry_time* is requested and differs from the
    snapshot's current expiration time."""
    if not expiry_time:
        return False
    current_expiration = fs_snapshot.expiration_time
    if current_expiration is None:
        # No expiry set yet, so any requested expiry is a change.
        return True
    # A non-zero difference (in seconds) means the requested expiry
    # differs from the one already configured.
    return convert_timestamp_to_sec(expiry_time, current_expiration) != 0
def convert_timestamp_to_sec(expiry_time, snap_time):
    """Return the signed difference in whole seconds between
    *expiry_time* (an 'MM/DD/YYYY HH:MM' string) and *snap_time*
    (a datetime), after truncating *snap_time* to minute precision."""
    fmt = '%m/%d/%Y %H:%M'
    # Round snap_time down to the minute via a format round-trip so both
    # operands carry the same precision.
    snap_minute = datetime.strptime(snap_time.strftime(fmt), fmt)
    expiry = datetime.strptime(expiry_time, fmt)
    return int((expiry - snap_minute).total_seconds())
def get_unity_snapshot_parameters():
    """This method provide parameter required for the ansible filesystem
    snapshot module on Unity"""
    # Most parameters are optional strings; build those in declaration
    # order, then append the differently-typed ones.
    optional_str_names = ('snapshot_name', 'snapshot_id', 'filesystem_name',
                          'filesystem_id', 'nas_server_name', 'nas_server_id')
    spec = {name: dict(required=False, type='str')
            for name in optional_str_names}
    spec['auto_delete'] = dict(required=False, type='bool')
    spec['expiry_time'] = dict(required=False, type='str')
    spec['description'] = dict(required=False, type='str')
    spec['fs_access_type'] = dict(required=False, type='str',
                                  choices=['Checkpoint', 'Protocol'])
    spec['state'] = dict(required=True, type='str',
                         choices=['present', 'absent'])
    return spec
def main():
    """Entry point: build the Unity filesystem-snapshot manager and run
    the action requested by the playbook."""
    UnityFilesystemSnapshot().perform_module_operation()
# Ansible executes the module file directly, entering through this guard.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
6683987 | from coffin.template import loader
from django.views.generic import create_update as _create_update
import functools
# Re-export Django's generic create/update/delete views with Coffin's
# Jinja2-aware template loader substituted for Django's default loader,
# so callers can use these names as drop-in replacements.
create_object = functools.partial(_create_update.create_object, template_loader=loader)
update_object = functools.partial(_create_update.update_object, template_loader=loader)
delete_object = functools.partial(_create_update.delete_object, template_loader=loader)
| StarcoderdataPython |
3436924 | import sys
from django.core.management.base import BaseCommand, CommandError
from searcher.models import Person, Contribution, Motion, Question
class Command(BaseCommand):
    # Shown by `manage.py help`.
    help = 'Returns the total number of table rows in the DB'
    def handle(self, *args, **options):
        """Count the rows of each searcher model and print a summary
        including the grand total."""
        # Write without a newline so the counts appear after the prompt.
        sys.stdout.write('Counting rows...')
        persons = Person.objects.count()
        contribs = Contribution.objects.count()
        motions = Motion.objects.count()
        questions = Question.objects.count()
        print("""
        {} people
        {} contributions
        {} motions
        {} questions
        {} total rows
        """.format(persons, contribs, motions, questions,
                   persons + contribs + motions + questions))
3364018 | <filename>rllib/policy/tests/test_multi_agent_batch.py
import unittest
from ray.rllib.policy.sample_batch import SampleBatch, MultiAgentBatch
from ray.rllib.utils.test_utils import check_same_batch
class TestMultiAgentBatch(unittest.TestCase):
    """Unit tests for MultiAgentBatch.timeslices().

    The original assertions lived inside list comprehensions whose results
    were discarded, and zip() silently drops unmatched slices — so a wrong
    number of slices could never fail. The comparisons now go through
    _assert_slices_equal(), which checks the lengths explicitly and uses a
    plain loop for its side-effecting checks.
    """

    @staticmethod
    def _single_step_batch(t, eps_id, agent_idx):
        """Build the expected one-timestep SampleBatch for a single agent."""
        return SampleBatch(
            {
                SampleBatch.T: [t],
                SampleBatch.EPS_ID: [eps_id],
                SampleBatch.AGENT_INDEX: [agent_idx],
                SampleBatch.SEQ_LENS: [1],
            }
        )

    def _assert_slices_equal(self, actual, expected):
        """Compare sliced MultiAgentBatches pairwise against expectations.

        Asserts the slice counts match first, since zip() would otherwise
        hide extra or missing slices.
        """
        self.assertEqual(len(actual), len(expected))
        for got, want in zip(actual, expected):
            check_same_batch(got, want)

    def test_timeslices_non_overlapping_experiences(self):
        """Tests if timeslices works as expected on a MultiAgentBatch
        consisting of two non-overlapping SampleBatches.
        """

        def _generate_data(agent_idx):
            # Each agent contributes its own 2-step episode.
            batch = SampleBatch(
                {
                    SampleBatch.T: [0, 1],
                    SampleBatch.EPS_ID: 2 * [agent_idx],
                    SampleBatch.AGENT_INDEX: 2 * [agent_idx],
                    SampleBatch.SEQ_LENS: [2],
                }
            )
            return batch

        policy_batches = {str(idx): _generate_data(idx) for idx in (range(2))}
        ma_batch = MultiAgentBatch(policy_batches, 4)
        sliced_ma_batches = ma_batch.timeslices(1)
        # Episodes do not overlap, so every timestep becomes its own slice.
        expected = [
            MultiAgentBatch({"0": self._single_step_batch(0, 0, 0)}, 1),
            MultiAgentBatch({"0": self._single_step_batch(1, 0, 0)}, 1),
            MultiAgentBatch({"1": self._single_step_batch(0, 1, 1)}, 1),
            MultiAgentBatch({"1": self._single_step_batch(1, 1, 1)}, 1),
        ]
        self._assert_slices_equal(sliced_ma_batches, expected)

    def test_timeslices_partially_overlapping_experiences(self):
        """Tests if timeslices works as expected on a MultiAgentBatch
        consisting of two partially overlapping SampleBatches.
        """

        def _generate_data(agent_idx, t_start):
            # Same episode, but agent idx starts at timestep t_start.
            batch = SampleBatch(
                {
                    SampleBatch.T: [t_start, t_start + 1],
                    SampleBatch.EPS_ID: [0, 0],
                    SampleBatch.AGENT_INDEX: 2 * [agent_idx],
                    SampleBatch.SEQ_LENS: [2],
                }
            )
            return batch

        policy_batches = {str(idx): _generate_data(idx, idx) for idx in (range(2))}
        ma_batch = MultiAgentBatch(policy_batches, 4)
        sliced_ma_batches = ma_batch.timeslices(1)
        # Only the middle timestep (t=1) is shared by both agents.
        expected = [
            MultiAgentBatch({"0": self._single_step_batch(0, 0, 0)}, 1),
            MultiAgentBatch(
                {
                    "0": self._single_step_batch(1, 0, 0),
                    "1": self._single_step_batch(1, 0, 1),
                },
                1,
            ),
            MultiAgentBatch({"1": self._single_step_batch(2, 0, 1)}, 1),
        ]
        self._assert_slices_equal(sliced_ma_batches, expected)

    def test_timeslices_fully_overlapping_experiences(self):
        """Tests if timeslices works as expected on a MultiAgentBatch
        consisting of two fully overlapping SampleBatches.
        """

        def _generate_data(agent_idx):
            # Both agents act on the same episode and the same timesteps.
            batch = SampleBatch(
                {
                    SampleBatch.T: [0, 1],
                    SampleBatch.EPS_ID: [0, 0],
                    SampleBatch.AGENT_INDEX: 2 * [agent_idx],
                    SampleBatch.SEQ_LENS: [2],
                }
            )
            return batch

        policy_batches = {str(idx): _generate_data(idx) for idx in (range(2))}
        ma_batch = MultiAgentBatch(policy_batches, 4)
        sliced_ma_batches = ma_batch.timeslices(1)
        # Every slice contains both agents.
        expected = [
            MultiAgentBatch(
                {
                    "0": self._single_step_batch(0, 0, 0),
                    "1": self._single_step_batch(0, 0, 1),
                },
                1,
            ),
            MultiAgentBatch(
                {
                    "0": self._single_step_batch(1, 0, 0),
                    "1": self._single_step_batch(1, 0, 1),
                },
                1,
            ),
        ]
        self._assert_slices_equal(sliced_ma_batches, expected)
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| StarcoderdataPython |
4948400 | from pygal_maps_world.i18n import COUNTRIES
# NOTE: the module import path has changed (COUNTRIES now comes from pygal_maps_world).
def get_country_code(country_name):
    """Return the two-letter country code for the given country name.

    Searches pygal's COUNTRIES mapping (code -> localized name) for an
    exact name match.
    """
    for code, name in COUNTRIES.items():
        if name == country_name:
            return code
    # No matching country was found.
    return None
3285668 | import llvmlite.binding as llvm
def argsIOrole(kernelname, source, filename=None, arglist=False):
    """Classify each argument of an OpenCL kernel as 'input', 'output' or
    'input/output' by inspecting the kernel's LLVM IR.

    Parameters
    ----------
    kernelname : str
        Name of the kernel function inside *source*.
    source : str
        LLVM IR text, or OpenCL C source code (detected when parsing as
        IR fails).
    filename : str, optional
        Path of the OpenCL source file on disk; required when *source* is
        source code, because clang is invoked on the file itself.
    arglist : bool, optional
        When True, also return the argument names in declaration order.

    Returns
    -------
    dict mapping argument repr -> role, and optionally the argument list.
    """
    try:
        m = llvm.parse_assembly(source)
    except RuntimeError:  # it is source code, not LLVM IR
        if filename is None:
            raise ValueError('The filename argument must not be None if source code (and not LLVM IR) is provided')
        import tempfile
        import subprocess as sp
        llvmIRfile = tempfile.NamedTemporaryFile('r+')
        # Compile the OpenCL source file to LLVM IR; the input path must be
        # the caller-supplied filename (this is why it is mandatory above).
        outs = sp.run(
            f'clang -c -x cl -emit-llvm -S -cl-std=CL2.0 -Xclang -finclude-default-header -fno-discard-value-names {filename} -o {llvmIRfile.name}'.split(),
            stdout=sp.PIPE, stderr=sp.PIPE
        )
        if outs.stderr != b'':
            raise RuntimeError('Compilation of the provided source file failed:\n' + outs.stderr.decode('ascii'))
        with open(llvmIRfile.name, 'r') as f:
            m = llvm.parse_assembly(f.read())
    try:
        f = m.get_function(kernelname)
    except NameError:
        raise NameError(f'The provided LLVM IR does not include a kernel named `{kernelname}`')
    # Sanity checks: we expect a top-level function value.
    assert f.module is m
    assert f.function is None
    assert f.block is None
    assert f.is_function and not (f.is_block or f.is_instruction)
    # step 1: if argument is "readonly", it is input
    # (non-pointer arguments are passed by value, hence always input)
    iorole = dict(
        [
            (
                str(a),
                'input' if b'readonly' in a.attributes or not a.type.is_pointer else 'output'
            )
            for a in f.arguments
        ]
    )
    # step 2: for the rest, we must find if the kernel is reading from them (i.e., global load)
    # if yes, they are input/output, otherwise they are only output
    loads = []
    for b in f.blocks:
        for i in b.instructions:
            if i.opcode == 'load':
                loads.append(i)
    for load in loads:
        # Walk the load's address operand back to the kernel argument it
        # originates from (through getelementptr and bitcast chains).
        loadfrom = list(load.operands)[0]
        bufferidx = loadfrom.name
        gep = None
        for b in f.blocks:
            for i in b.instructions:
                if i.name == bufferidx:
                    gep = i
        # prepare case where a value is loaded from a bitcasted reference
        if 'bitcast' in str(loadfrom):
            bitcasted = list(filter(lambda x: x.startswith('%'), str(loadfrom).split()))[-1].split('%')[-1]
            for b in f.blocks:
                for i in b.instructions:
                    if i.name == bitcasted:
                        gep = i
        if gep is None:
            # Address does not resolve to a named instruction (e.g. a
            # direct argument load); nothing to reclassify here.
            # Previously a stale `gep` from an earlier iteration could be
            # reused at this point.
            continue
        if gep.opcode == 'getelementptr':
            buffer = str(list(gep.operands)[0])
            # is the buffer a bitcast? (i.e. a "renaming" of the kernel argument via a pointer)
            while 'bitcast' in buffer:
                buffer = buffer.split('bitcast ')[1].split(' to')[0]
                for b in f.blocks:
                    for i in b.instructions:
                        if i.name == buffer:
                            buffer = i
            if buffer in iorole.keys() and iorole[buffer] == 'output':
                # The kernel both reads and writes this buffer.
                iorole[buffer] = 'input/output'
    if arglist:
        return iorole, [str(a) for a in f.arguments]
    return iorole
| StarcoderdataPython |
298292 | import collections
import io
import numpy as np
import torch
from fastprogress.fastprogress import force_console_behavior
master_bar, progress_bar = force_console_behavior()
def predict_test_data(
    cpc, logreg, data, device, config, params, fixed_params=False, task12=True, task3=True
):
    """Run the CPC encoder plus logistic-regression heads over the
    unlabeled test sequences and collect npz-compressed predictions and
    logits, keyed by sample group and annotator/task.

    Returns (test_predictions, test_logits, task3_test_logits), each a
    dict of dicts holding compressed npz byte strings.
    """
    def load_task_params(task):
        # Restore the (cpc, logreg) weights for the given task. With
        # fixed_params, a single shared state-dict pair is used for every
        # task; use_best_task0 forces task 0's weights everywhere.
        if config.use_best_task0:
            task = 0
        if fixed_params:
            cpc_state_dict, logreg_state_dict = params
        else:
            cpc_state_dict, logreg_state_dict = params[task]
        cpc.load_state_dict(cpc_state_dict)
        logreg.load_state_dict(logreg_state_dict)
        cpc_ = cpc.eval()
        logreg_ = logreg.eval()
        return cpc_, logreg_
    crop_pre, crop_post = cpc.get_crops(device)
    def add_padding(seq):
        # Zero-pad both ends so the model's temporal crops do not consume
        # real data at the sequence boundaries.
        return np.concatenate(
            (
                np.zeros_like(seq)[:crop_pre],
                seq,
                np.zeros_like(seq)[:crop_post],
            )
        )
    cpc, logreg = load_task_params(0)
    test_predictions = collections.defaultdict(dict)
    test_logits = collections.defaultdict(dict)
    if task12:
        with torch.no_grad():
            bar = progress_bar(range(len(data.X_unlabeled)))
            for idx in bar:
                x = add_padding(data.X_unlabeled[idx].astype(np.float32))
                x_extra = None
                if config.use_extra_features:
                    x_extra = data.X_unlabeled_extra[idx].astype(np.float32)
                    x_extra = torch.from_numpy(x_extra).to(device, non_blocking=True)
                g = data.groups_unlabeled[idx]
                # Model expects (batch, channels, time).
                x = torch.transpose(torch.from_numpy(x[None, :, :]), 2, 1).to(
                    device, non_blocking=True
                )
                x_emb = cpc.embedder(x)
                c = cpc.apply_contexter(x_emb, device)
                logreg_features = c[0].T
                # Task 0 predictions are produced once per annotator.
                for annotator in range(data.num_annotators):
                    a = np.array([annotator]).repeat(len(logreg_features))
                    task = 0
                    l = logreg(logreg_features, x_extra, a, task)
                    l = torch.cat((l[:1], l), dim=0)  # crop from feature preprocessing
                    p = torch.argmax(l, dim=-1)
                    assert len(p) == len(data.X_unlabeled[idx]) + 1
                    # Store npz-compressed bytes to keep the result small.
                    with io.BytesIO() as buffer:
                        np.savez_compressed(buffer, p.cpu().data.numpy())
                        test_predictions[g.decode("utf-8")][annotator] = buffer.getvalue()
                    with io.BytesIO() as buffer:
                        np.savez_compressed(buffer, l.cpu().data.numpy().astype(np.float32))
                        test_logits[g.decode("utf-8")][annotator] = buffer.getvalue()
    task3_test_logits = collections.defaultdict(dict)
    if task3:
        annotator = 0  # only first annotator for task 3
        # Tasks >= 1 each have their own trained parameters.
        for task in range(1, max(data.clf_tasks) + 1):
            cpc, logreg = load_task_params(task)
            with torch.no_grad():
                # bar = progress_bar(range(len(data.X_unlabeled)))
                bar = range(len(data.X_unlabeled))
                for idx in bar:
                    x = add_padding(data.X_unlabeled[idx].astype(np.float32))
                    x_extra = None
                    if config.use_extra_features:
                        x_extra = data.X_unlabeled_extra[idx].astype(np.float32)
                        x_extra = torch.from_numpy(x_extra).to(device, non_blocking=True)
                    g = data.groups_unlabeled[idx]
                    x = torch.transpose(torch.from_numpy(x[None, :, :]), 2, 1).to(
                        device, non_blocking=True
                    )
                    x_emb = cpc.embedder(x)
                    c = cpc.apply_contexter(x_emb, device)
                    logreg_features = c[0].T
                    a = np.array([annotator]).repeat(len(logreg_features))
                    l = logreg(logreg_features, x_extra, a, task)
                    l = torch.cat((l[:1], l), dim=0)  # crop from feature preprocessing
                    p = torch.argmax(l, dim=-1)
                    assert len(p) == len(data.X_unlabeled[idx]) + 1
                    with io.BytesIO() as buffer:
                        np.savez_compressed(buffer, l.cpu().data.numpy().astype(np.float32))
                        task3_test_logits[g.decode("utf-8")][task] = buffer.getvalue()
    return test_predictions, test_logits, task3_test_logits
| StarcoderdataPython |
1661170 | <filename>tools/json_schema_compiler/schema_util.py
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utilies for the processing of schema python structures.
"""
def StripSchemaNamespace(s):
  """Return *s* with everything up to and including the last '.' removed.

  Strings without a '.' are returned unchanged.
  """
  last_dot = s.rfind('.')
  # rfind returns -1 when no dot exists; `!=` reads better than `not ... ==`.
  if last_dot != -1:
    return s[last_dot + 1:]
  return s
def PrefixSchemasWithNamespace(schemas):
  """Qualify ids/refs in each schema with that schema's own namespace."""
  for schema in schemas:
    _PrefixWithNamespace(schema.get("namespace"), schema)
def _MaybePrefixFieldWithNamespace(namespace, schema, key):
if type(schema) == dict and key in schema:
old_value = schema[key]
if not "." in old_value:
schema[key] = namespace + "." + old_value
def _PrefixTypesWithNamespace(namespace, types):
for t in types:
_MaybePrefixFieldWithNamespace(namespace, t, "id")
_MaybePrefixFieldWithNamespace(namespace, t, "customBindings")
def _PrefixWithNamespace(namespace, schema):
if type(schema) == dict:
if "types" in schema:
_PrefixTypesWithNamespace(namespace, schema.get("types"))
_MaybePrefixFieldWithNamespace(namespace, schema, "$ref")
for s in schema:
_PrefixWithNamespace(namespace, schema[s])
elif type(schema) == list:
for s in schema:
_PrefixWithNamespace(namespace, s)
| StarcoderdataPython |
3494047 | <reponame>KamilWilczek/Log_app
from django.db import models
class Truck(models.Model):
    """A truck available for haulage bookings."""
    # Make of the tractor unit.
    car_manufacturer = models.CharField(max_length=100)
    # Description of the attached semitrailer.
    semitrailer = models.CharField(max_length=100)
    # Load capacity, stored as free text.
    capacity = models.CharField(max_length=100)
    # Licence plate; must be unique per truck.
    registration_number = models.CharField(max_length=100, unique=True)
    # Optional free-form note.
    message = models.CharField(max_length=500, blank=True)
    # Set automatically when the row is first saved.
    created_at = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        # Displayed in the admin and in shell output.
        return self.car_manufacturer
class Car(models.Model):
    """A car that can be associated with any number of trucks."""
    # Many-to-many: a car may relate to several trucks and vice versa.
    truck = models.ManyToManyField(Truck)
    name = models.CharField(max_length=50)
    def __str__(self):
        # Displayed in the admin and in shell output.
        return self.name
6675938 | # -- coding: utf-8 --
TEST = False # If True, run in test mode. If False, run in live mode
import os
from os.path import basename
from bookings import app
import settings
from google.appengine.ext import db
from models import Booking
from booking_ref_functions import derive
from email_templates import get_booking_confirmation_email_body
from email_templates import get_booking_confirmation_email_no_payment_body
from email_templates import get_pseudo_pdf_attachment_body
from calculations import calculate
from flask import render_template
from flask import url_for
from flask import flash
from flask import redirect
from flask import request
from flask import session
from forms import BookingForm
import datetime
from decimal import Decimal
import apiclient
from oauth2client.contrib.flask_util import UserOAuth2
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.image import MIMEImage
from email.mime.application import MIMEApplication
import base64
from google.appengine.api import urlfetch
OAUTH2 = UserOAuth2(app)
CALENDAR = apiclient.discovery.build('calendar', 'v3')
SCOPES = ["https://www.googleapis.com/auth/calendar", "https://www.googleapis.com/auth/spreadsheets", 'https://www.googleapis.com/auth/gmail.send']
@app.route('/')
def homepage():
    """Render the site's landing page."""
    return render_template('homepage.html')
@app.route('/view_bookings')
def list_bookings():
    """Render every stored booking, ordered by the date it was made."""
    # GQL query against the App Engine datastore.
    bookings = db.GqlQuery("SELECT * FROM Booking ORDER BY booking_date")
    return render_template('list_registrations.html', bookings=bookings)
def is_already_paid(booking_details):
    """Return True when the booking source already collected payment."""
    prepaid_sources = {'airbnb', 'booking.com'}
    return booking_details['source'] in prepaid_sources
def extract_booking_details(form):
    """Build a Booking datastore entity plus a flat details dict from a
    submitted BookingForm.

    NOTE(review): the local name `property` shadows the builtin of the
    same name; harmless here but worth renaming in a future change.
    """
    booking_reference = form.booking_reference.data
    number_of_people = int(form.number_of_people.data)
    # derive() decodes dates, property and fee defaults from the
    # booking reference — assumed; confirm against booking_ref_functions.
    derived = derive(booking_reference, number_of_people)
    property = derived['booking_property']
    first_name = form.first_name.data
    last_name = form.last_name.data
    source = form.source.data
    email = form.email.data
    notes = form.notes.data
    phone = form.phone.data
    arrival_date = (derived['booking_arrival_date']).date()
    departure_date = (derived['booking_departure_date']).date()
    gross = float(form.gross.data)
    is_discount = bool(form.is_discount.data)
    commission = float(derived['commission'])
    due_date = datetime.date.today()
    is_commission = bool(form.is_commission.data)
    # Despite the is_* naming these hold fee AMOUNTS (floats): the derived
    # fee when the corresponding checkbox is ticked, otherwise 0.0.
    is_greeting = derived['booking_greeting'] if bool(form.is_greeting.data) else 0.0
    is_laundry = derived['booking_laundry'] if bool(form.is_laundry.data) else 0.0
    is_cleaning = derived['booking_cleaning'] if bool(form.is_cleaning.data) else 0.0
    is_con = derived['booking_consumables'] if bool(form.is_con.data) else 0.0
    booking_commission = float(derived['booking_commission'])
    # booking.com bookings always use a fixed 15% commission, overriding
    # the derived value.
    booking_commission = 0.15 if source == 'booking.com' else booking_commission
    house_owner_commission = float(derived['house_owner_commission'])
    net, booking_fee, house_owner_fee = calculate(gross, source, booking_commission, house_owner_commission)
    total_fees = sum([house_owner_fee, is_greeting, is_laundry, is_cleaning, is_con])
    owner_income = net - total_fees
    booking_date = datetime.date.today()
    # Datastore entity (persisted later by store_booking_in_database).
    booking = Booking(
        booking_reference = booking_reference
        , property = property
        , first_name = first_name
        , last_name = last_name
        , source = source
        , email = email
        , notes = notes
        , phone = phone
        , arrival_date = arrival_date
        , departure_date = departure_date
        , number_of_people = number_of_people
        , gross = gross
        , is_discount = is_discount
        , commission = commission
        , due_date = due_date
        , is_commission = is_commission
        , is_greeting = is_greeting
        , is_laundry = is_laundry
        , is_cleaning = is_cleaning
        , is_con = is_con
        , net = net
        , booking_fee = booking_fee
        , house_owner_fee = house_owner_fee
        , total_fees = total_fees
        , owner_income = owner_income
        )
    # Plain dict used by the email/calendar helpers; carries the entity
    # itself under 'booking'.
    booking_details = { 'booking' : booking
                        , 'booking_reference' : booking_reference
                        , 'property' : property
                        , 'first_name' : first_name
                        , 'last_name' : last_name
                        , 'source' : source
                        , 'email' : email
                        , 'notes' : notes
                        , 'phone' : phone
                        , 'arrival_date' : arrival_date#.strftime("%d/%m/%Y")
                        , 'departure_date' : departure_date#.strftime("%d/%m/%Y")
                        , 'number_of_people' : number_of_people
                        , 'number_of_nights' : (departure_date - arrival_date).days
                        , 'net' : net
                        , 'gross' : gross
                        , 'is_discount' : is_discount
                        , 'is_commission' : is_commission
                        , 'is_greeting' : is_greeting
                        , 'is_laundry' : is_laundry
                        , 'is_cleaning' : is_cleaning
                        , 'is_con' : is_con
                        , 'booking_date' : booking_date
                        , 'commission' : commission
                        , 'due_date' : due_date
                        , 'booking_fee' : booking_fee
                        , 'house_owner_fee' : house_owner_fee
                        , 'total_fees' : total_fees
                        , 'owner_income' : owner_income
                        }
    return booking_details
def store_booking_in_database(booking_details):
    """Persist the pre-built Booking entity (under the 'booking' key) via its put() method."""
    booking_details['booking'].put()
def add_one_day(departure_date):
    """Return *departure_date* shifted forward by a single day.

    Google Calendar 'All Day' events end at midnight at the start of the
    end date, so a whole-day booking must end one day after the guest
    actually departs for the event to cover the departure date itself.
    """
    return departure_date + datetime.timedelta(days=1)
def get_guest_reminder(booking_details):
    """Build the all-day Google Calendar event spanning the guest's whole stay."""
    # All-day events end at midnight at the start of the end date, so the
    # event must be stretched one day past the actual departure date.
    checkout = add_one_day(booking_details['departure_date'])
    summary = "%(first_name)s %(last_name)s, %(number_of_people)d people. %(phone)s" % booking_details
    description = "Phone number: %(phone)s Email: %(email)s %(number_of_people)d people staying, leaving on %(departure_date)s. Booked through %(source)s. Notes: %(notes)s" % booking_details
    return {
        "start": {
            "date": booking_details['arrival_date'].strftime("%Y-%m-%d"),
            "timeZone": "Europe/London"
        },
        "end": {
            "date": checkout.strftime("%Y-%m-%d"),
            "timeZone": "Europe/London"
        },
        "summary": summary,
        "description": description
    }
def get_greeting_reminder(booking_details):
    """Build the 30-minute calendar event reminding staff to greet the guest on arrival day."""
    arrival = booking_details['arrival_date']
    description = "Phone number: %(phone)s Email: %(email)s %(number_of_people)d people staying, leaving on %(departure_date)s. Booked through %(source)s. Notes: %(notes)s" % booking_details
    # Fixed greeting slot: 15:00-15:30 local (London) time on the arrival date.
    return {
        "start": {
            "dateTime": arrival.strftime("%Y-%m-%dT15:00:00"),
            "timeZone": "Europe/London"
        },
        "end": {
            "dateTime": arrival.strftime("%Y-%m-%dT15:30:00"),
            "timeZone": "Europe/London"
        },
        "summary": "MG",
        "description": description
    }
def get_cleaning_reminder(booking_details):
    """Build the two-hour calendar event for cleaning after the guest checks out."""
    departure = booking_details['departure_date']
    zone = "Europe/London"
    # Fixed cleaning slot: 11:00-13:00 local time on the departure date.
    return {
        "start": {
            "dateTime": departure.strftime("%Y-%m-%dT11:00:00"),
            "timeZone": zone
        },
        "end": {
            "dateTime": departure.strftime("%Y-%m-%dT13:00:00"),
            "timeZone": zone
        },
        "summary": "Clean",
        "description": ""
    }
def create_calendar_reminders(booking_details):
    """Insert the guest-stay, greeting and cleaning events into Google Calendar.

    Returns the three API responses as a (guest, greeting, cleaning) tuple.
    """
    authorised_http = OAUTH2.http()
    if TEST:
        target_calendar = 'primary' # TODO: comment this out when LIVE
    else:
        target_calendar = settings.MAPPINGS[booking_details['property']]['calendar']

    def insert_event(event):
        # Each reminder is an independent insert against the same calendar.
        return CALENDAR.events().insert(
            calendarId=target_calendar, body=event).execute(http=authorised_http)

    guest_response = insert_event(get_guest_reminder(booking_details))
    greeting_response = insert_event(get_greeting_reminder(booking_details))
    cleaning_response = insert_event(get_cleaning_reminder(booking_details))
    return guest_response, greeting_response, cleaning_response
def create_spreadsheet_entry(booking_details):
    """Append one row describing the booking to the bookings spreadsheet.

    Builds a Sheets API v4 client on the fly and appends a single row to
    Sheet1. Returns the raw append API response.
    """
    sheetHttp = OAUTH2.http()
    discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
                    'version=v4')
    SHEETS = apiclient.discovery.build('sheets', 'v4', http=sheetHttp, discoveryServiceUrl=discoveryUrl)
    spreadsheetId = settings.SPREADSHEET_ID
    if TEST:
        spreadsheetId = 'testspreadsheetid' # TODO: comment this out when LIVE
    rangeName = 'Sheet1!A1:A10'
    # NOTE(review): the column order below presumably mirrors the
    # spreadsheet's header row — verify before reordering any entry.
    data = {
        "range": "Sheet1!A1:A10",
        "majorDimension": "ROWS",
        "values": [ [ booking_details['booking_reference']
                    , booking_details['property']
                    , booking_details['first_name']
                    , booking_details['last_name']
                    , booking_details['email']
                    , "'" + booking_details['phone'] # so that phone number displays in cell
                    , booking_details['notes']
                    , str(booking_details['booking_date'])
                    , booking_details['source']
                    , str(booking_details['arrival_date'])
                    , str(booking_details['departure_date'])
                    , str(booking_details['number_of_people'])
                    , str(booking_details['gross'])
                    , str(booking_details['net'])
                    , str(booking_details['is_discount'])
                    , str(booking_details['commission'])
                    , str(booking_details['due_date'])
                    , str(booking_details['is_commission'])
                    , str(booking_details['is_greeting'])
                    , str(booking_details['is_laundry'])
                    , str(booking_details['is_cleaning'])
                    , str(booking_details['is_con'])
                    , str(booking_details['booking_fee'])
                    , str(booking_details['house_owner_fee'])
                    , str(booking_details['total_fees'])
                    , str(booking_details['owner_income'])
                    ] ],
    }
    # Raise the urlfetch deadline: the Sheets append can exceed the default.
    urlfetch.set_default_fetch_deadline(60)
    result = SHEETS.spreadsheets().values().append(
        spreadsheetId=spreadsheetId, body=data, valueInputOption='USER_ENTERED', range=rangeName).execute(sheetHttp)
    return result
def email_guest_booking_confirmation(booking_details):
    """Send the guest an HTML booking-confirmation email via the Gmail API.

    The message is multipart/related: an HTML body referencing two inline
    images by Content-ID, plus either a PDF terms-and-conditions attachment
    or, for already-paid bookings, the terms appended as text (airbnb's
    email viewer does not display PDFs). Returns the Gmail send response.
    """
    gmailHttp = OAUTH2.http()
    gmailService = apiclient.discovery.build('gmail', 'v1', http=gmailHttp)
    strFrom = 'me'  # 'me' is the Gmail API alias for the authenticated account
    strTo = booking_details['email']
    # Create the root message and fill in the from, to, and subject headers
    msgRoot = MIMEMultipart('related')
    msgRoot['Subject'] = 'Booking Confirmation'
    msgRoot['From'] = strFrom
    msgRoot['To'] = strTo
    msgRoot.preamble = 'This is a multi-part message in MIME format.'
    # Encapsulate the plain and HTML versions of the message body in an
    # 'alternative' part, so message agents can decide which they want to display.
    msgAlternative = MIMEMultipart('alternative')
    msgRoot.attach(msgAlternative)
    msgText = MIMEText('This email is only available to read in HTML format. Please switch your email viewing mode to HTML to read this email.')
    msgAlternative.attach(msgText)
    body = get_booking_confirmation_email_body(booking_details)
    if is_already_paid(booking_details):
        body = get_booking_confirmation_email_no_payment_body(booking_details)
    # Attach the two inline images referenced from the HTML body by cid.
    fp = open('./bookings/static/images/logo_medium.jpg', 'rb')
    msgImage = MIMEImage(fp.read(), _subtype="jpg")
    fp.close()
    # Define the image's ID as referenced above
    msgImage.add_header('Content-ID', '<image2>')
    msgRoot.attach(msgImage)
    fp = open('./bookings/static/images/header_large.jpg', 'rb')
    msgImage = MIMEImage(fp.read(), _subtype="jpg")
    fp.close()
    msgImage.add_header('Content-ID', '<image1>')
    msgRoot.attach(msgImage)
    if is_already_paid(booking_details): # airbnb email viewer does not display PDF, so include TnC in text.
        body += get_pseudo_pdf_attachment_body()
    else:
        attachment_path = "./bookings/static/pdf/foo.pdf"
        # Fix: open the PDF in binary mode. Text mode can corrupt the bytes
        # (newline translation) or fail outright trying to decode them.
        with open(attachment_path, 'rb') as pdf_file:
            pdf = MIMEApplication(pdf_file.read(), _subtype='pdf')
            pdf.add_header('content-disposition', 'attachment', filename=basename(attachment_path))
            msgRoot.attach(pdf)
    msgText = MIMEText(body.encode('utf-8'), 'html', 'utf-8')
    msgAlternative.attach(msgText)
    # Gmail API expects the full RFC 2822 message, urlsafe-base64 encoded.
    msg = {'raw': base64.urlsafe_b64encode(msgRoot.as_string())}
    results = gmailService.users().messages().send(userId='me', body=msg).execute(http=gmailHttp)
    return results
@app.route('/booking', methods=['GET', 'POST'])
@OAUTH2.required(scopes=SCOPES)
def new_booking_1():
    """Show the booking form and, when it validates, process the booking.

    Processing order: calendar reminders, spreadsheet row, confirmation
    email, then datastore persistence.
    """
    form = BookingForm()
    if form.validate_on_submit():
        booking_details = extract_booking_details(form)
        create_calendar_reminders(booking_details)
        create_spreadsheet_entry(booking_details)
        email_guest_booking_confirmation(booking_details)
        store_booking_in_database(booking_details)
        flash("Booking made.")
        return redirect(url_for('homepage'))
    return render_template('new_booking_1.html', form=form, properties=settings.PROPERTIES)
@app.route('/calendars', methods=['GET', 'POST'])
@OAUTH2.required(scopes=["https://www.googleapis.com/auth/calendar"])
def list_calendarIds():
    """Render every calendar visible to the authorised account as HTML lists."""
    authorised_http = OAUTH2.http()
    fragments = []
    page_token = None
    while True:
        # One <ul> per API result page; pagination via nextPageToken.
        calendar_list = CALENDAR.calendarList().list(pageToken=page_token).execute(http=authorised_http)
        fragments.append("<ul>")
        for entry in calendar_list['items']:
            fragments.append("<li>")
            fragments.append("Name: " + entry['summary'] + ", calendarId: " + entry['id'])
            fragments.append("</li>")
        fragments.append("</ul>")
        page_token = calendar_list.get('nextPageToken')
        if not page_token:
            break
    return "".join(fragments)
@app.errorhandler(404)
def page_not_found(e):
    # Render the custom 404 page while preserving the 404 status code.
    return render_template('404.html'), 404
| StarcoderdataPython |
1642988 | <gh_stars>1-10
from datetime import datetime, timedelta
from unittest.mock import patch
import pytest
import pytz
from django.core import mail
from django.utils import timezone
from graphql_relay import to_global_id
from occurrences.consts import NOTIFICATION_TYPE_ALL, NOTIFICATION_TYPE_SMS
from occurrences.factories import (
EnrolmentFactory,
OccurrenceFactory,
PalvelutarjotinEventFactory,
StudyGroupFactory,
)
from occurrences.models import Enrolment
from occurrences.tests.test_api import MASS_APPROVE_ENROLMENTS_MUTATION
from organisations.factories import PersonFactory
from common.tests.utils import assert_mails_match_snapshot
@pytest.mark.django_db
def test_occurrence_enrolment_notifications_email_only(
    snapshot,
    notification_template_occurrence_unenrolment_fi,
    notification_template_occurrence_enrolment_fi,
    notification_template_occurrence_unenrolment_en,
    notification_template_occurrence_enrolment_en,
    mock_get_event_data,
    occurrence,
    study_group,
):
    """Enrol/unenrol each send an email; a fi and an en group yield four mails total."""
    EnrolmentFactory(
        study_group=study_group, occurrence=occurrence, person=study_group.person
    )
    occurrence.study_groups.remove(study_group)
    # Test notification language
    en_study_group = StudyGroupFactory(person=PersonFactory(language="en"))
    EnrolmentFactory(
        study_group=en_study_group, occurrence=occurrence, person=study_group.person
    )
    occurrence.study_groups.remove(en_study_group)
    assert len(mail.outbox) == 4
    assert_mails_match_snapshot(snapshot)
@pytest.mark.django_db
@patch("occurrences.utils.notification_service.send_sms")
def test_occurrence_enrolment_notification_sms_only(
    mock_send_sms,
    snapshot,
    notification_sms_template_occurrence_enrolment_en,
    notification_sms_template_occurrence_enrolment_fi,
    notification_sms_template_occurrence_unenrolment_en,
    notification_sms_template_occurrence_unenrolment_fi,
    mock_get_event_data,
    occurrence,
    study_group,
):
    """NOTIFICATION_TYPE_SMS sends an SMS on enrol and on unenrol, and no email."""
    EnrolmentFactory(
        study_group=study_group,
        occurrence=occurrence,
        notification_type=NOTIFICATION_TYPE_SMS,
        person=study_group.person,
    )
    occurrence.study_groups.remove(study_group)
    assert len(mail.outbox) == 0
    assert mock_send_sms.call_count == 2
@pytest.mark.django_db
@patch("occurrences.utils.notification_service.send_sms")
def test_occurrence_enrolment_notification_sms_and_email(
    mock_send_sms,
    snapshot,
    notification_template_occurrence_unenrolment_fi,
    notification_template_occurrence_enrolment_fi,
    notification_template_occurrence_unenrolment_en,
    notification_template_occurrence_enrolment_en,
    notification_sms_template_occurrence_enrolment_en,
    notification_sms_template_occurrence_enrolment_fi,
    notification_sms_template_occurrence_unenrolment_en,
    notification_sms_template_occurrence_unenrolment_fi,
    mock_get_event_data,
    occurrence,
    study_group,
):
    """NOTIFICATION_TYPE_ALL sends both an email and an SMS on enrol and on unenrol."""
    Enrolment.objects.create(
        study_group=study_group,
        occurrence=occurrence,
        notification_type=NOTIFICATION_TYPE_ALL,
        person=study_group.person,
    )
    occurrence.study_groups.remove(study_group)
    assert len(mail.outbox) == 2
    assert mock_send_sms.call_count == 2
@pytest.mark.django_db
def test_approve_enrolment_notification_email(
    mock_get_event_data,
    mock_enrolment_unique_id,
    notification_template_enrolment_approved_en,
    notification_template_enrolment_approved_fi,
    snapshot,
):
    """Approving an enrolment sends exactly one approval email."""
    study_group = StudyGroupFactory(group_size=5)
    occurrence = OccurrenceFactory(amount_of_seats=10)
    enrolment = EnrolmentFactory(
        study_group=study_group, occurrence=occurrence, person=study_group.person
    )
    p_event = occurrence.p_event
    # To test the cancel link generated only if event only requires 1 occurrence per
    # enrolment
    p_event.needed_occurrences = 1
    p_event.save()
    enrolment.approve(custom_message="custom message")
    assert len(mail.outbox) == 1
    assert_mails_match_snapshot(snapshot)
@pytest.mark.django_db
def test_decline_enrolment_notification_email(
    mock_get_event_data,
    mock_enrolment_unique_id,
    notification_template_enrolment_declined_en,
    notification_template_enrolment_declined_fi,
    snapshot,
    occurrence,
    study_group,
):
    """Declining an enrolment sends exactly one declined email."""
    enrolment = EnrolmentFactory(
        study_group=study_group, occurrence=occurrence, person=study_group.person
    )
    enrolment.decline(custom_message="custom message")
    assert len(mail.outbox) == 1
    assert_mails_match_snapshot(snapshot)
@pytest.mark.django_db
def test_cancel_enrolment_notification_email(
    mock_get_event_data,
    mock_enrolment_unique_id,
    notification_template_enrolment_cancellation_confirmation_en,
    notification_template_enrolment_cancellation_confirmation_fi,
    snapshot,
    occurrence,
    study_group,
):
    """Asking for cancellation confirmation sends exactly one confirmation email."""
    enrolment = EnrolmentFactory(
        study_group=study_group, occurrence=occurrence, person=study_group.person
    )
    enrolment.ask_cancel_confirmation(custom_message="custom message")
    assert len(mail.outbox) == 1
    assert_mails_match_snapshot(snapshot)
@pytest.mark.django_db
def test_cancelled_enrolment_notification_email(
    mock_get_event_data,
    notification_template_enrolment_cancelled_en,
    notification_template_enrolment_cancelled_fi,
    snapshot,
    occurrence,
    study_group,
):
    """Cancelling an enrolment sends exactly one cancelled email."""
    # NOTE(review): the study_group fixture argument is immediately shadowed
    # by the locally created group below, so the fixture value is unused.
    person = PersonFactory(email_address="<EMAIL>")
    study_group = StudyGroupFactory(person=person)
    enrolment = EnrolmentFactory(
        study_group=study_group, occurrence=occurrence, person=person
    )
    enrolment.cancel(custom_message="custom message")
    assert len(mail.outbox) == 1
    assert_mails_match_snapshot(snapshot)
@pytest.mark.django_db
def test_occurrence_enrolment_notifications_to_contact_person(
    snapshot,
    notification_template_occurrence_unenrolment_fi,
    notification_template_occurrence_enrolment_fi,
    notification_template_occurrence_unenrolment_en,
    notification_template_occurrence_enrolment_en,
    mock_get_event_data,
    occurrence,
    study_group,
):
    """Enrol/unenrol emails are addressed to the enrolment's contact person."""
    contact_person = PersonFactory(email_address="<EMAIL>")
    Enrolment.objects.create(
        study_group=study_group, occurrence=occurrence, person=contact_person
    )
    occurrence.study_groups.remove(study_group)
    # Test notification language
    en_study_group = StudyGroupFactory(
        person=PersonFactory(language="en", email_address="<EMAIL>")
    )
    Enrolment.objects.create(
        study_group=en_study_group, occurrence=occurrence, person=contact_person
    )
    occurrence.study_groups.remove(en_study_group)
    assert len(mail.outbox) == 4
    assert_mails_match_snapshot(snapshot)
@pytest.mark.django_db
def test_cancel_occurrence_notification(
    snapshot,
    occurrence,
    mock_get_event_data,
    notification_template_cancel_occurrence_en,
    notification_template_cancel_occurrence_fi,
):
    """Cancelling an occurrence notifies every enrolment except cancelled/declined ones."""
    # Create two enrolments in every possible enrolment status.
    for status in Enrolment.STATUSES:
        for s in StudyGroupFactory.create_batch(2):
            EnrolmentFactory(
                study_group=s, occurrence=occurrence, person=s.person, status=status[0]
            )
    notifiable_enrolments_count = occurrence.enrolments.all().count() - (
        occurrence.enrolments.filter(
            status__in=[Enrolment.STATUS_CANCELLED, Enrolment.STATUS_DECLINED]
        ).count()
    )
    occurrence.cancel(reason="Occurrence cancel reason")
    # Cancellation messages should not be sent to enrolments
    # that are already cancelled or declined.
    assert len(mail.outbox) == notifiable_enrolments_count
    assert_mails_match_snapshot(snapshot)
@pytest.mark.parametrize(
    "tz", [pytz.timezone("Europe/Helsinki"), pytz.utc, pytz.timezone("US/Eastern")],
)
@pytest.mark.django_db
def test_local_time_notification(
    tz,
    snapshot,
    mock_get_event_data,
    notification_template_occurrence_enrolment_en,
    notification_template_occurrence_enrolment_fi,
    study_group,
):
    """The same instant stored in different timezones renders identically in the email."""
    dt = datetime.now()
    occurrence = OccurrenceFactory(start_time=dt.astimezone(tz))
    EnrolmentFactory(
        study_group=study_group, occurrence=occurrence, person=study_group.person
    )
    # Different timezone should result same localtime in email
    assert_mails_match_snapshot(snapshot)
@pytest.mark.parametrize(
    "auto_acceptance", [True, False],
)
@pytest.mark.django_db
def test_only_send_approved_notification(
    auto_acceptance,
    snapshot,
    mock_get_event_data,
    notification_template_occurrence_enrolment_en,
    notification_template_enrolment_approved_en,
    notification_template_occurrence_enrolment_fi,
    notification_template_enrolment_approved_fi,
):
    """Auto-accepted events skip the pending-enrolment email and send only the approval one."""
    study_group = StudyGroupFactory(group_size=5)
    occurrence = OccurrenceFactory(
        p_event__auto_acceptance=auto_acceptance, amount_of_seats=10
    )
    enrol = EnrolmentFactory(
        study_group=study_group, occurrence=occurrence, person=study_group.person
    )
    assert len(mail.outbox) == (0 if auto_acceptance else 1)
    # Fake auto approval because it can only be triggered from approve mutation
    enrol.approve()
    assert len(mail.outbox) == (1 if auto_acceptance else 2)
    assert_mails_match_snapshot(snapshot)
@pytest.mark.django_db
def test_send_enrolment_summary_report(
    snapshot, mock_get_event_data, notification_template_enrolment_summary_report_fi,
):
    """The summary report sends one digest per distinct provider contact email."""
    # Event 1: mixed statuses across three occurrences.
    p_event_1 = PalvelutarjotinEventFactory.create()
    occurrence_1_1 = OccurrenceFactory.create(id=11, p_event=p_event_1)
    occurrence_1_2 = OccurrenceFactory.create(id=12, p_event=p_event_1)
    occurrence_1_3 = OccurrenceFactory.create(id=13, p_event=p_event_1)
    EnrolmentFactory.create(status=Enrolment.STATUS_APPROVED, occurrence=occurrence_1_1)
    EnrolmentFactory.create_batch(
        3, status=Enrolment.STATUS_PENDING, occurrence=occurrence_1_1
    )
    EnrolmentFactory.create(status=Enrolment.STATUS_PENDING, occurrence=occurrence_1_2)
    EnrolmentFactory.create(
        status=Enrolment.STATUS_CANCELLED, occurrence=occurrence_1_3
    )
    # Event 2: pending enrolments on two occurrences.
    p_event_2 = PalvelutarjotinEventFactory.create()
    occurrence_2_1 = OccurrenceFactory.create(id=21, p_event=p_event_2)
    occurrence_2_2 = OccurrenceFactory.create(id=22, p_event=p_event_2)
    EnrolmentFactory.create(status=Enrolment.STATUS_PENDING, occurrence=occurrence_2_1)
    EnrolmentFactory.create(status=Enrolment.STATUS_PENDING, occurrence=occurrence_2_2)
    # Event with same contact person with event 2
    p_event_3 = PalvelutarjotinEventFactory.create(
        contact_email=p_event_2.contact_email
    )
    occurrence_3_1 = OccurrenceFactory.create(id=31, p_event=p_event_3)
    EnrolmentFactory.create(status=Enrolment.STATUS_PENDING, occurrence=occurrence_3_1)
    # Event with auto_acceptance is True
    p_event_4 = PalvelutarjotinEventFactory.create(
        auto_acceptance=True, contact_email=p_event_2.contact_email
    )
    occurrence_4_1 = OccurrenceFactory.create(id=41, p_event=p_event_4)
    EnrolmentFactory.create(status=Enrolment.STATUS_APPROVED, occurrence=occurrence_4_1)
    old_enrolment = EnrolmentFactory.create(
        status=Enrolment.STATUS_APPROVED, occurrence=occurrence_4_1
    )
    # Backdate one enrolment to check the report's time window handling.
    old_enrolment.enrolment_time = timezone.now() - timedelta(days=10)
    old_enrolment.save()
    Enrolment.objects.send_enrolment_summary_report_to_providers()
    # Events 2-4 share a contact email, so only two digests go out.
    assert len(mail.outbox) == 2
    assert_mails_match_snapshot(snapshot)
@pytest.mark.django_db
def test_decline_enrolment_notification_email_to_multiple_contact_person(
    mock_get_event_data,
    notification_template_enrolment_declined_en,
    notification_template_enrolment_declined_fi,
    snapshot,
    occurrence,
    study_group,
):
    """Declining emails one recipient when contacts match, two when they differ."""
    # Single contact person
    enrolment = EnrolmentFactory(
        study_group=study_group, occurrence=occurrence, person=study_group.person
    )
    enrolment.decline(custom_message="custom message")
    assert len(mail.outbox) == 1
    assert_mails_match_snapshot(snapshot)
    # Enrolment of two different contact person
    enrolment_2 = EnrolmentFactory()
    enrolment_2.decline(custom_message="custom message")
    assert len(mail.outbox) == 3
    assert_mails_match_snapshot(snapshot)
@pytest.mark.django_db
def test_mass_approve_enrolment_mutation(
    snapshot,
    staff_api_client,
    mock_get_event_data,
    mock_enrolment_unique_id,
    notification_template_enrolment_approved_en,
    notification_template_enrolment_approved_fi,
):
    """Mass-approving three enrolments via GraphQL emails two recipients per enrolment."""
    occurrence = OccurrenceFactory(
        p_event__needed_occurrences=1,
        p_event__auto_acceptance=False,
        amount_of_seats=100,
    )
    enrolment_1 = EnrolmentFactory(occurrence=occurrence, study_group__group_size=10)
    enrolment_2 = EnrolmentFactory(occurrence=occurrence, study_group__group_size=10)
    enrolment_3 = EnrolmentFactory(occurrence=occurrence, study_group__group_size=10)
    # The staff user must belong to the event's organisation to approve.
    staff_api_client.user.person.organisations.add(occurrence.p_event.organisation)
    staff_api_client.execute(
        MASS_APPROVE_ENROLMENTS_MUTATION,
        variables={
            "input": {
                "enrolmentIds": [
                    to_global_id("EnrolmentNode", enrolment_1.id),
                    to_global_id("EnrolmentNode", enrolment_2.id),
                    to_global_id("EnrolmentNode", enrolment_3.id),
                ],
                "customMessage": "Custom message",
            }
        },
    )
    # Two people got email for each enrolment
    # (study group contact person & enrolment teacher)
    assert len(mail.outbox) == 6
    assert_mails_match_snapshot(snapshot)
| StarcoderdataPython |
9706820 | <filename>memote/suite/cli/reports.py<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright 2017 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide commands for generating report files."""
from __future__ import absolute_import
import logging
import sys
from builtins import open
import click
import git
from sqlalchemy.exc import ArgumentError
import os
import memote.suite.api as api
import memote.suite.results as managers
import memote.suite.cli.callbacks as callbacks
from memote.suite.cli import CONTEXT_SETTINGS
from memote.suite.reporting import ReportConfiguration
LOGGER = logging.getLogger(__name__)
# Parent click group: only hosts the subcommands registered below
# (snapshot, history, diff); the docstring doubles as the CLI help text.
@click.group()
@click.help_option("--help", "-h")
def report():
    """Generate one of three different types of reports."""
    pass
@report.command(context_settings=CONTEXT_SETTINGS)
@click.help_option("--help", "-h")
@click.argument("model", type=click.Path(exists=True, dir_okay=False),
                envvar="MEMOTE_MODEL",
                callback=callbacks.validate_model)
@click.option("--filename", type=click.Path(exists=False, writable=True),
              default="index.html", show_default=True,
              help="Path for the HTML report output.")
@click.option("--pytest-args", "-a", callback=callbacks.validate_pytest_args,
              help="Any additional arguments you want to pass to pytest. "
                   "Should be given as one continuous string.")
@click.option("--exclusive", type=str, multiple=True, metavar="TEST",
              help="The name of a test or test module to be run exclusively. "
                   "All other tests are skipped. This option can be used "
                   "multiple times and takes precedence over '--skip'.")
@click.option("--skip", type=str, multiple=True, metavar="TEST",
              help="The name of a test or test module to be skipped. This "
                   "option can be used multiple times.")
@click.option("--solver", type=click.Choice(["cplex", "glpk", "gurobi"]),
              default="glpk", show_default=True,
              help="Set the solver to be used.")
@click.option("--experimental", type=click.Path(exists=True, dir_okay=False),
              default=None, callback=callbacks.validate_experimental,
              help="Define additional tests using experimental data.")
@click.option("--custom-tests", type=click.Path(exists=True, file_okay=False),
              multiple=True,
              help="A path to a directory containing custom test "
                   "modules. Please refer to the documentation for more "
                   "information on how to write custom tests. May be "
                   "specified multiple times.")
@click.option("--custom-config", type=click.Path(exists=True, dir_okay=False),
              multiple=True,
              help="A path to a report configuration file that will be merged "
                   "into the default configuration. It's primary use is to "
                   "configure the placement and scoring of custom tests but "
                   "it can also alter the default behavior. Please refer to "
                   "the documentation for the expected YAML format used. This "
                   "option can be specified multiple times.")
def snapshot(model, filename, pytest_args, exclusive, skip, solver,
             experimental, custom_tests, custom_config):
    """
    Take a snapshot of a model's state and generate a report.
    MODEL: Path to model file. Can also be supplied via the environment variable
    MEMOTE_MODEL or configured in 'setup.cfg' or 'memote.ini'.
    """
    # Suppress pytest tracebacks unless the caller explicitly configured them.
    if not any(a.startswith("--tb") for a in pytest_args):
        pytest_args = ["--tb", "no"] + pytest_args
    # Add further directories to search for tests.
    pytest_args.extend(custom_tests)
    config = ReportConfiguration.load()
    # Update the default test configuration with custom ones (if any).
    for custom in custom_config:
        config.merge(ReportConfiguration.load(custom))
    model.solver = solver
    # Run the full test suite against the model and render the HTML report.
    _, results = api.test_model(model, results=True, pytest_args=pytest_args,
                                skip=skip, exclusive=exclusive,
                                experimental=experimental)
    with open(filename, "w", encoding="utf-8") as file_handle:
        LOGGER.info("Writing snapshot report to '%s'.", filename)
        file_handle.write(api.snapshot_report(results, config))
@report.command(context_settings=CONTEXT_SETTINGS)
@click.help_option("--help", "-h")
@click.option("--location", envvar="MEMOTE_LOCATION",
              help="Location of test results. Can either by a directory or an "
                   "rfc1738 compatible database URL.")
@click.option("--model", envvar="MEMOTE_MODEL",
              help="The path of the model file. Used to check if it was "
                   "modified.")
@click.option("--filename", type=click.Path(exists=False, writable=True),
              default="index.html", show_default=True,
              help="Path for the HTML report output.")
@click.option("--deployment", default="gh-pages", show_default=True,
              help="Results will be read from and committed to the given "
                   "branch.")
@click.option("--custom-config", type=click.Path(exists=True, dir_okay=False),
              multiple=True,
              help="A path to a report configuration file that will be merged "
                   "into the default configuration. It's primary use is to "
                   "configure the placement and scoring of custom tests but "
                   "it can also alter the default behavior. Please refer to "
                   "the documentation for the expected YAML format used. This "
                   "option can be specified multiple times.")
def history(location, model, filename, deployment, custom_config):
    """Generate a report over a model's git commit history."""
    if model is None:
        raise click.BadParameter("No 'model' path given or configured.")
    if location is None:
        raise click.BadParameter("No 'location' given or configured.")
    try:
        repo = git.Repo()
    except git.InvalidGitRepositoryError:
        LOGGER.critical(
            "The history report requires a git repository in order to check "
            "the model's commit history.")
        sys.exit(1)
    previous = repo.active_branch
    repo.heads[deployment].checkout()
    # Fix: restore the previously active branch even when loading results or
    # rendering the report fails; the original left the repository on the
    # deployment branch on any exception.
    try:
        # Results may live in a database (rfc1738 URL) or as files in a
        # directory; try the database manager first.
        try:
            manager = managers.SQLResultManager(repository=repo,
                                                location=location)
        except (AttributeError, ArgumentError):
            manager = managers.RepoResultManager(
                repository=repo, location=location)
        config = ReportConfiguration.load()
        # Update the default test configuration with custom ones (if any).
        for custom in custom_config:
            config.merge(ReportConfiguration.load(custom))
        history = managers.HistoryManager(repository=repo, manager=manager)
        history.load_history(model, skip={deployment})
        report = api.history_report(history, config=config)
    finally:
        previous.checkout()
    # Write the report on the originally active branch, as before.
    with open(filename, "w", encoding="utf-8") as file_handle:
        file_handle.write(report)
@report.command(context_settings=CONTEXT_SETTINGS)
@click.help_option("--help", "-h")
@click.argument("models", type=click.Path(exists=True, dir_okay=False),
                nargs=-1)
@click.option("--filename", type=click.Path(exists=False, writable=True),
              default="index.html", show_default=True,
              help="Path for the HTML report output.")
@click.option("--pytest-args", "-a", callback=callbacks.validate_pytest_args,
              help="Any additional arguments you want to pass to pytest. "
                   "Should be given as one continuous string.")
@click.option("--exclusive", type=str, multiple=True, metavar="TEST",
              help="The name of a test or test module to be run exclusively. "
                   "All other tests are skipped. This option can be used "
                   "multiple times and takes precedence over '--skip'.")
@click.option("--skip", type=str, multiple=True, metavar="TEST",
              help="The name of a test or test module to be skipped. This "
                   "option can be used multiple times.")
@click.option("--solver", type=click.Choice(["cplex", "glpk", "gurobi"]),
              default="glpk", show_default=True,
              help="Set the solver to be used.")
@click.option("--experimental", type=click.Path(exists=True, dir_okay=False),
              default=None, callback=callbacks.validate_experimental,
              help="Define additional tests using experimental data.")
@click.option("--custom-tests", type=click.Path(exists=True, file_okay=False),
              multiple=True,
              help="A path to a directory containing custom test "
                   "modules. Please refer to the documentation for more "
                   "information on how to write custom tests "
                   "(memote.readthedocs.io). This option can be specified "
                   "multiple times.")
@click.option("--custom-config", type=click.Path(exists=True, dir_okay=False),
              multiple=True,
              help="A path to a report configuration file that will be merged "
                   "into the default configuration. It's primary use is to "
                   "configure the placement and scoring of custom tests but "
                   "it can also alter the default behavior. Please refer to "
                   "the documentation for the expected YAML format used "
                   "(memote.readthedocs.io). This option can be specified "
                   "multiple times.")
def diff(models, filename, pytest_args, exclusive, skip, solver,
         experimental, custom_tests, custom_config):
    """
    Take a snapshot of all the supplied models and generate a diff report.
    MODELS: List of paths to two or more model files.
    """
    # Suppress pytest tracebacks unless the caller explicitly configured them.
    if not any(a.startswith("--tb") for a in pytest_args):
        pytest_args = ["--tb", "no"] + pytest_args
    # Add further directories to search for tests.
    pytest_args.extend(custom_tests)
    config = ReportConfiguration.load()
    # Update the default test configuration with custom ones (if any).
    for custom in custom_config:
        config.merge(ReportConfiguration.load(custom))
    # Build the diff report specific data structure
    diff_results = dict()
    for model_path in models:
        # Fix: compute the display name outside the try block. Previously it
        # was assigned as the first statement inside the try, so a failure
        # before that assignment would raise a NameError inside the except
        # handler while formatting the warning message.
        model_filename = os.path.basename(model_path)
        diff_results.setdefault(model_filename, dict())
        try:
            model = callbacks._load_model(model_path)
            model.solver = solver
            _, diff_results[model_filename] = api.test_model(
                model, results=True, pytest_args=pytest_args,
                skip=skip, exclusive=exclusive, experimental=experimental)
        except Exception as e:
            # A model that fails to load or test keeps its empty entry so the
            # report still lists it.
            LOGGER.warning(
                "The following exception occurred while loading model {}: {}"
                "".format(model_filename, e))
    with open(filename, "w", encoding="utf-8") as file_handle:
        LOGGER.info("Writing diff report to '%s'.", filename)
        file_handle.write(api.diff_report(diff_results, config))
| StarcoderdataPython |
6622474 | import os
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from torch.utils.data import Dataset
from encoding import MolecularEncoder
ST1_ENERGY_GAP_MEAN = 0.8486
ST1_ENERGY_GAP_STD = 0.3656
class SSDDataset(Dataset):
    """A dataset class for `Samsung AI Challenge For Scientific Discovery` competition.

    Args:
        dataset: A pandas dataframe object containing energy informations.
        structure_files: A list of SDF molfiles.
        encoder: A molecular structure encoder.
        bond_drop_prob: The probability of dropping molecular bonds. Default is `0.1`.
    """

    def __init__(
        self,
        dataset: pd.DataFrame,
        structure_files: List[str],
        encoder: MolecularEncoder,
        bond_drop_prob: float = 0.1,
    ):
        self.examples = []
        self.encoder = encoder
        self.bond_drop_prob = bond_drop_prob
        for structure_file in structure_files:
            # The uid is the molfile name without its 4-character extension.
            example = {"uid": os.path.basename(structure_file)[:-4]}
            with open(structure_file, "r") as fp:
                example["structure"] = parse_mol_structure(fp.read())
            # Skip files too short to contain a valid counts line.
            if example["structure"] is None:
                continue
            # Training data carries the energy columns; attach the normalized
            # S1-T1 energy gap as the regression label.
            if "S1_energy(eV)" in dataset and "T1_energy(eV)" in dataset:
                s1_energy = dataset.loc[example["uid"], "S1_energy(eV)"]
                t1_energy = dataset.loc[example["uid"], "T1_energy(eV)"]
                labels = s1_energy - t1_energy
                labels = (labels - ST1_ENERGY_GAP_MEAN) / ST1_ENERGY_GAP_STD
                example["labels"] = labels
            self.examples.append(example)

    def __len__(self) -> int:
        return len(self.examples)

    def __getitem__(
        self, index: int
    ) -> Tuple[str, Dict[str, Union[str, List[Union[int, float]]]]]:
        example = self.examples[index]
        structure = example["structure"]
        if np.random.rand() < self.bond_drop_prob:
            # Bond-drop augmentation: for a selected molecule, each bond is
            # kept independently with probability 0.85, so the expected number
            # of remaining bonds is 85% of the original. Only the molecule
            # selection probability is configurable, not the per-bond rate.
            #
            # Fix: operate on a local copy only. The previous implementation
            # wrote the reduced structure back into ``self.examples``, so
            # dropped bonds were lost permanently and drops accumulated over
            # epochs instead of being a transient augmentation.
            structure = structure.copy()
            structure["bonds"] = [
                bond for bond in structure["bonds"] if np.random.rand() > 0.15
            ]
        encoding = self.encoder.encode(structure)
        if "labels" in example:
            encoding["labels"] = example["labels"]
        return (example["uid"], encoding)
def parse_mol_structure(data: str) -> Optional[Dict]:
    """Parse a SDF molecular file to the simple structure dictionary.

    Args:
        data: The content of SDF molfile.

    Returns:
        The parsed 3D molecular structure dictionary with keys:
        * ``atoms``: ``[x, y, z, symbol, charge-or-radical]`` per atom.
        * ``bonds``: ``[first-atom, second-atom, bond-type]`` per bond, with
          0-based atom indices.
        ``None`` is returned when the content is too short to be a molfile.
    """
    data = data.splitlines()
    # A molfile starts with a three-line header followed by the counts line.
    if len(data) < 4:
        return None
    data = data[3:]
    # Counts line: columns 1-3 hold the atom count, 4-6 the bond count.
    num_atoms, num_bonds = int(data[0][:3]), int(data[0][3:6])

    atoms = []
    for line in data[1 : 1 + num_atoms]:
        x, y, z = float(line[:10]), float(line[10:20]), float(line[20:30])
        # Atom-block charge codes: 0, +3, +2, +1, doublet radical, -1, -2, -3.
        charge = [0, 3, 2, 1, "^", -1, -2, -3][int(line[36:39])]
        atoms.append([x, y, z, line[31:34].strip(), charge])

    bonds = []
    for line in data[1 + num_atoms : 1 + num_atoms + num_bonds]:
        # Convert the 1-based atom indices of the bond block to 0-based.
        bonds.append([int(line[:3]) - 1, int(line[3:6]) - 1, int(line[6:9])])

    for line in data[1 + num_atoms + num_bonds :]:
        # Bug fix: V2000 property lines are "M  CHG" / "M  RAD" with *two*
        # spaces after the "M" -- the column offsets below ([6:9], 10+8*i, ...)
        # already assumed that layout, so the previous one-space prefixes
        # never matched and every charge/radical property was silently lost.
        if not line.startswith("M  CHG") and not line.startswith("M  RAD"):
            continue
        for i in range(int(line[6:9])):
            idx = int(line[10 + 8 * i : 13 + 8 * i]) - 1
            value = int(line[14 + 8 * i : 17 + 8 * i])
            # RAD values 1/2/3 denote singlet/doublet/triplet radicals.
            atoms[idx][4] = (
                [":", "^", "^^"][value - 1] if line.startswith("M  RAD") else value
            )
    return {"atoms": atoms, "bonds": bonds}
| StarcoderdataPython |
5156088 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 21 20:24:16 2020
@author: <NAME> & <NAME>
"""
import matplotlib.pyplot as plt
from matplotlib import animation, rc
rc('animation', html='jshtml')
from IPython.display import HTML
#from IPython.display import display, clear_output
import PIL
import zarr
from pathlib import Path
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', None)
#import scipy as sp
import itertools as it
import seaborn as sns
from l5kit.data import ChunkedDataset, LocalDataManager
from l5kit.dataset import EgoDataset, AgentDataset
from l5kit.rasterization import build_rasterizer
from l5kit.configs import load_config_data
from l5kit.visualization import draw_trajectory, TARGET_POINTS_COLOR
from l5kit.geometry import transform_points
#from tqdm import tqdm
#from collections import Counter
from l5kit.data import PERCEPTION_LABELS
#from prettytable import PrettyTable
from IPython.display import display, clear_output
import os
# import gc
# import os
# from pathlib import Path
# import random
# import sys
# from tqdm.notebook import tqdm
# import numpy as np
# import pandas as pd
# import scipy as sp
# import matplotlib.pyplot as plt
# import seaborn as sns
# from IPython.core.display import display, HTML
# # --- plotly ---
# from plotly import tools, subplots
# import plotly.offline as py
# py.init_notebook_mode(connected=True)
# import plotly.graph_objs as go
# import plotly.express as px
# import plotly.figure_factory as ff
# import plotly.io as pio
# pio.templates.default = "plotly_dark"
# # --- models ---
# from sklearn import preprocessing
# from sklearn.model_selection import KFold
# import lightgbm as lgb
# import xgboost as xgb
# import catboost as cb
# pd.set_option('max_columns', 50)
# Point l5kit's LocalDataManager at the local data directory.
os.environ["L5KIT_DATA_FOLDER"] = "data"
cfg = load_config_data("examples/visualisation/visualisation_config.yaml")
print(cfg)
# Loading sample data for EDA
# set env variable for data
dm = LocalDataManager()
dataset_path = dm.require('scenes/sample.zarr')
zarr_dataset = ChunkedDataset(dataset_path)
# Open the dataset and print its textual summary.
zarr_dataset.open()
print(zarr_dataset)
# Shorthand handles for the zarr tables used by the EDA below.
frames = zarr_dataset.frames
agents = zarr_dataset.agents
scenes = zarr_dataset.scenes
# tl_faces = zarr_dataset.tl_faces
# Visualization Functions
def animate_solution(images, timestamps=None):
    """Build a jshtml-ready matplotlib animation from a list of frames.

    Args:
        images: sequence of image arrays (one per frame).
        timestamps: optional sequence, same length as `images`, shown as a
            text overlay in the top-left corner.

    Returns:
        The `matplotlib.animation.FuncAnimation` object (figure is closed so
        it is not also rendered inline).
    """
    fig, ax = plt.subplots()
    im = ax.imshow(images[0])
    if timestamps is not None:
        time_text = ax.text(0.02, 0.95, "", transform=ax.transAxes)

    def animate(i):
        # With blit=True only the artists returned here get redrawn.
        changed_artifacts = [im]
        im.set_data(images[i])
        if timestamps is not None:
            time_text.set_text(timestamps[i])
            # Bug fix: `im` was previously appended a second time instead of
            # `time_text`, so blitting never redrew the timestamp overlay.
            changed_artifacts.append(time_text)
        return tuple(changed_artifacts)

    anim = animation.FuncAnimation(fig, animate, frames=len(images), interval=60, blit=True)
    # To prevent plotting image inline.
    plt.close()
    return anim
def visualize_rgb_image(dataset, index, title="", ax=None):
    """Visualizes Rasterizer's RGB image"""
    sample = dataset[index]
    # Dataset images are channel-first; the rasterizer wants channel-last.
    rgb = dataset.rasterizer.to_rgb(sample["image"].transpose(1, 2, 0))
    if ax is None:
        _, ax = plt.subplots()
    if title:
        ax.set_title(title)
    # Flip vertically so the scene is displayed the right way up.
    ax.imshow(rgb[::-1])
def create_animate_for_indexes(dataset, indexes):
    """Render every sample in `indexes` and assemble the frames into an animation."""
    images = []
    timestamps = []
    for idx in indexes:
        sample = dataset[idx]
        rgb = dataset.rasterizer.to_rgb(sample["image"].transpose(1, 2, 0))
        # Project the target trajectory from world coordinates into pixels.
        target_positions_pixels = transform_points(
            sample["target_positions"] + sample["centroid"][:2], sample["world_to_image"])
        # Computed but unused in the original as well; kept for parity.
        center_in_pixels = np.asarray(cfg["raster_params"]["ego_center"]) * cfg["raster_params"]["raster_size"]
        draw_trajectory(rgb, target_positions_pixels, sample["target_yaws"], TARGET_POINTS_COLOR)
        clear_output(wait=True)
        images.append(PIL.Image.fromarray(rgb[::-1]))
        timestamps.append(sample["timestamp"])
    return animate_solution(images, timestamps)
def create_animate_for_scene(dataset, scene_idx):
    """Animate every frame belonging to a single scene."""
    return create_animate_for_indexes(dataset, dataset.get_scene_indices(scene_idx))
# Prepare all rasterizer and EgoDataset for each rasterizer
rasterizer_dict = {}
dataset_dict = {}
rasterizer_type_list = ["py_satellite", "satellite_debug", "py_semantic", "semantic_debug", "box_debug", "stub_debug"]
for i, key in enumerate(rasterizer_type_list):
    # print("key", key)
    # Each map_type gets its own rasterizer plus a matching EgoDataset view.
    cfg["raster_params"]["map_type"] = key
    rasterizer_dict[key] = build_rasterizer(cfg, dm)
    dataset_dict[key] = EgoDataset(cfg, zarr_dataset, rasterizer_dict[key])
# default lane color is "light yellow" (255, 217, 82).
# green, yellow, red color on lane is to show traffic light condition.
# orange box represents crosswalk
# Render frame 0 with each rasterizer side by side for comparison.
fig, axes = plt.subplots(2, 3, figsize=(15, 10))
axes = axes.flatten()
for i, key in enumerate(["stub_debug", "satellite_debug", "semantic_debug", "box_debug", "py_satellite", "py_semantic"]):
    visualize_rgb_image(dataset_dict[key], index=0, title=f"{key}: {type(rasterizer_dict[key]).__name__}", ax=axes[i])
fig.show()
# Scenes animations
# That will work just fine in the notebook
dataset = dataset_dict["py_semantic"]
# Raise the embed limit so the JS animation is not truncated in the notebook.
plt.rcParams['animation.embed_limit'] = 4**128
SceneIndex = 10
print("scene_idx", SceneIndex)
anim = create_animate_for_scene(dataset, SceneIndex)
display(HTML(anim.to_jshtml()))
# Agents EDA
agent = agents[0]
# Human-readable names for the per-agent label probability columns
# (re-declared locally, shadowing the l5kit import above).
PERCEPTION_LABELS = [
    "NOT_SET",
    "UNKNOWN",
    "DONTCARE",
    "CAR",
    "VAN",
    "TRAM",
    "BUS",
    "TRUCK",
    "EMERGENCY_VEHICLE",
    "OTHER_VEHICLE",
    "BICYCLE",
    "MOTORCYCLE",
    "CYCLIST",
    "MOTORCYCLIST",
    "PEDESTRIAN",
    "ANIMAL",
    "DONTCARE",
]
DATA_ROOT = Path("C:/Users/Omar/Documents/GitHub/Kaggle-Lyft/data")
#A robust and fast interface to load l5kit data into Pandas dataframes
class BaseParser:
    """Base class that loads one zarr field into a pandas DataFrame.

    Subclasses set `field` (the zarr dataset key), optionally `dtypes`
    (column -> dtype coercions), and implement `parse` to flatten one raw
    zarr record into a dict of scalar columns.
    """

    field = "scenes"  # zarr dataset key this parser reads
    dtypes = {}  # optional column -> dtype coercions applied after load

    def __init__(self, start=0, end=None, chunk_size=1000, max_chunks=1000, root=DATA_ROOT,
                 zarr_path="scenes/sample.zarr"):
        self.start = start
        self.end = end
        self.chunk_size = chunk_size
        self.max_chunks = max_chunks
        self.root = Path(root)
        assert self.root.exists(), "There is nothing at {}!".format(self.root)
        self.zarr_path = Path(zarr_path)

    def parse(self):
        # Subclasses must override; AgentParser below makes this a
        # staticmethod taking the raw record.
        raise NotImplementedError

    def to_pandas(self, start=0, end=None, chunk_size=None, max_chunks=None):
        # NOTE(review): `x or self.x` treats explicit 0/None arguments as
        # "use the constructor value", so start=0 cannot override a non-zero
        # self.start -- confirm that is intended.
        start = start or self.start
        end = end or self.end
        chunk_size = chunk_size or self.chunk_size
        max_chunks = max_chunks or self.max_chunks
        if not chunk_size or not max_chunks:  # One shot load, suitable for small zarr files
            df = zarr.load(self.root.joinpath(self.zarr_path).as_posix()).get(self.field)
            df = df[start:end]
            df = map(self.parse, df)
        else:  # Chunked load, suitable for large zarr files
            df = []
            # NOTE(review): zarr groups are not documented as context managers
            # in all zarr versions -- confirm `with zarr.open(...)` works here.
            with zarr.open(self.root.joinpath(self.zarr_path).as_posix(), "r") as zf:
                # Clamp `end` so at most max_chunks * chunk_size rows are read.
                end = start + max_chunks * chunk_size if end is None else min(end, start + max_chunks * chunk_size)
                for i_start in range(start, end, chunk_size):
                    items = zf[self.field][i_start: min(i_start + chunk_size, end)]
                    items = map(self.parse, items)
                    df.append(items)
            df = it.chain(*df)
        df = pd.DataFrame.from_records(df)
        for col, col_dtype in self.dtypes.items():
            df[col] = df[col].astype(col_dtype, copy=False)
        return df
class AgentParser(BaseParser):
    """Parser that flattens raw l5kit agent records into per-agent rows."""

    field = "agents"

    @staticmethod
    def parse(agent):
        # Raw record layout: (centroid, extent, yaw, velocity, track_id,
        # label_probabilities).
        record = {
            'centroid_x': agent[0][0],
            'centroid_y': agent[0][1],
            'extent_x': agent[1][0],
            'extent_y': agent[1][1],
            'extent_z': agent[1][2],
            'yaw': agent[2],
            "velocity_x": agent[3][0],
            "velocity_y": agent[3][1],
            "track_id": agent[4],
        }
        # One probability column per perception class.
        for p_label, p in zip(PERCEPTION_LABELS, agent[5]):
            record[p_label] = p
        return record

    def to_pandas(self, start=0, end=None, chunk_size=None, max_chunks=None, frame=None):
        # When a frame row is supplied, restrict parsing to the agent index
        # interval belonging to that frame.
        if frame is not None:
            start = int(frame.agent_index_interval_start)
            end = int(frame.agent_index_interval_end)
        return super().to_pandas(start=start, end=end, chunk_size=chunk_size, max_chunks=max_chunks)
# Materialize the full agents table into a dataframe.
ap = AgentParser()
agents_df = ap.to_pandas(frame=None)
agents_df.head()
agents_df.columns
# Agents EDA
agents_df.describe()
agents_df.shape
agents_df.info()
# Total label mass for the three classes of interest.
agents_labels = [agents_df.CAR.sum(), agents_df.PEDESTRIAN.sum(), agents_df.CYCLIST.sum()]
colormap = plt.cm.magma
corr_matrix = ["centroid_x", "centroid_y", "extent_x", "extent_y", "extent_z", "yaw", 'velocity_x', 'velocity_y', "CAR","PEDESTRIAN","CYCLIST" ]
plt.figure(figsize=(20,20));
plt.title('Pearson correlation of features', y=1.0, size=14);
sns.heatmap(agents_df[corr_matrix].corr(),linewidths=0.1,vmax=1.0, square=True, cmap=colormap, linecolor='white', annot=True)
sns.scatterplot(agents_df["centroid_x"],agents_df["centroid_y"])
# Subset: agents confidently labeled car, pedestrian or cyclist (prob >= 0.5).
agents_CPC_df = agents_df.loc[((agents_df.CAR>= 0.5)|(agents_df.PEDESTRIAN>= 0.5)|(agents_df.CYCLIST>= 0.5))]
agents_CPC_df.info()
agents_CPC_df.centroid_x.idxmax()
colormap = plt.cm.magma
corr_matrix = ["centroid_x", "centroid_y", "extent_x", "extent_y", "extent_z", "yaw", 'velocity_x', 'velocity_y', "CAR","PEDESTRIAN","CYCLIST" ]
plt.figure(figsize=(20,20));
plt.title('Pearson correlation of features', y=1.0, size=14);
sns.heatmap(agents_CPC_df[corr_matrix].corr(),linewidths=0.1,vmax=1.0, square=True, cmap=colormap, linecolor='white', annot=True)
# Subset: car-only agents.
agents_CAR_df = agents_df.loc[((agents_df.CAR>= 0.5))]
agents_CAR_df.info()
colormap = plt.cm.magma
corr_matrix = ["centroid_x", "centroid_y", "extent_x", "extent_y", "extent_z", "yaw", 'velocity_x', 'velocity_y']
plt.figure(figsize=(20,20));
plt.title('Pearson correlation of features', y=1.0, size=14);
sns.heatmap(agents_CAR_df[corr_matrix].corr(),linewidths=0.1,vmax=1.0, square=True, cmap=colormap, linecolor='white', annot=True)
| StarcoderdataPython |
9711749 | from ..query.Queryable import Queryable
from ..providers import IQueryProvider
from ..visitors.sql import SqlVisitor
class SqliteQueryProvider(IQueryProvider):
    """Query provider that renders expression trees as SQL for SQLite."""

    def __init__(self, db_provider):
        self.__provider = db_provider
        self.__visitor = SqlVisitor()

    @property
    def db_provider(self):
        """The underlying database provider."""
        return self.__provider

    @property
    def provider_visitor(self):
        """The visitor used to translate expressions into SQL."""
        return self.__visitor

    def createQuery(self, expression):
        """Wrap the given AST expression in a Queryable bound to this provider.

        :param expression: An AST expression instance
        :return: Queryable
        """
        return Queryable(expression, self)

    def execute(self, expression):
        """Render the expression as SQL and run it on the db_provider.

        :param expression: An AST expression instance
        :return: db_provider cursor object
        """
        cursor = self.db_provider.connection.cursor()
        cursor.execute(self.createQuery(expression).sql)
        return cursor
| StarcoderdataPython |
11221024 | <reponame>fcr--/lecli
"""
Team API module.
"""
import sys
import click
import requests
from tabulate import tabulate
from lecli import api_utils
from lecli import response_utils
def _url(provided_path_parts=()):
    """
    Get rest query url of account resource id.
    """
    # NOTE(review): api_utils.build_url appears to return a tuple -- callers
    # in this module index [1] for the usable URL; confirm against api_utils.
    path_parts = ['management', 'accounts', api_utils.get_account_resource_id(), 'teams']
    path_parts += list(provided_path_parts)
    return api_utils.build_url(path_parts)
def print_teams(response):
    """
    Print id, name and user table for every team in the given list.
    """
    for team in response:
        click.echo("ID: %s" % team['id'])
        click.echo("Name: %s" % team['name'])
        click.echo("Users: %s" % tabulate(team['users']))
def print_team(response):
    """
    Print id, name and user table for a single team.
    """
    for label, value in (("ID", response['id']),
                         ("Name", response['name']),
                         ("Users", tabulate(response['users']))):
        click.echo("%s: %s" % (label, value))
def handle_get_teams_response(response):
    """
    Pretty-print a successful teams/team response; exit on API errors.
    """
    if response_utils.response_error(response):
        sys.exit(1)
    if response.status_code == 200:
        body = response.json()
        # The API returns either a list under 'teams' or one dict under 'team'.
        if body.get('teams'):
            print_teams(body['teams'])
        elif body.get('team'):
            print_team(body['team'])
def get_teams():
    """
    Get teams associated with the user.
    """
    headers = api_utils.generate_headers('rw')
    try:
        # Cleanup: a GET request carries no body; the previous `data=''`
        # argument was a no-op (requests skips a falsy body entirely).
        response = requests.get(_url()[1], headers=headers)
        handle_get_teams_response(response)
    except requests.exceptions.RequestException as error:
        click.echo(error, err=True)
        sys.exit(1)
def get_team(team_id):
    """
    Fetch and print one specific team by its id.
    """
    headers = api_utils.generate_headers('rw')
    try:
        response = requests.get(_url((team_id,))[1],
                                params={'teamid': team_id},
                                headers=headers)
        handle_get_teams_response(response)
    except requests.exceptions.RequestException as error:
        click.echo(error, err=True)
        sys.exit(1)
def create_team(name):
    """
    Create a new, empty team with the given name in the current account.

    (Doc fix: the previous docstring incorrectly described this function as
    adding a user.)
    """
    payload = {
        'team': {
            'name': str(name),
            'users': []
        }
    }
    headers = api_utils.generate_headers('rw')
    try:
        response = requests.post(_url()[1], json=payload, headers=headers)
        if response_utils.response_error(response):
            click.echo('Creating team failed.', err=True)
            sys.exit(1)
        elif response.status_code == 201:
            click.echo('Team created with name: %s' % name)
    except requests.exceptions.RequestException as error:
        click.echo(error, err=True)
        sys.exit(1)
def delete_team(team_id):
    """
    Delete the team identified by the provided team ID.
    """
    headers = api_utils.generate_headers('rw')
    try:
        response = requests.delete(_url((team_id,))[1], headers=headers)
        if response_utils.response_error(response):  # Check response has no errors
            click.echo('Delete team failed.', err=True)
            sys.exit(1)
        if response.status_code == 204:
            click.echo('Deleted team with id: %s.' % team_id)
    except requests.exceptions.RequestException as error:
        click.echo(error, err=True)
        sys.exit(1)
def rename_team(team_id, team_name):
    """
    Rename team with the provided team_id.
    """
    payload = {
        'team': {
            'name': team_name,
            # PATCH semantics: sending a placeholder user entry leaves the
            # team's membership untouched and only updates the name.
            'users': [
                {'id': ''}
            ]
        }
    }
    headers = api_utils.generate_headers('rw')
    try:
        response = requests.patch(_url((team_id,))[1], json=payload, headers=headers)
        if response_utils.response_error(response):  # Check response has no errors
            click.echo('Renaming team with id: %s failed.' % team_id, err=True)
            sys.exit(1)
        if response.status_code == 200:
            click.echo("Team: '%s' renamed to: '%s'" % (team_id, team_name))
    except requests.exceptions.RequestException as error:
        click.echo(error, err=True)
        sys.exit(1)
def add_user_to_team(team_id, user_key):
    """
    Add user with the provided user_key to team with provided team_id.
    """
    headers = api_utils.generate_headers('rw')
    params = {'teamid': team_id}
    try:
        # Fetch the team first: it must exist, and its current name is needed
        # for the PATCH payload below.
        response = requests.get(_url((team_id,))[1], params=params, headers=headers)
        if response.status_code == 200:
            params = {
                'team': {
                    'name': response.json()['team']['name'],
                    'users': [
                        # we are doing a patch request here so it's safe to include the user_key
                        # we want to add here
                        {'id': user_key}
                    ]
                }
            }
            headers = api_utils.generate_headers('rw')
            try:
                response = requests.patch(_url((team_id,))[1], json=params, headers=headers)
                if response_utils.response_error(response):  # Check response has no errors
                    click.echo('Adding user to team with key: %s failed.' % team_id, err=True)
                    sys.exit(1)
                elif response.status_code == 200:
                    click.echo('Added user with key: %s to team.' % user_key)
            except requests.exceptions.RequestException as error:
                click.echo(error, err=True)
                sys.exit(1)
        elif response_utils.response_error(response):
            # The initial GET failed: the team could not be found.
            click.echo('Cannot find team. Adding user to team %s failed.' % team_id, err=True)
            sys.exit(1)
    except requests.exceptions.RequestException as error:
        click.echo(error, err=True)
        sys.exit(1)
def delete_user_from_team(team_id, user_key):
    """
    Delete a user from a team.

    The team is fetched first; its user list (minus `user_key`) is then
    written back with a PUT request.
    """
    headers = api_utils.generate_headers('rw')
    params = {'teamid': team_id}
    try:
        # Consistency fix: use requests.get(...) like every sibling function
        # instead of the equivalent requests.request('GET', ...).
        response = requests.get(_url((team_id,))[1], params=params,
                                headers=headers)
        if response.status_code == 200:
            params = {
                'team': {
                    'name': response.json()['team']['name'],
                    'users': [user for user in response.json()['team']['users'] if user['id'] !=
                              user_key]
                }
            }
            headers = api_utils.generate_headers('rw')
            try:
                response = requests.put(_url((team_id,))[1], json=params, headers=headers)
                if response_utils.response_error(response):  # Check response has no errors
                    click.echo('Deleting user from team with key: %s failed.' % team_id, err=True)
                    sys.exit(1)
                elif response.status_code == 200:
                    click.echo("Deleted user with key: '%s' from team: %s" % (user_key, team_id))
            except requests.exceptions.RequestException as error:
                click.echo(error, err=True)
                sys.exit(1)
        elif response_utils.response_error(response):
            click.echo('Cannot find team. Deleting user from team %s failed.' % team_id, err=True)
            sys.exit(1)
    except requests.exceptions.RequestException as error:
        click.echo(error, err=True)
        sys.exit(1)
| StarcoderdataPython |
4892523 | # -*- coding: utf-8 -*-
# Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for the retry_stats.py module."""
from __future__ import print_function
from six.moves import StringIO
from chromite.lib import cros_test_lib
from chromite.lib import parallel
from chromite.lib import retry_stats
# We access internal members to help with testing.
# pylint: disable=protected-access
class TestRetryException(Exception):
    """Exception deliberately raised by the failing test callables below."""
# TODO(crbug.com/1072139): Use 'singleton_manager' fixture when this module
# runs exclusively on Python 3.
class TestRetryStats(cros_test_lib.MockTestCase):
    """This contains test cases for the retry_stats module."""

    CAT = 'Test Service A'
    CAT_B = 'Test Service B'
    SUCCESS_RESULT = 'success result'

    def setUp(self):
        # Reset module state so each test starts without a stats collection,
        # and make parallel.Manager() return one shared manager we can shut
        # down deterministically in tearDown.
        retry_stats._STATS_COLLECTION = None
        self._singleton_manager = parallel.Manager()
        self.PatchObject(parallel, 'Manager', return_value=self._singleton_manager)

    def tearDown(self):
        self._singleton_manager.shutdown()

    # Handler callbacks: given the raised exception, decide whether to retry.
    def handlerNoRetry(self, _e):
        return False

    def handlerRetry(self, _e):
        return True

    # Callables passed to RetryWithStats: one always succeeds, one always raises.
    def callSuccess(self):
        return self.SUCCESS_RESULT

    def callFailure(self):
        raise TestRetryException()

    def _verifyStats(self, category, success=0, failure=0, retry=0):
        """Verify that the given category has the specified values collected."""
        stats_success, stats_failure, stats_retry = retry_stats.CategoryStats(
            category)
        self.assertEqual(stats_success, success)
        self.assertEqual(stats_failure, failure)
        self.assertEqual(stats_retry, retry)

    def testSetupStats(self):
        """Verify that we do something when we setup a new stats category."""
        # Show that setup does something.
        self.assertEqual(retry_stats._STATS_COLLECTION, None)
        retry_stats.SetupStats()
        self.assertNotEqual(retry_stats._STATS_COLLECTION, None)

    def testReportCategoryStatsEmpty(self):
        retry_stats.SetupStats()
        out = StringIO()
        retry_stats.ReportCategoryStats(out, self.CAT)
        expected = """************************************************************
** Performance Statistics for Test Service A
**
** Success: 0
** Failure: 0
** Retries: 0
** Total: 0
************************************************************
"""
        self.assertEqual(out.getvalue(), expected)

    def testReportStatsEmpty(self):
        retry_stats.SetupStats()
        out = StringIO()
        retry_stats.ReportStats(out)
        # No data collected means no categories are known, nothing to report.
        self.assertEqual(out.getvalue(), '')

    def testReportStats(self):
        retry_stats.SetupStats()
        # Insert some stats to report.
        retry_stats.RetryWithStats(
            self.CAT, self.handlerNoRetry, 3, self.callSuccess)
        retry_stats.RetryWithStats(
            self.CAT_B, self.handlerNoRetry, 3, self.callSuccess)
        self.assertRaises(TestRetryException,
                          retry_stats.RetryWithStats,
                          self.CAT, self.handlerRetry, 3, self.callFailure)
        out = StringIO()
        retry_stats.ReportStats(out)
        # Expecting reports for both CAT and CAT_B used above.
        expected = """************************************************************
** Performance Statistics for Test Service A
**
** Success: 1
** Failure: 1
** Retries: 3
** Total: 2
************************************************************
************************************************************
** Performance Statistics for Test Service B
**
** Success: 1
** Failure: 0
** Retries: 0
** Total: 1
************************************************************
"""
        self.assertEqual(out.getvalue(), expected)

    def testSuccessNoSetup(self):
        """Verify that we can handle a successful call if we're never setup."""
        self.assertEqual(retry_stats._STATS_COLLECTION, None)
        result = retry_stats.RetryWithStats(
            self.CAT, self.handlerNoRetry, 3, self.callSuccess)
        self.assertEqual(result, self.SUCCESS_RESULT)
        result = retry_stats.RetryWithStats(
            self.CAT, self.handlerNoRetry, 3, self.callSuccess)
        self.assertEqual(result, self.SUCCESS_RESULT)
        self.assertEqual(retry_stats._STATS_COLLECTION, None)

    def testFailureNoRetryNoSetup(self):
        """Verify that we can handle a failure call if we're never setup."""
        self.assertEqual(retry_stats._STATS_COLLECTION, None)
        self.assertRaises(TestRetryException,
                          retry_stats.RetryWithStats,
                          self.CAT, self.handlerNoRetry, 3, self.callFailure)
        self.assertRaises(TestRetryException,
                          retry_stats.RetryWithStats,
                          self.CAT, self.handlerNoRetry, 3, self.callFailure)
        self.assertEqual(retry_stats._STATS_COLLECTION, None)

    def testSuccess(self):
        """Verify that we can handle a successful call."""
        retry_stats.SetupStats()
        self._verifyStats(self.CAT)
        # Succeed once.
        result = retry_stats.RetryWithStats(
            self.CAT, self.handlerNoRetry, 3, self.callSuccess)
        self.assertEqual(result, self.SUCCESS_RESULT)
        self._verifyStats(self.CAT, success=1)
        # Succeed twice.
        result = retry_stats.RetryWithStats(
            self.CAT, self.handlerNoRetry, 3, self.callSuccess)
        self.assertEqual(result, self.SUCCESS_RESULT)
        self._verifyStats(self.CAT, success=2)

    def testSuccessRetry(self):
        """Verify that we can handle a successful call after tries."""
        retry_stats.SetupStats()
        self._verifyStats(self.CAT)
        # Use this scoped list as a persistent counter.
        call_counter = ['fail 1', 'fail 2']

        def callRetrySuccess():
            if call_counter:
                raise TestRetryException(call_counter.pop())
            else:
                return self.SUCCESS_RESULT

        # Retry twice, then succeed.
        result = retry_stats.RetryWithStats(
            self.CAT, self.handlerRetry, 3, callRetrySuccess)
        self.assertEqual(result, self.SUCCESS_RESULT)
        self._verifyStats(self.CAT, success=1, retry=2)

    def testFailureNoRetry(self):
        """Verify that we can handle a failure if the handler doesn't retry."""
        retry_stats.SetupStats()
        self._verifyStats(self.CAT)
        # Fail once without retries.
        self.assertRaises(TestRetryException,
                          retry_stats.RetryWithStats,
                          self.CAT, self.handlerNoRetry, 3, self.callFailure)
        self._verifyStats(self.CAT, failure=1)
        # Fail twice without retries.
        self.assertRaises(TestRetryException,
                          retry_stats.RetryWithStats,
                          self.CAT, self.handlerNoRetry, 3, self.callFailure)
        self._verifyStats(self.CAT, failure=2)

    def testFailureRetry(self):
        """Verify that we can handle a failure if we use all retries."""
        retry_stats.SetupStats()
        self._verifyStats(self.CAT)
        # Fail once with exhausted retries.
        self.assertRaises(TestRetryException,
                          retry_stats.RetryWithStats,
                          self.CAT, self.handlerRetry, 3, self.callFailure)
        self._verifyStats(self.CAT, failure=1, retry=3)  # 3 retries = 4 attempts.
        # Fail twice with exhausted retries.
        self.assertRaises(TestRetryException,
                          retry_stats.RetryWithStats,
                          self.CAT, self.handlerRetry, 3, self.callFailure)
        self._verifyStats(self.CAT, failure=2, retry=6)
| StarcoderdataPython |
11204135 | <reponame>kos-kaggle/pytorch_advanced
"""
第2章SSDで実装した内容をまとめたファイル
"""
# パッケージのimport
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.autograd import Function
import torch.utils.data as data
import torch
import cv2
import numpy as np
import os.path as osp
from itertools import product as product
from math import sqrt as sqrt
# XMLをファイルやテキストから読み込んだり、加工したり、保存したりするためのライブラリ
import xml.etree.ElementTree as ET
# フォルダ「utils」のdata_augumentation.pyからimport。入力画像の前処理をするクラス
from utils.data_augumentation import Compose, ConvertFromInts, ToAbsoluteCoords, PhotometricDistort, Expand, RandomSampleCrop, RandomMirror, ToPercentCoords, Resize, SubtractMeans
# フォルダ「utils」にある関数matchを記述したmatch.pyからimport
from utils.match import match
# Build file path lists for the training/validation images and annotations.
def make_datapath_list(rootpath):
    """Create lists of file paths to the data.

    Parameters
    ----------
    rootpath : str
        Path to the data folder (with or without a trailing separator).

    Returns
    -------
    ret : train_img_list, train_anno_list, val_img_list, val_anno_list
        Lists holding the paths to the data.
    """
    # Path templates for image files and annotation files.
    imgpath_template = osp.join(rootpath, 'JPEGImages', '%s.jpg')
    annopath_template = osp.join(rootpath, 'Annotations', '%s.xml')

    def _collect(id_file):
        # Read one ImageSets id file and build the matching path lists.
        img_list, anno_list = [], []
        for line in open(id_file):
            file_id = line.strip()  # drop whitespace and the newline
            img_list.append(imgpath_template % file_id)
            anno_list.append(annopath_template % file_id)
        return img_list, anno_list

    # Robustness fix: the id-file paths used string concatenation
    # (rootpath + 'ImageSets/...'), which silently required a trailing
    # separator on rootpath; osp.join works either way, like the templates.
    train_img_list, train_anno_list = _collect(
        osp.join(rootpath, 'ImageSets', 'Main', 'train.txt'))
    val_img_list, val_anno_list = _collect(
        osp.join(rootpath, 'ImageSets', 'Main', 'val.txt'))

    return train_img_list, train_anno_list, val_img_list, val_anno_list
# Converts an XML-format annotation into list form.
class Anno_xml2list(object):
    """Convert one image's XML annotation into a list, with coordinates
    normalized by the image size.

    Attributes
    ----------
    classes : list
        The VOC class names.
    """

    def __init__(self, classes):
        self.classes = classes

    def __call__(self, xml_path, width, height):
        """Convert one image's XML annotation to normalized list form.

        Parameters
        ----------
        xml_path : str
            Path to the xml file.
        width : int
            Width of the target image.
        height : int
            Height of the target image.

        Returns
        -------
        ret : [[xmin, ymin, xmax, ymax, label_ind], ... ]
            One entry per (non-difficult) object present in the image.
        """
        annotations = []
        root = ET.parse(xml_path).getroot()

        for obj in root.iter('object'):
            # Skip objects flagged as "difficult" in the annotation.
            if int(obj.find('difficult').text) == 1:
                continue

            name = obj.find('name').text.lower().strip()  # object class name
            bbox = obj.find('bndbox')  # bounding box element

            box = []
            for corner in ['xmin', 'ymin', 'xmax', 'ymax']:
                # VOC coordinates are 1-based; shift the origin to (0, 0).
                coord = int(bbox.find(corner).text) - 1
                # Normalize x by the width and y by the height.
                coord = coord / width if corner in ('xmin', 'xmax') else coord / height
                box.append(coord)

            # Append the class index of this object's name.
            box.append(self.classes.index(name))
            annotations += [box]

        return np.array(annotations)  # [[xmin, ymin, xmax, ymax, label_ind], ... ]
# Preprocessing class for the input images.
class DataTransform():
    """Preprocessing for images and annotations; behaves differently for
    training and inference.

    Resizes images to 300x300 and applies data augmentation during training.

    Attributes
    ----------
    input_size : int
        Target size the image is resized to.
    color_mean : (B, G, R)
        Mean value of each color channel.
    """

    def __init__(self, input_size, color_mean):
        self.data_transform = {
            'train': Compose([
                ConvertFromInts(),  # convert int pixels to float32
                ToAbsoluteCoords(),  # undo the annotation normalization
                PhotometricDistort(),  # randomly perturb color etc.
                Expand(color_mean),  # enlarge the image canvas
                RandomSampleCrop(),  # randomly crop a region of the image
                RandomMirror(),  # randomly flip the image
                ToPercentCoords(),  # normalize annotations to 0-1
                Resize(input_size),  # resize to input_size x input_size
                SubtractMeans(color_mean)  # subtract the BGR mean values
            ]),
            'val': Compose([
                ConvertFromInts(),  # convert int pixels to float
                Resize(input_size),  # resize to input_size x input_size
                SubtractMeans(color_mean)  # subtract the BGR mean values
            ])
        }

    def __call__(self, img, phase, boxes, labels):
        """
        Parameters
        ----------
        phase : 'train' or 'val'
            Selects the preprocessing mode.
        """
        return self.data_transform[phase](img, boxes, labels)
class VOCDataset(data.Dataset):
    """Dataset for VOC2012, inheriting from PyTorch's Dataset class.

    Attributes
    ----------
    img_list : list
        Paths to the images.
    anno_list : list
        Paths to the annotations.
    phase : 'train' or 'test'
        Whether this dataset is for training or validation.
    transform : object
        Instance of the preprocessing class.
    transform_anno : object
        Instance that converts xml annotations into lists.
    """

    def __init__(self, img_list, anno_list, phase, transform, transform_anno):
        self.img_list = img_list
        self.anno_list = anno_list
        self.phase = phase  # 'train' or 'val'
        self.transform = transform  # image preprocessing
        self.transform_anno = transform_anno  # xml annotation -> list

    def __len__(self):
        '''Return the number of images.'''
        return len(self.img_list)

    def __getitem__(self, index):
        '''
        Return the preprocessed image tensor and its annotation.
        '''
        im, gt, h, w = self.pull_item(index)
        return im, gt

    def pull_item(self, index):
        '''Return the preprocessed image tensor, annotation, image height and width.'''
        # 1. load the image
        image_file_path = self.img_list[index]
        img = cv2.imread(image_file_path)  # [height][width][BGR]
        height, width, channels = img.shape  # image size
        # 2. convert the xml annotation into a list
        anno_file_path = self.anno_list[index]
        anno_list = self.transform_anno(anno_file_path, width, height)
        # 3. apply the preprocessing
        img, boxes, labels = self.transform(
            img, self.phase, anno_list[:, :4], anno_list[:, 4])
        # Reorder BGR -> RGB and (H, W, C) -> (C, H, W).
        img = torch.from_numpy(img[:, :, (2, 1, 0)]).permute(2, 0, 1)
        # Stack boxes and labels into one array; "gt" is short for ground truth.
        gt = np.hstack((boxes, np.expand_dims(labels, axis=1)))
        return img, gt, height, width
def od_collate_fn(batch):
    """Custom collate_fn for object-detection mini-batches.

    The annotation array size differs per image: an image with 2 objects has
    shape (2, 5), one with 3 objects (3, 5), and so on. Images can therefore
    be stacked into one tensor, but annotations are kept as a list of
    per-image FloatTensors.

    Returns
    -------
    imgs : torch.Size([batch_num, 3, 300, 300]) tensor of stacked images.
    targets : list (length batch_num) of [n, 5] FloatTensors where each row
        is [xmin, ymin, xmax, ymax, class_index] and n is the number of
        objects in that image.
    """
    # sample[0] is the image, sample[1] the ground-truth annotation.
    imgs = torch.stack([sample[0] for sample in batch], dim=0)
    targets = [torch.FloatTensor(sample[1]) for sample in batch]
    return imgs, targets
# Build the 35-layer VGG module.
def make_vgg():
    """Return the VGG backbone used by SSD as an nn.ModuleList (35 entries)."""
    # Channel configuration: numbers are conv output channels,
    # 'M' = floor-mode max-pool, 'MC' = ceil-mode max-pool.
    cfg = [64, 64, 'M', 128, 128, 'M', 256, 256,
           256, 'MC', 512, 512, 512, 'M', 512, 512, 512]

    layers = []
    in_channels = 3  # RGB input
    for v in cfg:
        if v == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        elif v == 'MC':
            # ceil_mode rounds the output size *up* (e.g. 75 -> 38)
            # instead of the default floor mode which rounds down.
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True))
        else:
            layers.append(nn.Conv2d(in_channels, v, kernel_size=3, padding=1))
            layers.append(nn.ReLU(inplace=True))
            in_channels = v

    # pool5 keeps the spatial size; conv6 uses dilation to widen its
    # receptive field without shrinking the feature map.
    layers.append(nn.MaxPool2d(kernel_size=3, stride=1, padding=1))
    layers.append(nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6))
    layers.append(nn.ReLU(inplace=True))
    layers.append(nn.Conv2d(1024, 1024, kernel_size=1))
    layers.append(nn.ReLU(inplace=True))
    return nn.ModuleList(layers)
# Build the 8-layer extras module.
def make_extras():
    """Return SSD's extra feature layers as an nn.ModuleList (8 convs).

    NOTE(review): no ReLU modules are included here -- presumably the
    activations are applied in the SSD forward pass, outside this factory.
    """
    # Input channel count coming out of the VGG module, then the conv
    # channel configuration for the extras.
    cfg = [256, 512, 128, 256, 128, 256, 128, 256]
    layers = [
        nn.Conv2d(1024, cfg[0], kernel_size=(1)),
        nn.Conv2d(cfg[0], cfg[1], kernel_size=(3), stride=2, padding=1),
        nn.Conv2d(cfg[1], cfg[2], kernel_size=(1)),
        nn.Conv2d(cfg[2], cfg[3], kernel_size=(3), stride=2, padding=1),
        nn.Conv2d(cfg[3], cfg[4], kernel_size=(1)),
        nn.Conv2d(cfg[4], cfg[5], kernel_size=(3)),
        nn.Conv2d(cfg[5], cfg[6], kernel_size=(1)),
        nn.Conv2d(cfg[6], cfg[7], kernel_size=(3)),
    ]
    return nn.ModuleList(layers)
# Build loc_layers (default-box offsets) and conf_layers (per-class scores).
def make_loc_conf(num_classes=21, bbox_aspect_num=(4, 6, 6, 6, 4, 4)):
    """Build the localization and confidence heads for SSD's six sources.

    Parameters
    ----------
    num_classes : int
        Number of classes including background. Default 21 (VOC).
    bbox_aspect_num : sequence of int
        Number of default boxes per cell for each of the six feature sources.
        (Default changed from a mutable list literal to an equivalent tuple;
        callers may still pass lists.)

    Returns
    -------
    (loc_layers, conf_layers) : two nn.ModuleLists of 3x3 conv layers.
        Each loc layer predicts 4 offsets per default box; each conf layer
        predicts num_classes scores per default box.
    """
    # Input channels of the six feature sources, in order:
    # source1 = VGG conv4_3, source2 = VGG output, sources 3-6 = extras.
    source_channels = (512, 1024, 512, 256, 256, 256)

    # DRY fix: the six copy-pasted layer pairs are now built in one loop
    # driven by the (channels, boxes-per-cell) configuration above.
    loc_layers = []
    conf_layers = []
    for in_ch, n_box in zip(source_channels, bbox_aspect_num):
        loc_layers.append(
            nn.Conv2d(in_ch, n_box * 4, kernel_size=3, padding=1))
        conf_layers.append(
            nn.Conv2d(in_ch, n_box * num_classes, kernel_size=3, padding=1))

    return nn.ModuleList(loc_layers), nn.ModuleList(conf_layers)
# Normalizes conv4_3's output channel-wise, with a learnable scale (init 20).
class L2Norm(nn.Module):
    """Channel-wise L2 normalization with a learnable per-channel scale.

    Each spatial position of the input is divided by the L2 norm taken
    over its channels, then multiplied by a learned per-channel weight
    initialized to ``scale``.
    """

    def __init__(self, input_channels=512, scale=20):
        super(L2Norm, self).__init__()
        self.weight = nn.Parameter(torch.Tensor(input_channels))
        self.scale = scale  # initial value for every channel weight
        self.reset_parameters()
        self.eps = 1e-10  # guards against division by zero

    def reset_parameters(self):
        """Initialize every channel weight to ``self.scale``."""
        init.constant_(self.weight, self.scale)

    def forward(self, x):
        """Normalize ``x`` over the channel dim, then rescale per channel.

        Args:
            x: tensor of shape (batch, C, H, W).

        Returns:
            Tensor of the same shape, L2-normalized across channels and
            multiplied by the learned per-channel weights.
        """
        # L2 norm over channels, kept broadcastable: (batch, 1, H, W)
        channel_norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
        normalized = torch.div(x, channel_norm)
        # Broadcast the (C,) weight vector up to (batch, C, H, W)
        scale = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(normalized)
        return scale * normalized
# Generates the SSD default (prior) boxes.
class DBox(object):
    """Generator for the 8732 SSD default boxes.

    Configured from a dict with keys: input_size, feature_maps, steps,
    min_sizes, max_sizes, aspect_ratios.
    """

    def __init__(self, cfg):
        super(DBox, self).__init__()
        self.image_size = cfg['input_size']          # e.g. 300
        self.feature_maps = cfg['feature_maps']      # [38, 19, ...] map sizes
        self.num_priors = len(cfg["feature_maps"])   # number of sources = 6
        self.steps = cfg['steps']                    # pixel stride per source
        self.min_sizes = cfg['min_sizes']            # small-square box sizes
        self.max_sizes = cfg['max_sizes']            # large-square box sizes
        self.aspect_ratios = cfg['aspect_ratios']    # extra rectangle ratios

    def make_dbox_list(self):
        """Return all default boxes as a [8732, 4] tensor of (cx, cy, w, h).

        Coordinates are normalized to [0, 1] and clamped to that range.
        Each cell of each feature map gets: one small square, one large
        square, and a pair of rectangles per configured aspect ratio.
        """
        coords = []
        for k, fmap in enumerate(self.feature_maps):
            # Effective grid size of this source (image_size / step).
            f_k = self.image_size / self.steps[k]
            # Per-source box sizes (area side lengths, normalized).
            s_k = self.min_sizes[k] / self.image_size
            s_k_prime = sqrt(s_k * (self.max_sizes[k] / self.image_size))
            for i, j in product(range(fmap), repeat=2):
                # Cell center in normalized coordinates.
                cx = (j + 0.5) / f_k
                cy = (i + 0.5) / f_k
                # Small square, then large square.
                coords += [cx, cy, s_k, s_k]
                coords += [cx, cy, s_k_prime, s_k_prime]
                # Rectangles: each aspect ratio and its inverse.
                for ar in self.aspect_ratios[k]:
                    root = sqrt(ar)
                    coords += [cx, cy, s_k * root, s_k / root]
                    coords += [cx, cy, s_k / root, s_k * root]
        dboxes = torch.Tensor(coords).view(-1, 4)
        # Clip boxes that stick out beyond the normalized image bounds.
        dboxes.clamp_(max=1, min=0)
        return dboxes
# Converts predicted offsets plus default boxes into bounding boxes.
def decode(loc, dbox_list):
    """Convert predicted offsets and default boxes into corner-form BBoxes.

    Args:
        loc: [8732, 4] predicted offsets (d_cx, d_cy, d_w, d_h).
        dbox_list: [8732, 4] default boxes in (cx, cy, w, h) form.

    Returns:
        [8732, 4] tensor of boxes as (xmin, ymin, xmax, ymax).
    """
    # SSD variance-weighted decoding:
    #   center = dbox_center + 0.1 * offset * dbox_size
    #   size   = dbox_size * exp(0.2 * offset)
    centers = dbox_list[:, :2] + loc[:, :2] * 0.1 * dbox_list[:, 2:]
    sizes = dbox_list[:, 2:] * torch.exp(loc[:, 2:] * 0.2)
    boxes = torch.cat((centers, sizes), dim=1)
    # (cx, cy, w, h) -> (xmin, ymin, xmax, ymax)
    boxes[:, :2] -= boxes[:, 2:] / 2
    boxes[:, 2:] += boxes[:, :2]
    return boxes
# Greedy Non-Maximum Suppression over a set of scored boxes.
def nm_suppression(boxes, scores, overlap=0.45, top_k=200):
    """Perform greedy Non-Maximum Suppression.

    Boxes are kept in descending score order; any remaining box whose
    IoU with an already-kept box exceeds ``overlap`` is discarded. At
    most ``top_k`` of the highest-scoring candidates are examined.

    Args:
        boxes: [N, 4] boxes as (xmin, ymin, xmax, ymax).
        scores: [N] confidence score per box.
        overlap: IoU threshold above which a box is suppressed.
        top_k: maximum number of candidates considered.

    Returns:
        keep: LongTensor [N]; its first ``count`` entries are the
            surviving box indices, in descending score order.
        count: number of surviving boxes.
    """
    keep = torch.zeros(scores.size(0), dtype=torch.long, device=scores.device)
    count = 0
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    area = (x2 - x1) * (y2 - y1)
    # Candidate indices by ascending score; keep only the top_k tail.
    _, idx = scores.sort(0)
    idx = idx[-top_k:]
    while idx.numel() > 0:
        best = idx[-1]  # highest-scoring remaining candidate
        keep[count] = best
        count += 1
        if idx.size(0) == 1:
            break  # nothing left to compare against
        idx = idx[:-1]
        # Intersection of `best` with every remaining candidate.
        xx1 = torch.clamp(x1[idx], min=x1[best])
        yy1 = torch.clamp(y1[idx], min=y1[best])
        xx2 = torch.clamp(x2[idx], max=x2[best])
        yy2 = torch.clamp(y2[idx], max=y2[best])
        inter_w = torch.clamp(xx2 - xx1, min=0.0)
        inter_h = torch.clamp(yy2 - yy1, min=0.0)
        inter = inter_w * inter_h
        # IoU = intersection / (area(a) + area(b) - intersection)
        union = area[idx] - inter + area[best]
        # Keep only candidates that overlap `best` no more than the threshold.
        idx = idx[(inter / union).le(overlap)]
    return keep, count
# Converts SSD loc/conf outputs into final, NMS-filtered detections (inference).
class Detect(Function):
    # NOTE(review): this uses the legacy autograd.Function style (stateful
    # __init__ plus an instance forward); recent PyTorch requires a static
    # forward — confirm against the torch version this project targets.
    def __init__(self, conf_thresh=0.01, top_k=200, nms_thresh=0.45):
        self.softmax = nn.Softmax(dim=-1)  # normalizes conf scores per box
        self.conf_thresh = conf_thresh  # only DBoxes with conf above this are kept
        self.top_k = top_k  # NMS examines at most this many candidates per class
        self.nms_thresh = nms_thresh  # IoU above this counts as the same object
    def forward(self, loc_data, conf_data, dbox_list):
        """
        Run the detection post-processing.

        Parameters
        ----------
        loc_data: [batch_num, 8732, 4]
            Predicted offsets.
        conf_data: [batch_num, 8732, num_classes]
            Detection confidences.
        dbox_list: [8732, 4]
            Default box definitions.

        Returns
        -------
        output : torch.Size([batch_num, 21, 200, 5])
            (batch, class, top-200 by conf, [score, xmin, ymin, xmax, ymax])
        """
        # Tensor dimensions.
        num_batch = loc_data.size(0)  # mini-batch size
        num_dbox = loc_data.size(1)  # number of DBoxes = 8732
        num_classes = conf_data.size(2)  # number of classes = 21
        # Normalize the confidences with softmax.
        conf_data = self.softmax(conf_data)
        # Output template: [batch, num_classes, top_k, 5].
        output = torch.zeros(num_batch, num_classes, self.top_k, 5)
        # Reorder conf_data to [batch_num, num_classes, 8732].
        conf_preds = conf_data.transpose(2, 1)
        # Per-image loop.
        for i in range(num_batch):
            # 1. Decode offsets into corner-form BBoxes [xmin, ymin, xmax, ymax].
            decoded_boxes = decode(loc_data[i], dbox_list)
            # Work on a copy of the confidences.
            conf_scores = conf_preds[i].clone()
            # Per-class loop (index 0 is the background class and is skipped).
            for cl in range(1, num_classes):
                # 2. Mask of boxes whose conf exceeds the threshold.
                # gt() yields 1 where conf > conf_thresh, else 0.
                c_mask = conf_scores[cl].gt(self.conf_thresh)
                # conf_scores: torch.Size([21, 8732]); c_mask: torch.Size([8732]).
                # scores: torch.Size([number of boxes above threshold]).
                scores = conf_scores[cl][c_mask]
                # Nothing above the threshold for this class -> skip it.
                if scores.nelement() == 0:  # nelement() counts the elements
                    continue
                # Expand the mask so it can index decoded_boxes: [8732, 4].
                l_mask = c_mask.unsqueeze(1).expand_as(decoded_boxes)
                # Masked select flattens, so reshape back to [n_kept, 4].
                boxes = decoded_boxes[l_mask].view(-1, 4)
                # 3. Non-Maximum Suppression removes duplicate detections.
                ids, count = nm_suppression(
                    boxes, scores, self.nms_thresh, self.top_k)
                # ids: surviving indices, in descending conf order.
                # count: number of survivors.
                # Store the NMS survivors as [score, xmin, ymin, xmax, ymax].
                output[i, cl, :count] = torch.cat((scores[ids[:count]].unsqueeze(1),
                                                   boxes[ids[:count]]), 1)
        return output  # torch.Size([1, 21, 200, 5])
# The SSD network: VGG backbone + extras + L2Norm + loc/conf heads.
class SSD(nn.Module):
    """SSD object detector.

    phase='train' returns the raw (loc, conf, dbox_list) tuple;
    phase='inference' additionally runs Detect and returns final boxes.
    cfg supplies num_classes, bbox_aspect_num and the DBox parameters.
    """
    def __init__(self, phase, cfg):
        super(SSD, self).__init__()
        self.phase = phase  # 'train' or 'inference'
        self.num_classes = cfg["num_classes"]  # number of classes = 21
        # Build the SSD sub-networks.
        self.vgg = make_vgg()
        self.extras = make_extras()
        self.L2Norm = L2Norm()
        self.loc, self.conf = make_loc_conf(
            cfg["num_classes"], cfg["bbox_aspect_num"])
        # Pre-compute the default boxes.
        dbox = DBox(cfg)
        self.dbox_list = dbox.make_dbox_list()
        # Inference additionally needs the Detect post-processing stage.
        if phase == 'inference':
            self.detect = Detect()
    def forward(self, x):
        """Run the network; see class docstring for phase-dependent output."""
        sources = list()  # the six feature sources fed into loc/conf
        loc = list()  # per-source loc head outputs
        conf = list()  # per-source conf head outputs
        # Run VGG up to conv4_3 (the first 23 modules).
        for k in range(23):
            x = self.vgg[k](x)
        # L2-normalize conv4_3's output -> source1.
        source1 = self.L2Norm(x)
        sources.append(source1)
        # Run the rest of VGG -> source2.
        for k in range(23, len(self.vgg)):
            x = self.vgg[k](x)
        sources.append(x)
        # Run the extras (conv + ReLU); every second conv yields a source.
        for k, v in enumerate(self.extras):
            x = F.relu(v(x), inplace=True)
            if k % 2 == 1:  # after each conv->ReLU->conv->ReLU pair
                sources.append(x)
        # Apply the matching loc/conf conv to each of the six sources.
        for (x, l, c) in zip(sources, self.loc, self.conf):
            # permute to [batch, H, W, boxes*4] / [batch, H, W, boxes*classes]
            # so the per-source aspect-ratio counts can be flattened uniformly.
            loc.append(l(x).permute(0, 2, 3, 1).contiguous())
            conf.append(c(x).permute(0, 2, 3, 1).contiguous())
            # contiguous() re-lays the tensor in memory so view() below is legal.
        # Flatten and concatenate across sources:
        # loc -> [batch, 34928], conf -> [batch, 183372].
        loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
        conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
        # Reshape to the canonical [batch, 8732, 4] and [batch, 8732, 21].
        loc = loc.view(loc.size(0), -1, 4)
        conf = conf.view(conf.size(0), -1, self.num_classes)
        # Assemble the raw output tuple.
        output = (loc, conf, self.dbox_list)
        if self.phase == "inference":  # inference: run Detect
            # Detect returns torch.Size([batch_num, 21, 200, 5]).
            return self.detect(output[0], output[1], output[2])
        else:  # training: return the raw tuple for the loss function
            return output
# The returned tuple is (loc, conf, dbox_list).
class MultiBoxLoss(nn.Module):
    """SSD loss function: Smooth-L1 loc loss + cross-entropy conf loss
    with Hard Negative Mining (negatives limited to neg_pos x positives)."""
    def __init__(self, jaccard_thresh=0.5, neg_pos=3, device='cpu'):
        super(MultiBoxLoss, self).__init__()
        self.jaccard_thresh = jaccard_thresh  # jaccard (IoU) threshold for match()
        self.negpos_ratio = neg_pos  # negative:positive ratio for Hard Negative Mining
        self.device = device  # compute on CPU or GPU
    def forward(self, predictions, targets):
        """
        Compute the loss.

        Parameters
        ----------
        predictions : tuple, the SSD training output
            (loc=torch.Size([num_batch, 8732, 4]),
             conf=torch.Size([num_batch, 8732, 21]),
             dbox_list=torch.Size([8732, 4])).
        targets : [num_batch, num_objs, 5]
            Ground-truth annotations [xmin, ymin, xmax, ymax, label_ind].

        Returns
        -------
        loss_l : tensor
            Localization (loc) loss.
        loss_c : tensor
            Confidence (conf) loss.
        """
        # Unpack the SSD output tuple.
        loc_data, conf_data, dbox_list = predictions
        # Tensor dimensions.
        num_batch = loc_data.size(0)  # mini-batch size
        num_dbox = loc_data.size(1)  # number of DBoxes = 8732
        num_classes = conf_data.size(2)  # number of classes = 21
        # Targets for the loss:
        # conf_t_label: ground-truth label of the BBox closest to each DBox.
        # loc_t: position of the ground-truth BBox closest to each DBox.
        conf_t_label = torch.LongTensor(num_batch, num_dbox).to(self.device)
        loc_t = torch.Tensor(num_batch, num_dbox, 4).to(self.device)
        # Fill loc_t and conf_t_label by matching DBoxes against the
        # ground-truth annotations, one image at a time.
        for idx in range(num_batch):
            # Ground-truth BBoxes and labels of the current image.
            truths = targets[idx][:, :-1].to(self.device)  # BBoxes
            # labels: [label of object 1, label of object 2, ...]
            labels = targets[idx][:, -1].to(self.device)
            # Default boxes on the right device.
            dbox = dbox_list.to(self.device)
            # match() overwrites loc_t and conf_t_label in place:
            # loc_t gets the closest ground-truth BBox per DBox;
            # conf_t_label gets that BBox's label, except that a DBox whose
            # best jaccard overlap is below the threshold is assigned the
            # background class (label 0).
            variance = [0.1, 0.2]
            # These variances are the coefficients of the DBox->BBox
            # offset-decoding formula.
            match(self.jaccard_thresh, truths, dbox,
                  variance, labels, loc_t, conf_t_label, idx)
        # ----------
        # Localization loss: loss_l
        # Smooth L1 over the offsets of positive (object-matched) DBoxes only.
        # ----------
        # Mask of DBoxes that matched an object (label > 0; 0 is background).
        pos_mask = conf_t_label > 0  # torch.Size([num_batch, 8732])
        # Expand the mask to loc_data's shape.
        pos_idx = pos_mask.unsqueeze(pos_mask.dim()).expand_as(loc_data)
        # Gather the positive DBoxes' predictions and targets.
        loc_p = loc_data[pos_idx].view(-1, 4)
        loc_t = loc_t[pos_idx].view(-1, 4)
        # Smooth L1 loss over the positive DBoxes' offsets.
        loss_l = F.smooth_l1_loss(loc_p, loc_t, reduction='sum')
        # ----------
        # Confidence loss: loss_c
        # Cross-entropy, with Hard Negative Mining: background DBoxes vastly
        # outnumber positives, so only the hardest negatives are kept, at a
        # ratio of negpos_ratio negatives per positive.
        # ----------
        batch_conf = conf_data.view(-1, num_classes)
        # Per-DBox cross-entropy (reduction='none' keeps one loss per DBox).
        loss_c = F.cross_entropy(
            batch_conf, conf_t_label.view(-1), reduction='none')
        # -----------------
        # Build the mask selecting the hard negatives.
        # -----------------
        # Zero out the loss of positive DBoxes so they don't compete in the
        # negative ranking below (labels >= 1 are objects; 0 is background).
        num_pos = pos_mask.long().sum(1, keepdim=True)  # positives per image
        loss_c = loss_c.view(num_batch, -1)  # torch.Size([num_batch, 8732])
        loss_c[pos_mask] = 0  # positive DBoxes contribute 0 here
        # Rank every DBox by its loss: sort descending, then sort the
        # resulting index list again — idx_rank[d] is the rank (0 = largest
        # loss) of DBox d. This double-sort is the standard fast way to get
        # per-element ranks without a Python loop.
        _, loss_idx = loss_c.sort(1, descending=True)
        _, idx_rank = loss_idx.sort(1)
        # Number of negatives to keep: negpos_ratio x positives, capped at
        # the total number of DBoxes.
        num_neg = torch.clamp(num_pos*self.negpos_ratio, max=num_dbox)
        # Keep the negatives whose loss rank is within num_neg (i.e. the
        # hardest ones). torch.Size([num_batch, 8732]).
        neg_mask = idx_rank < (num_neg).expand_as(idx_rank)
        # -----------------
        # (end) hard-negative mask construction
        # -----------------
        # Expand both masks to conf_data's shape:
        # pos_idx_mask selects positive DBoxes' conf predictions,
        # neg_idx_mask selects the mined hard negatives' conf predictions.
        # pos_mask: [num_batch, 8732] -> pos_idx_mask: [num_batch, 8732, 21].
        pos_idx_mask = pos_mask.unsqueeze(2).expand_as(conf_data)
        neg_idx_mask = neg_mask.unsqueeze(2).expand_as(conf_data)
        # Extract the selected predictions: torch.Size([num_pos+num_neg, 21]).
        conf_hnm = conf_data[(pos_idx_mask+neg_idx_mask).gt(0)
                             ].view(-1, num_classes)
        # (Note) gt(0) selects entries where either mask is set; the "+" just
        # merges the two boolean masks into one.
        # Extract the matching ground-truth labels: torch.Size([pos+neg]).
        conf_t_label_hnm = conf_t_label[(pos_mask+neg_mask).gt(0)]
        # Confidence cross-entropy over positives + mined negatives.
        loss_c = F.cross_entropy(conf_hnm, conf_t_label_hnm, reduction='sum')
        # Normalize both losses by N, the total number of positive DBoxes
        # across the mini-batch.
        N = num_pos.sum()
        loss_l /= N
        loss_c /= N
        return loss_l, loss_c
| StarcoderdataPython |
131217 | <gh_stars>0
####################################
# MonogusaTools
# v.1.0
# (c)isidourou 2013
####################################
#!BPY
import bpy
import random
from bpy.types import Menu, Panel
# Add-on registration metadata read by Blender's add-on manager.
bl_info = {
    "name": "Monogusa Tools",
    "author": "isidourou",
    "version": (1, 0),
    "blender": (2, 65, 0),
    "location": "View3D > Toolbar",
    "description": "MonogusaTools",
    "warning": "",
    "wiki_url": "",
    "tracker_url": "",
    "category": 'CTNAME'}  # NOTE(review): 'CTNAME' looks like a template placeholder — confirm the intended category (e.g. "3D View")
# Last active object recorded by HideSelected; UnhideAll uses it to restore
# an active object when none is active.
atobj = None
def mode_interpret(emode):
    """Map a ``bpy.context.mode`` string to a ``mode_set``-compatible name.

    All EDIT_* sub-modes collapse to 'EDIT'. Unknown modes return None,
    matching the original fall-through behavior.

    Args:
        emode: value of ``bpy.context.mode``.

    Returns:
        The corresponding mode name string, or None if unrecognized.
    """
    if emode in ('EDIT_MESH', 'EDIT_ARMATURE', 'EDIT_CURVE', 'EDIT_TEXT',
                 'EDIT_METABALL', 'EDIT_SURFACE'):
        return 'EDIT'
    return {
        'PAINT_TEXTURE': 'TEXTURE_PAINT',
        'SCULPT': 'SCULPT',
        'PAINT_VERTEX': 'VERTEX_PAINT',
        'PAINT_WEIGHT': 'WEIGHT_PAINT',
        'OBJECT': 'OBJECT',
        'POSE': 'POSE',
    }.get(emode)
def check_active():
    """Return the number of currently selected objects.

    The original counted the selection with a manual loop; ``len()`` on
    the selected-objects list is equivalent and direct.
    """
    return len(bpy.context.selected_objects)
def check_mode():
    """Force OBJECT mode if needed and return the mode that was active."""
    previous = bpy.context.mode
    if previous != 'OBJECT':
        bpy.ops.object.mode_set(mode='OBJECT')
    return previous
# Panel shown in the 3D View tools region.
class MonogusaToolsPanel(bpy.types.Panel):
    """Toolbar panel exposing all MonogusaTools operators as buttons.

    Pure layout code: each row.operator(...) call references an operator
    by its bl_idname; the operator classes are defined elsewhere in this
    module (some idnames, e.g. "convert.tomesh", are expected to be
    registered by companion code — confirm they exist before shipping).
    """
    bl_label = "Monogusa Tools"
    bl_space_type = "VIEW_3D"
    bl_region_type = "TOOLS"
    def draw(self, context):
        """Lay out the panel: one labeled column per tool group."""
        layout = self.layout
        # --- 3D cursor snapping ---
        col = layout.column(align=True)
        col.label(text="3d cursor:")
        row = col.row(align=True)
        row.operator("to.selected", text="to Selected")
        row.operator("to.cursor", text="to Cursor")
        # --- Selection helpers ---
        col = layout.column(align=True)
        col.label(text="Select:")
        row = col.row(align=True)
        row.operator("select.type", text="Type")
        row.operator("select.group", text="Group")
        row.operator("select.obdata", text="OBData")
        row.operator("select.mat", text="Mat")
        row = col.row(align=True)
        row.operator("select.invert", text="Invert")
        row.operator("select.all", text=" All")
        row.operator("deselect.all", text="Deselect")
        # --- Hide/unhide/delete ---
        #col = layout.column(align=True)
        col.label(text="Execute:")
        row = col.row(align=True)
        row.operator("hide.selected", text="Hide")
        row.operator("unhide.all", text="Unhide")
        row.operator("execute.delete", text="Delete")
        # --- Move selection to one of the 20 scene layers ---
        col.label(text="Move to Layer:")
        row = col.row(align=True)
        row.operator("sendlayer.l00",text=' ')
        row.operator("sendlayer.l01",text=' ')
        row.operator("sendlayer.l02",text=' ')
        row.operator("sendlayer.l03",text=' ')
        row.operator("sendlayer.l04",text=' ')
        row.operator("sendlayer.l05",text=' ')
        row.operator("sendlayer.l06",text=' ')
        row.operator("sendlayer.l07",text=' ')
        row.operator("sendlayer.l08",text=' ')
        row.operator("sendlayer.l09",text=' ')
        row = col.row(align=True)
        row.operator("sendlayer.l10",text=' ')
        row.operator("sendlayer.l11",text=' ')
        row.operator("sendlayer.l12",text=' ')
        row.operator("sendlayer.l13",text=' ')
        row.operator("sendlayer.l14",text=' ')
        row.operator("sendlayer.l15",text=' ')
        row.operator("sendlayer.l16",text=' ')
        row.operator("sendlayer.l17",text=' ')
        row.operator("sendlayer.l18",text=' ')
        row.operator("sendlayer.l19",text=' ')
        # --- Mesh/curve conversion ---
        col = layout.column(align=True)
        col.label(text="Convert:")
        row = col.row(align=True)
        row.operator("convert.tomesh", text="to Mesh")
        row.operator("convert.tocurve", text="to Curve")
        # --- Subdivision tools ---
        col = layout.column(align=True)
        col.label(text="Sub Divide:")
        row = col.row(align=True)
        row.operator("div.simple", text="Simple Divide")
        row = col.row(align=True)
        row.operator("div.smooth", text="Smooth Div")
        row.operator("div.rand", text="Random Div")
        row = col.row(align=False)
        row.operator("ver.smooth", text="Smoothing Vertex / Points")
        # --- Mirror modifier shortcuts ---
        col = layout.column(align=True)
        col = layout.column(align=True)
        col.label(text="Add Mirror Modifier:")
        row = col.row(align=True)
        row.operator("add.mmx", text="X")
        row.operator("add.mmy", text="Y")
        row.operator("add.mmz", text="Z")
        row = col.row(align=True)
        row.operator("add.mmmx", text="-X")
        row.operator("add.mmmy", text="-Y")
        row.operator("add.mmmz", text="-Z")
        # --- Template empty creation ---
        col = layout.column(align=True)
        col.label(text="Set Template Empty:")
        row = col.row(align=True)
        row.operator("temp.single", text="Single")
        row.operator("temp.separate", text="3D Separate")
        row.operator("temp.contact", text="3D Contact")
#---- main ------
#select
def _run_select_op(op, kind):
    """Shared body of the four simple select operators.

    Forces OBJECT mode, then runs *op* (a bpy select operator) with the
    given ``type`` argument — but only if at least one object is selected.
    Replaces four copy-pasted execute bodies that differed only in the
    operator and type string.
    """
    check_mode()
    if check_active() == 0:
        return
    op(type=kind)


class SelectType(bpy.types.Operator):
    """Select all objects of the same type as the active one."""
    bl_idname = "select.type"
    bl_label = "SelectType"

    def execute(self, context):
        _run_select_op(bpy.ops.object.select_grouped, 'TYPE')
        return{'FINISHED'}


class SelectGroup(bpy.types.Operator):
    """Select all objects in the same group as the active one."""
    bl_idname = "select.group"
    bl_label = "SelectGroup"

    def execute(self, context):
        _run_select_op(bpy.ops.object.select_grouped, 'GROUP')
        return{'FINISHED'}


class SelectObjdata(bpy.types.Operator):
    """Select all objects sharing the active object's data block."""
    bl_idname = "select.obdata"
    bl_label = "SelectObjdata"

    def execute(self, context):
        _run_select_op(bpy.ops.object.select_linked, 'OBDATA')
        return{'FINISHED'}


class SelectMat(bpy.types.Operator):
    """Select all objects sharing the active object's material."""
    bl_idname = "select.mat"
    bl_label = "SelectMat"

    def execute(self, context):
        _run_select_op(bpy.ops.object.select_linked, 'MATERIAL')
        return{'FINISHED'}
def _select_all_action(action):
    """Run the mode/type-appropriate ``select_all`` operator with *action*.

    Mirrors the per-operator dispatch that was previously copy-pasted in
    SelectInvert / SelectAll / DeselectAll: edit-mode mesh, curve/surface,
    armature and metaball each have their own select_all operator;
    pose-mode armatures use pose.select_all; OBJECT mode uses the
    object-level one. Does nothing if there is no active object.
    """
    cobj = bpy.context.object
    if cobj is None:
        return
    objtype = cobj.type
    emode = mode_interpret(bpy.context.mode)
    if objtype == 'MESH' and emode == 'EDIT':
        bpy.ops.mesh.select_all(action=action)
    if objtype in ('CURVE', 'SURFACE') and emode == 'EDIT':
        bpy.ops.curve.select_all(action=action)
    if objtype == 'ARMATURE':
        if emode == 'POSE':
            bpy.ops.pose.select_all(action=action)
        if emode == 'EDIT':
            bpy.ops.armature.select_all(action=action)
    if objtype == 'META' and emode == 'EDIT':
        bpy.ops.mball.select_all(action=action)
    if emode == 'OBJECT':
        bpy.ops.object.select_all(action=action)


class SelectInvert(bpy.types.Operator):
    """Invert the current selection in any supported mode."""
    bl_idname = "select.invert"
    bl_label = "SelectInvert"

    def execute(self, context):
        _select_all_action('INVERT')
        return{'FINISHED'}


class SelectAll(bpy.types.Operator):
    """Select everything in any supported mode."""
    bl_idname = "select.all"
    bl_label = "SelectAll"

    def execute(self, context):
        _select_all_action('SELECT')
        return{'FINISHED'}


class DeselectAll(bpy.types.Operator):
    """Deselect everything in any supported mode."""
    bl_idname = "deselect.all"
    bl_label = "DeselectAll"

    def execute(self, context):
        _select_all_action('DESELECT')
        return{'FINISHED'}
#execute
class HideSelected(bpy.types.Operator):
    """Hide the current selection, dispatching on object type and mode.

    Also remembers the active object in the module-global ``atobj`` so
    UnhideAll can restore an active object later.
    """
    bl_idname = "hide.selected"
    bl_label = "HideSelected"
    def execute(self, context):
        global atobj
        cobj = bpy.context.object
        # Nothing active: nothing to hide.
        if cobj == None:
            return{'FINISHED'}
        objtype = cobj.type
        emode = bpy.context.mode
        emode = mode_interpret(emode)
        # Dispatch to the hide operator matching the object type and mode.
        if objtype == 'MESH':
            if emode == 'EDIT':
                bpy.ops.mesh.hide(unselected=False)
        if objtype == 'CURVE' or objtype == 'SURFACE':
            if emode == 'EDIT':
                bpy.ops.curve.hide(unselected=False)
        if objtype == 'ARMATURE':
            if emode == 'POSE':
                bpy.ops.pose.hide(unselected=False)
            if emode == 'EDIT':
                bpy.ops.armature.hide(unselected=False)
        if objtype == 'META':
            if emode == 'EDIT':
                bpy.ops.mball.hide_metaelems(unselected=False)
        if emode == 'OBJECT':
            bpy.ops.object.hide_view_set(unselected=False)
        # Remember the object we just hid for UnhideAll.
        atobj = cobj
        return{'FINISHED'}
class UnhideAll(bpy.types.Operator):
    """Reveal hidden elements, dispatching on object type and mode."""
    bl_idname = "unhide.all"
    bl_label = "UnhideAll"
    def execute(self, context):
        global atobj
        cobj = bpy.context.object
        # No active object: restore the one HideSelected recorded.
        # NOTE(review): if atobj is still None (HideSelected never ran),
        # obj.select below would fail on None — confirm intended behavior.
        if cobj == None:
            bpy.context.scene.objects.active = atobj
            obj=bpy.context.object
            obj.select = True
        emode = bpy.context.mode
        emode = mode_interpret(emode)
        # Object mode: clear the view-hide flag on all objects.
        if emode == 'OBJECT':
            #bpy.ops.object.select_all(action='DESELECT')
            bpy.ops.object.hide_view_clear()
            return{'FINISHED'}
        # Edit/pose modes: use the type-specific reveal operator.
        objtype = bpy.context.object.type
        if objtype == 'MESH':
            if emode == 'EDIT':
                bpy.ops.mesh.reveal()
        if objtype == 'CURVE' or objtype == 'SURFACE':
            if emode == 'EDIT':
                bpy.ops.curve.reveal()
        if objtype == 'ARMATURE':
            if emode == 'POSE':
                bpy.ops.pose.reveal()
            if emode == 'EDIT':
                bpy.ops.armature.reveal()
        if objtype == 'META':
            if emode == 'EDIT':
                bpy.ops.mball.reveal_metaelems()
        return{'FINISHED'}
class ExecuteDelete(bpy.types.Operator):
    """Delete the current selection, dispatching on object type and mode."""
    bl_idname = "execute.delete"
    bl_label = "ExecuteDelete"
    def execute(self, context):
        emode = bpy.context.mode
        emode = mode_interpret(emode)
        # Object mode: delete the selected objects outright.
        if emode == 'OBJECT':
            bpy.ops.object.delete(use_global=False)
            return{'FINISHED'}
        objtype = bpy.context.object.type
        if objtype == 'MESH':
            if emode == 'EDIT':
                bpy.ops.mesh.delete()
        if objtype == 'CURVE' or objtype == 'SURFACE':
            if emode == 'EDIT':
                bpy.ops.curve.delete()
        if objtype == 'ARMATURE':
            if emode == 'POSE':
                # armature.delete only works in edit mode, so hop
                # POSE -> EDIT, delete, then back to POSE.
                bpy.ops.object.editmode_toggle()
                bpy.ops.armature.delete()
                bpy.ops.object.posemode_toggle()
            if emode == 'EDIT':
                bpy.ops.armature.delete()
        if objtype == 'META':
            if emode == 'EDIT':
                bpy.ops.mball.delete_metaelems()
        return{'FINISHED'}
# -- Move-to-layer operators -------------------------------------------------
def _make_send_layer_operator(index):
    """Build the Send<index> operator that moves the selection to layer *index*.

    Replaces twenty copy-pasted classes (Send00..Send19) that differed
    only in which of the twenty ``layers`` flags was True.
    """
    # Boolean tuple with exactly one True at position *index*.
    mask = tuple(i == index for i in range(20))

    class _SendLayer(bpy.types.Operator):
        bl_idname = "sendlayer.l%02d" % index
        bl_label = "Send%02d" % index

        def execute(self, context):
            check_mode()  # ensure OBJECT mode before moving objects
            bpy.ops.object.move_to_layer(layers=mask)
            return{'FINISHED'}

    _SendLayer.__name__ = "Send%02d" % index
    return _SendLayer


# Expose Send00 .. Send19 as module-level classes under their original
# names so Blender's add-on registration still discovers each of them.
for _layer_index in range(20):
    _op_cls = _make_send_layer_operator(_layer_index)
    globals()[_op_cls.__name__] = _op_cls
del _layer_index, _op_cls
#3D cursor
# Snap helpers: move the 3D cursor to the selection, or the selection to
# the cursor.
class ToSelected(bpy.types.Operator):
    bl_idname = "to.selected"
    bl_label = "ToSelected"
    def execute(self, context):
        bpy.ops.view3d.snap_cursor_to_selected()
        return{'FINISHED'}
class ToCursor(bpy.types.Operator):
    bl_idname = "to.cursor"
    bl_label = "ToCursor"
    def execute(self, context):
        bpy.ops.view3d.snap_selected_to_cursor()
        return{'FINISHED'}
#subdivide
class DivSimple(bpy.types.Operator):
    # Subdivide the active mesh/armature/curve once, with no smoothing,
    # restoring the caller's mode afterwards.
    bl_idname = "div.simple"
    bl_label = "DivSimple"
    def execute(self, context):
        obj_type = bpy.context.object.type
        prev_mode = mode_interpret(bpy.context.mode)
        was_edit = (prev_mode == 'EDIT')
        if obj_type == 'MESH':
            if not was_edit:
                bpy.ops.object.mode_set(mode='EDIT')
            bpy.ops.mesh.subdivide(smoothness=0)
            if not was_edit:
                bpy.ops.object.mode_set(mode=prev_mode)
        elif obj_type == 'ARMATURE':
            if not was_edit:
                bpy.ops.object.mode_set(mode='EDIT')
            bpy.ops.armature.subdivide()
            if not was_edit:
                bpy.ops.object.mode_set(mode=prev_mode)
        elif obj_type == 'CURVE':
            if not was_edit:
                bpy.ops.object.mode_set(mode='EDIT')
            bpy.ops.curve.subdivide()
            if not was_edit:
                bpy.ops.object.mode_set(mode=prev_mode)
        return{'FINISHED'}
class DivSmooth(bpy.types.Operator):
    # Smooth-subdivide the active object once; only meshes are supported.
    bl_idname = "div.smooth"
    bl_label = "DivSmooth"
    def execute(self, context):
        prev_mode = mode_interpret(bpy.context.mode)
        was_edit = (prev_mode == 'EDIT')
        if bpy.context.object.type == 'MESH':
            if not was_edit:
                bpy.ops.object.mode_set(mode='EDIT')
            bpy.ops.mesh.subdivide(smoothness=1)
            if not was_edit:
                bpy.ops.object.mode_set(mode=prev_mode)
        return{'FINISHED'}
class DivRand(bpy.types.Operator):
    # Fractal-subdivide the active mesh with a random strength and seed.
    bl_idname = "div.rand"
    bl_label = "DivRand"
    def execute(self, context):
        prev_mode = mode_interpret(bpy.context.mode)
        was_edit = (prev_mode == 'EDIT')
        if bpy.context.object.type == 'MESH':
            if not was_edit:
                bpy.ops.object.mode_set(mode='EDIT')
            # Fractal strength in [0, 6) and integer seed in [0, 9].
            fractal_amount = random.random() * 6
            seed_value = int(random.random() * 10)
            bpy.ops.mesh.subdivide(smoothness=0, fractal=fractal_amount, seed=seed_value)
            if not was_edit:
                bpy.ops.object.mode_set(mode=prev_mode)
        return{'FINISHED'}
class VerSmooth(bpy.types.Operator):
    """Smooth the selected vertices of a mesh or points of a curve."""
    bl_idname = "ver.smooth"
    # Fix: label was copy-pasted as "DivSmooth" from the operator above.
    bl_label = "VerSmooth"
    def execute(self, context):
        objtype = bpy.context.object.type
        emode = mode_interpret(bpy.context.mode)
        # Use the cached objtype consistently (the original re-read
        # bpy.context.object.type for the MESH branch only).
        if objtype == 'MESH':
            if emode != 'EDIT':
                bpy.ops.object.mode_set(mode='EDIT')
            bpy.ops.mesh.vertices_smooth()
            if emode != 'EDIT':
                bpy.ops.object.mode_set(mode=emode)
        elif objtype == 'CURVE':
            if emode != 'EDIT':
                bpy.ops.object.mode_set(mode='EDIT')
            bpy.ops.curve.smooth()
            if emode != 'EDIT':
                bpy.ops.object.mode_set(mode=emode)
        return{'FINISHED'}
#convert
class ConverttoMesh(bpy.types.Operator):
    # Convert the active curve/text/meta/surface object to a mesh and leave
    # all of the new geometry selected.
    bl_idname = "convert.tomesh"
    bl_label = "ConverttoMesh"
    def execute(self, context):
        obj_type = bpy.context.object.type
        current = bpy.context.mode
        # Conversion is unavailable from sculpt and paint modes.
        if current == 'SCULPT' or current.find('PAINT') != -1:
            return{'FINISHED'}
        prev_mode = mode_interpret(current)
        if obj_type in ('CURVE', 'FONT', 'META', 'SURFACE'):
            if prev_mode != 'OBJECT':
                bpy.ops.object.mode_set(mode='OBJECT')
            bpy.ops.object.convert(target='MESH')
            # Hop into edit mode once so the whole converted mesh is selected.
            bpy.ops.object.editmode_toggle()
            bpy.ops.mesh.select_all(action='SELECT')
            bpy.ops.object.editmode_toggle()
            if prev_mode != 'OBJECT':
                bpy.ops.object.mode_set(mode=prev_mode)
        return{'FINISHED'}
class ConverttoCurve(bpy.types.Operator):
    # Convert the active mesh/text object to a curve.
    bl_idname = "convert.tocurve"
    bl_label = "ConverttoCurve"
    def execute(self, context):
        obj_type = bpy.context.object.type
        current = bpy.context.mode
        # Conversion is unavailable from sculpt and paint modes.
        if current == 'SCULPT' or current.find('PAINT') != -1:
            return{'FINISHED'}
        prev_mode = mode_interpret(current)
        if obj_type in ('MESH', 'FONT'):
            if prev_mode != 'OBJECT':
                bpy.ops.object.mode_set(mode='OBJECT')
            bpy.ops.object.convert(target='CURVE')
            if prev_mode != 'OBJECT':
                bpy.ops.object.mode_set(mode=prev_mode)
        return{'FINISHED'}
#add mirror modifier
def add_mm(direction):
    """Add (or update) a Mirror modifier on the active mesh object.

    direction is one of 'X', '-X', 'Y', '-Y', 'Z', '-Z'.  Vertices on the
    mirrored-away side of the chosen axis are deleted so the modifier can
    rebuild them, the matching use_<axis> flag is enabled, clipping is turned
    on, and the caller's mode is restored afterwards.
    """
    if direction not in ('X', '-X', 'Y', '-Y', 'Z', '-Z'):
        raise ValueError("Unknown mirror direction %r" % direction)
    emode = mode_interpret(bpy.context.mode)
    obj = bpy.ops.object
    cobj = bpy.context.object
    mesh = cobj.data
    # Clear the selection so only the vertices picked below get deleted.
    obj.mode_set(mode='EDIT')
    bpy.ops.mesh.select_all(action='DESELECT')
    obj.mode_set(mode='OBJECT')
    # Bug fix: the original loop inspected cobj.modifiers[ct] with ct never
    # incremented, so only the first modifier was ever checked and a second
    # Mirror modifier could be added.
    exist = any(mod.name.find('Mirror') != -1 for mod in cobj.modifiers)
    if not exist:
        obj.modifier_add(type='MIRROR')
    axis = direction[-1].lower()              # 'x', 'y' or 'z'
    mirrored_side = direction.startswith('-')
    # Select the vertices on the side that the modifier will regenerate.
    for vertex in mesh.vertices:
        coord = getattr(vertex.co, axis)
        if mirrored_side:
            selected = coord > 0.000001
        else:
            selected = coord < -0.000001
        if selected:
            vertex.select = True
    mirror = cobj.modifiers["Mirror"]
    setattr(mirror, 'use_' + axis, True)
    if not exist:
        # A fresh modifier mirrors all axes by default; keep only ours.
        for other in 'xyz':
            if other != axis:
                setattr(mirror, 'use_' + other, False)
    mirror.use_clip = True
    obj.mode_set(mode='EDIT')
    bpy.ops.mesh.delete(type='VERT')
    bpy.ops.mesh.select_all(action='SELECT')
    obj.mode_set(mode='OBJECT')
    if emode != 'OBJECT':
        bpy.ops.object.mode_set(mode=emode)
# Thin operator wrappers over add_mm(), one per mirror direction.
# NOTE(review): bl_label is copy-pasted as "AddMmx" on five of the six
# operators below; each should probably carry its own label — confirm.
class AddMmx(bpy.types.Operator):
    bl_idname = "add.mmx"
    bl_label = "AddMmx"
    def execute(self, context):
        if bpy.context.object.type == 'MESH':
            add_mm('X')
        return{'FINISHED'}
class AddMm_x(bpy.types.Operator):
    bl_idname = "add.mmmx"
    bl_label = "AddMmx"
    def execute(self, context):
        if bpy.context.object.type == 'MESH':
            add_mm('-X')
        return{'FINISHED'}
class AddMmy(bpy.types.Operator):
    bl_idname = "add.mmy"
    bl_label = "AddMmx"
    def execute(self, context):
        if bpy.context.object.type == 'MESH':
            add_mm('Y')
        return{'FINISHED'}
class AddMm_y(bpy.types.Operator):
    bl_idname = "add.mmmy"
    bl_label = "AddMmx"
    def execute(self, context):
        if bpy.context.object.type == 'MESH':
            add_mm('-Y')
        return{'FINISHED'}
class AddMmz(bpy.types.Operator):
    bl_idname = "add.mmz"
    bl_label = "AddMmx"
    def execute(self, context):
        if bpy.context.object.type == 'MESH':
            add_mm('Z')
        return{'FINISHED'}
class AddMm_z(bpy.types.Operator):
    bl_idname = "add.mmmz"
    bl_label = "AddMmx"
    def execute(self, context):
        if bpy.context.object.type == 'MESH':
            add_mm('-Z')
        return{'FINISHED'}
#set template empty
def objselect(objct,selection):
    """Make objct the active, selected object; 'ONLY' deselects all others first."""
    if (selection == 'ONLY'):
        bpy.ops.object.select_all(action='DESELECT')
    bpy.context.scene.objects.active = objct
    objct.select = True
def makecenterempty():
    """Add a plain-axes empty named 'CenterEmpty' at the origin and return it."""
    bpy.ops.object.empty_add(type='PLAIN_AXES',
                             view_align=False,
                             location=(0, 0, 0))
    centerempty = bpy.context.object
    centerempty.name = 'CenterEmpty'
    return centerempty
def makeempty(loc,rot):
    """Add a semi-transparent, x-ray image empty at loc/rot and return it.

    The empty is meant to hold a reference/template image for modelling.
    """
    bpy.ops.object.empty_add(type='PLAIN_AXES',
                             view_align=False,
                             location= loc,
                             rotation= rot
                             )
    empty = bpy.context.object
    empty.empty_draw_type = 'IMAGE'
    empty.empty_draw_size = 10
    empty.name = 'Template Empty'
    empty.color[3] = 0.3 #Transparency
    empty.show_x_ray = True
    return empty
class TempSingle(bpy.types.Operator):
    # Create one template empty parented to a centre empty, group them as
    # "TemplateEmpty", and snap the centre to the 3D cursor.
    bl_idname = "temp.single"
    bl_label = "TempSingle"
    def execute(self, context):
        # NOTE(review): hand-typed pi (3.141595 vs 3.1415927) — close enough
        # for these 90-degree template rotations.
        pi = 3.141595
        pq = pi/2
        #sn = bpy.context.scene
        erot = [(pq, 0, 0),(pq, 0, pq),(0, 0, 0)]
        eloc = [(-5, 0, -5),(0, -5, -5),(-5, -5, 0)]
        cempty = makecenterempty()
        bpy.ops.group.create(name="TemplateEmpty")
        # Only the first (front-facing) template plane is created here.
        empty = makeempty(eloc[0],erot[0])
        bpy.ops.object.group_link(group='TemplateEmpty')
        objselect(cempty,'ADD')
        bpy.ops.object.parent_set(type='OBJECT')
        objselect(cempty,'ONLY')
        bpy.ops.view3d.snap_selected_to_cursor()
        return{'FINISHED'}
class TempSeparate(bpy.types.Operator):
    # Create three non-touching template empties (front/side/top) parented to
    # a centre empty, grouped as "TemplateEmpty".
    bl_idname = "temp.separate"
    bl_label = "TempSeparate"
    def execute(self, context):
        pi = 3.141595
        pq = pi/2
        #sn = bpy.context.scene
        erot = [(pq, 0, 0),(pq, 0, pq),(0, 0, 0)]
        # NOTE(review): the last two locations are identical (-5, -5, -5);
        # TempContact below uses three distinct positions — confirm intended.
        eloc = [(-5, 5, -5),(-5, -5, -5),(-5, -5, -5)]
        cempty = makecenterempty()
        bpy.ops.group.create(name="TemplateEmpty")
        for i in range(3):
            empty = makeempty(eloc[i],erot[i])
            bpy.ops.object.group_link(group='TemplateEmpty')
            objselect(cempty,'ADD')
            bpy.ops.object.parent_set(type='OBJECT')
        objselect(cempty,'ONLY')
        bpy.ops.view3d.snap_selected_to_cursor()
        return{'FINISHED'}
class TempContact(bpy.types.Operator):
    # Create three template empties meeting at a shared corner (front/side/top)
    # parented to a centre empty, grouped as "TemplateEmpty".
    bl_idname = "temp.contact"
    bl_label = "TempContact"
    def execute(self, context):
        pi = 3.141595
        pq = pi/2
        #sn = bpy.context.scene
        erot = [(pq, 0, 0),(pq, 0, pq),(0, 0, 0)]
        eloc = [(-5, 0, -5),(0, -5, -5),(-5, -5, 0)]
        cempty = makecenterempty()
        bpy.ops.group.create(name="TemplateEmpty")
        for i in range(3):
            empty = makeempty(eloc[i],erot[i])
            bpy.ops.object.group_link(group='TemplateEmpty')
            objselect(cempty,'ADD')
            bpy.ops.object.parent_set(type='OBJECT')
        objselect(cempty,'ONLY')
        bpy.ops.view3d.snap_selected_to_cursor()
        return{'FINISHED'}
# Registration
def register():
    """Register every panel and operator class this add-on defines.

    The tuple preserves the original registration order exactly.
    """
    addon_classes = (
        MonogusaToolsPanel,
        # select
        SelectType, SelectGroup, SelectObjdata, SelectMat,
        SelectInvert, SelectAll, DeselectAll,
        # execute
        HideSelected, UnhideAll, ExecuteDelete,
        # move to layer
        Send00, Send01, Send02, Send03, Send04,
        Send05, Send06, Send07, Send08, Send09,
        Send10, Send11, Send12, Send13, Send14,
        Send15, Send16, Send17, Send18, Send19,
        # 3d cursor
        ToSelected, ToCursor,
        # subdivide / convert / mirror
        DivSimple, DivSmooth, DivRand, VerSmooth,
        ConverttoMesh, ConverttoCurve,
        AddMmx, AddMm_x, AddMmy, AddMm_y, AddMmz, AddMm_z,
        # set template empty
        TempSingle, TempSeparate, TempContact,
    )
    for cls in addon_classes:
        bpy.utils.register_class(cls)
def unregister():
    """Unregister every class registered by register(), in the same order."""
    addon_classes = (
        MonogusaToolsPanel,
        # select
        SelectType, SelectGroup, SelectObjdata, SelectMat,
        SelectInvert, SelectAll, DeselectAll,
        # execute
        HideSelected, UnhideAll, ExecuteDelete,
        # move to layer
        Send00, Send01, Send02, Send03, Send04,
        Send05, Send06, Send07, Send08, Send09,
        Send10, Send11, Send12, Send13, Send14,
        Send15, Send16, Send17, Send18, Send19,
        # 3d cursor
        ToSelected, ToCursor,
        # subdivide / convert / mirror
        DivSimple, DivSmooth, DivRand, VerSmooth,
        ConverttoMesh, ConverttoCurve,
        AddMmx, AddMm_x, AddMmy, AddMm_y, AddMmz, AddMm_z,
        # set template empty
        TempSingle, TempSeparate, TempContact,
    )
    for cls in addon_classes:
        bpy.utils.unregister_class(cls)
# Allow running the add-on directly from Blender's text editor.
if __name__ == "__main__":
    register()
3318231 | <gh_stars>1-10
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import constant_init, kaiming_init
from ....core.ops.nonlinearities import HSwish
from ...registry import SPATIAL_TEMPORAL_MODULES
class TRGLayer(nn.Module):
    """Based on TRG network: https://arxiv.org/pdf/1908.09995.pdf

    Multi-head relational attention over the temporal axis: each head builds
    a (T x T) adjacency from embedded per-frame features, propagates the value
    features through it (a one-step graph convolution), and the heads are then
    fused per frame by a learned head-attention before a residual connection.
    """
    def __init__(self, num_channels, num_heads, embed_size, spatial_size, temporal_size):
        super(TRGLayer, self).__init__()
        self.out_channels = num_channels
        self.num_heads = num_heads
        self.embed_size = embed_size
        # Accept an explicit spatial shape (product of dims) or an int side
        # length of a square map.
        self.spatial_size = np.prod(spatial_size) if not isinstance(spatial_size, int) else spatial_size ** 2
        self.temporal_size = temporal_size
        # Scale for the attention logits, analogous to 1/sqrt(d_k).
        self.factor = 1.0 / np.sqrt(self.spatial_size * self.embed_size)
        # Query/key projections: one embed_size-wide slice per head.
        self.theta = self._project(num_channels, embed_size * num_heads)
        self.phi = self._project(num_channels, embed_size * num_heads)
        # Value projection keeps the full channel width per head.
        self.g = nn.Sequential(
            self._project(num_channels, num_channels * num_heads, with_bn=True),
            HSwish())
        self.adjacent_softmax = nn.Softmax(dim=-1)
        self.gcn_non_linearity = HSwish()
        # Mixing matrix for the per-frame attention over heads.
        self.heads_weights = nn.Parameter(torch.Tensor(num_heads, num_heads))
        self.heads_weights.data.normal_()
        self.heads_softmax = nn.Softmax(dim=-1)
        self.out_non_linearity = HSwish()
    @staticmethod
    def _project(in_channels, out_channels, with_bn=False):
        # Depthwise 1x3x3 (spatial) conv + BN + HSwish, then a 1x1x1
        # pointwise projection (bias folded into BN when with_bn=True).
        layers = [nn.Conv3d(in_channels, in_channels, bias=False, groups=in_channels,
                            kernel_size=(1, 3, 3), padding=(0, 1, 1), stride=1),
                  nn.BatchNorm3d(in_channels),
                  HSwish(),
                  nn.Conv3d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=not with_bn)]
        if with_bn:
            layers.append(nn.BatchNorm3d(out_channels, eps=1e-05, momentum=0.9, affine=True))
        return nn.Sequential(*layers)
    def forward(self, x):
        # Below: H = num_heads, T = temporal_size, S = spatial_size,
        # E = embed_size, C = out_channels.
        # theta: (N, H, T, E*S) — per-frame query vectors.
        theta = self.theta(x) \
            .view(-1, self.num_heads, self.embed_size, self.temporal_size, self.spatial_size) \
            .permute(0, 1, 3, 2, 4) \
            .reshape(-1, self.num_heads, self.temporal_size, self.embed_size * self.spatial_size)
        # phi: (N, H, E*S, T) — per-frame key vectors, transposed for matmul.
        phi = self.phi(x) \
            .view(-1, self.num_heads, self.embed_size, self.temporal_size, self.spatial_size) \
            .permute(0, 1, 2, 4, 3) \
            .reshape(-1, self.num_heads, self.embed_size * self.spatial_size, self.temporal_size)
        # g: (N, H, T, C*S) — per-frame value vectors.
        g = self.g(x) \
            .view(-1, self.num_heads, self.out_channels, self.temporal_size, self.spatial_size) \
            .permute(0, 1, 3, 2, 4) \
            .reshape(-1, self.num_heads, self.temporal_size, self.out_channels * self.spatial_size)
        # Frame-to-frame adjacency per head: (N, H, T, T).
        adjacent_matrix = self.adjacent_softmax(self.factor * torch.matmul(theta, phi))
        # One propagation step over the temporal graph.
        gcn = self.gcn_non_linearity(torch.matmul(adjacent_matrix, g))
        # Per-frame scalar summary of each head -> attention over heads.
        heads_features = gcn.mean(dim=-1).view(-1, self.num_heads, self.temporal_size).permute(0, 2, 1)
        heads_attention = self.heads_softmax(torch.matmul(heads_features, self.heads_weights))
        # Fuse the heads per frame and fold back to the input layout.
        z = torch.matmul(gcn.permute(0, 2, 3, 1), heads_attention.view(-1, self.temporal_size, self.num_heads, 1)) \
            .view(-1, self.temporal_size, self.out_channels, self.spatial_size) \
            .permute(0, 2, 1, 3) \
            .reshape_as(x)
        # Residual connection.
        out = self.out_non_linearity(x + z)
        return out
@SPATIAL_TEMPORAL_MODULES.register_module
class TRGSpatialTemporalModule(nn.Module):
    """A stack of TRGLayer blocks, preceded by an optional 1x1x1 channel
    projection when out_channels differs from in_channels."""
    def __init__(self, in_channels, out_channels, num_layers=4,
                 num_heads=8, embed_size=32, spatial_size=7, temporal_size=1):
        super(TRGSpatialTemporalModule, self).__init__()
        layers = []
        # Project to the requested channel width first, if needed.
        if out_channels != in_channels:
            layers.extend([nn.Conv3d(in_channels, out_channels, kernel_size=1, stride=1, padding=0),
                           nn.BatchNorm3d(out_channels, eps=1e-05, momentum=0.9, affine=True)])
        layers.extend([TRGLayer(out_channels, num_heads, embed_size, spatial_size, temporal_size)
                       for _ in range(num_layers)])
        self.features = nn.Sequential(*layers)
    def init_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                kaiming_init(m)
            elif isinstance(m, nn.BatchNorm3d):
                constant_init(m, 1.0, 0.0)
            elif isinstance(m, nn.Parameter):
                # NOTE(review): self.modules() yields nn.Module instances, so
                # this branch appears unreachable; heads_weights is instead
                # initialised in TRGLayer.__init__ — confirm intent.
                m.data.normal_()
    def reset_weights(self):
        # Alias kept for the framework's reset hook.
        self.init_weights()
    def forward(self, x, return_extra_data=False):
        # The module produces no extra data; an empty dict keeps the
        # two-value calling convention of sibling modules.
        if return_extra_data:
            return self.features(x), dict()
        else:
            return self.features(x)
| StarcoderdataPython |
5160201 | import random
import numpy as np
def average_total_reward(env, max_episodes=100, max_steps=10000000000):
    '''
    Run env with random actions until either max_episodes or max_steps is
    reached, then print and return the average total reward per episode.

    Reward is summed across all agents, making it unsuited for use in
    zero-sum games.
    '''
    reward_sum = 0
    steps_taken = 0
    for episode in range(max_episodes):
        # The step budget is only checked between episodes, so a started
        # episode always runs to completion.
        if steps_taken >= max_steps:
            break
        env.reset()
        for agent in env.agent_iter():
            obs, reward, done, _ = env.last()
            reward_sum += reward
            steps_taken += 1
            if done:
                action = None
            elif isinstance(obs, dict) and 'action_mask' in obs:
                # Only sample among the legal actions when a mask is given.
                action = random.choice(np.flatnonzero(obs['action_mask']))
            else:
                action = env.action_spaces[agent].sample()
            env.step(action)
    num_episodes = episode + 1
    average = reward_sum / num_episodes
    print("Average total reward", average)
    return average
| StarcoderdataPython |
3458340 | <reponame>glide23/dl
import numpy as np
# Linear Least Squares method implemented in numpy, for *invertible* X matrices only
# input:
# X - a matrix which rows hold our data's samples
# y_true - a vector which cells hold the groundtruth value for each sample
# output:
# the weights vector for each dimension of the input
def lls(X, y_true):
    """Ordinary linear least squares for an invertible normal matrix.

    Args:
        X: 2-D array-like whose rows are samples and columns are features.
        y_true: 1-D array-like with the ground-truth value for each sample.

    Returns:
        1-D ndarray of fitted weights, one per feature (column of X).
    """
    X = np.asarray(X, dtype=float)
    y_true = np.asarray(y_true, dtype=float)
    # Solve the normal equations (X^T X) w = X^T y with a linear solve —
    # numerically better behaved than forming the explicit inverse, and the
    # same result when X^T X is invertible.
    return np.linalg.solve(X.T @ X, X.T @ y_true)
# sample code
X = [[1,3],[2,5]]
y_true = [5,9]
print("calculated weights:",lls(X,y_true))  # expected output: [2. 1.]
| StarcoderdataPython |
5124272 | from unittest.mock import ANY
import pytest
from moto import mock_ec2, mock_iam, mock_sts
from itertools import islice
from cloudwanderer import URN
from cloudwanderer.aws_interface.models import AWSResourceTypeFilter
from cloudwanderer.exceptions import UnsupportedResourceTypeError, UnsupportedServiceError
from ...pytest_helpers import compare_dict_allow_any, create_iam_policy, create_iam_role
@mock_ec2
@mock_sts
def test_get_resources_of_type_in_region_eu_west_2(aws_interface):
    """The moto default VPC discovered in eu-west-2 carries the expected
    metadata; account-specific fields are matched loosely via ANY."""
    result = list(
        aws_interface.get_resources(
            service_name="ec2",
            resource_type="vpc",
            region="eu-west-2",
        )
    )[0]
    compare_dict_allow_any(
        dict(result),
        {
            "cidr_block": "172.31.0.0/16",
            "cidr_block_association_set": ANY,
            "cloudwanderer_metadata": {
                "CidrBlock": "172.31.0.0/16",
                "CidrBlockAssociationSet": [
                    {
                        "AssociationId": ANY,
                        "CidrBlock": "172.31.0.0/16",
                        "CidrBlockState": {"State": "associated"},
                    }
                ],
                "DhcpOptionsId": ANY,
                "EnableDnsSupport": True,
                "InstanceTenancy": "default",
                "Ipv6CidrBlockAssociationSet": [],
                "IsDefault": True,
                "OwnerId": ANY,
                "State": "available",
                "Tags": [],
                "VpcId": ANY,
            },
            "dependent_resource_urns": [],
            "dhcp_options_id": ANY,
            "discovery_time": ANY,
            "enable_dns_support": True,
            "instance_tenancy": "default",
            "ipv6_cidr_block_association_set": [],
            "is_default": True,
            "owner_id": ANY,
            "parent_urn": None,
            "relationships": ANY,
            "state": "available",
            "tags": [],
            "urn": ANY,
            "vpc_id": ANY,
        },
    )
@mock_iam
@mock_sts
def test_get_resources_of_type_in_region_us_east_1(aws_interface):
    """Discovering IAM roles also yields the dependent role-policy resource,
    with its parent_urn pointing back at the role."""
    create_iam_role()
    result = list(aws_interface.get_resources(service_name="iam", resource_type="role", region="us-east-1"))[0]
    compare_dict_allow_any(
        dict(result),
        {
            "urn": URN(
                cloud_name="aws",
                account_id="123456789012",
                region="us-east-1",
                service="iam",
                resource_type="role_policy",
                resource_id_parts=["test-role", "test-role-policy"],
            ),
            "relationships": [],
            "dependent_resource_urns": [],
            "parent_urn": URN(
                cloud_name="aws",
                account_id="123456789012",
                region="us-east-1",
                service="iam",
                resource_type="role",
                resource_id_parts=["test-role"],
            ),
            "cloudwanderer_metadata": {
                "RoleName": "test-role",
                "PolicyName": "test-role-policy",
                "PolicyDocument": {
                    "Version": "2012-10-17",
                    "Statement": {
                        "Effect": "Allow",
                        "Action": "s3:ListBucket",
                        "Resource": "arn:aws:s3:::example_bucket",
                    },
                },
            },
            "discovery_time": ANY,
            "role_name": "test-role",
            "policy_name": "test-role-policy",
            "policy_document": {
                "Version": "2012-10-17",
                "Statement": {"Effect": "Allow", "Action": "s3:ListBucket", "Resource": "arn:aws:s3:::example_bucket"},
            },
        },
    )
def test_get_resources_unsupported_service(aws_interface):
    # Unknown services must raise rather than silently yield nothing.
    with pytest.raises(UnsupportedServiceError):
        list(aws_interface.get_resources(service_name="unicorn_stable", resource_type="instance", region="eu-west-1"))
def test_get_resources_unsupported_resource_type(aws_interface):
    # Unknown resource types surface the missing Boto3 collection by name.
    with pytest.raises(
        UnsupportedResourceTypeError,
        match="Could not find Boto3 collection for unicorn",
    ):
        list(aws_interface.get_resources(service_name="ec2", resource_type="unicorn", region="eu-west-1"))
@mock_iam
@mock_sts
def test_jmespath_filters(aws_interface):
    """A jmespath resource-type filter restricts discovered policy versions
    to the default ones (IsDefaultVersion == true)."""
    create_iam_policy()
    result = aws_interface.get_resources(
        service_name="iam",
        resource_type="policy",
        region="us-east-1",
        service_resource_type_filters=[
            AWSResourceTypeFilter(
                service="iam", resource_type="policy_version", jmespath_filters=["[?IsDefaultVersion==`true`]"]
            )
        ],
    )
    # Sample the first 10 resources that carry the attribute; all must be True.
    assert list(islice((r.is_default_version for r in result if hasattr(r, "is_default_version")), 10)) == [True] * 10
# TODO: test custom and default filters
| StarcoderdataPython |
1666768 | import os
from collections import OrderedDict
import numpy as np
np.set_printoptions(suppress=True)
import matplotlib as mpl
from matplotlib import cm
import matplotlib.pyplot as plt
from time import time
from copy import copy
class designer():
    """Iterative optimal experimental design over a fixed candidate pool.

    Implements a Fedorov/Wynn-style exchange algorithm: at each step the
    design weight is shifted toward the candidate point that maximises the
    sensitivity function, optionally also removing weight from the least
    informative supported point.
    """
    def __init__(self,ff,weight,method='D'):
        '''
        input:
        ------
        ff: 2-D array. Rows represent points in the pool; columns represent
        parameters involved in the derivative.
        weight: 1-D array. Its length equals the total number of points in
        the pool, i.e. the number of rows of 'ff'.
        method: The criterion used for the optimization; default is the
        D-optimal method (the only one currently implemented).
        '''
        self.ff = ff
        self.m = ff.shape[1] # number of parameters
        self.weight = weight
        self.N_candidates = np.sum(weight!=0)
        self.method = method
        self.d = 0 # sensitivity function
        self.d_max = 0 # initialize the maximum of sensitivity.
        self.id_minimax = None
        self.M = 0 # information matrix
        self.M_inv = self.M # information matrix inverse
        self.psi_iter = [] # all the optimal criteria over the iterative procedure
        self.phi_iter = [] # all the sensitivity function over the iterative procedure
        self.weight_iter = []
    def cal_criterion(self,local=False):
        # Rebuild the information matrix M = sum_i w_i f_i f_i^T, then the
        # D-optimal sensitivity d_i = f_i^T M^-1 f_i for every candidate.
        self.M = 0
        for i,f in enumerate(self.ff):
            self.M += self.weight[i] * np.outer(f,f)
        self.M_inv = np.linalg.inv(self.M)
        if self.method == 'D':
            self.d = np.array([f @ self.M_inv @ f for f in self.ff])
        if local==False:
            # Global search: the candidate with the largest sensitivity.
            self.id_minimax = np.argmax(self.d)
            self.d_max = self.d[self.id_minimax]
        else:
            # Local search: least sensitive point among the supported ones
            # (zero-weight candidates are masked out).
            self.id_minimax = np.argmin(np.ma.array(self.d,mask=(self.weight==0)))
    def collect(self):
        # Record the criterion, sensitivity and weights of this iteration.
        self.psi_iter.append(np.linalg.det(self.M_inv))
        self.phi_iter.append(self.d_max)
        self.weight_iter.append(self.weight)
    def update_design(self, alpha, action='add'):
        # Shift weight toward ('add') or away from ('remove') the point at
        # self.id_minimax.  Returns 0 on success, 1 on an unknown action.
        if action == 'add':
            alpha_s = alpha
        elif action == 'remove':
            p_s = self.weight[self.id_minimax]
            alpha_s = -min(alpha, p_s/(1-p_s))
        else:
            print("Design not updated")
            return 1
        self.weight = self.weight * (1-alpha_s) # reduce current design by alpha
        self.weight[self.id_minimax] += alpha_s # add the new point weighted by alpha
        self.weight = self.weight / sum(self.weight) # renormalize weight
        return 0
    def optimize(self,verbose=False,delta=1e-5,max_steps=1e6,remove=False):
        # Iterate until the maximum sensitivity approaches the number of
        # parameters m (the D-optimal equivalence-theorem bound), within
        # relative tolerance delta, or until max_steps is reached.
        if delta == None:
            threshold = 0 # no limit on "d_max"
        else:
            threshold = self.m / (1-delta)
        # the stop condition: either maximum steps or threshold met.
        stop = lambda s: s >= max_steps or self.d_max <= threshold
        step = 0
        self.cal_criterion(local=False)
        self.collect()
        while not stop(step):
            step += 1
            alpha = 1 / (1+step+self.N_candidates) # step length
            self.cal_criterion(local=False)
            if self.update_design(alpha,action='add'):
                break
            if remove == True:
                self.cal_criterion(local=True)
                if self.update_design(alpha,action='remove'):
                    break
            self.collect()
        if verbose:
            print('Iteration steps: {}'.format(step))
            print('criterion: {:.3f}'.format(self.m/self.d_max))
def timer():
    # Placeholder: timing utility was never implemented.
    pass
| StarcoderdataPython |
6633052 | <filename>src/OrganMatching/views.py<gh_stars>1-10
from django.http import HttpResponse
from django.shortcuts import render, redirect
from OrganMatching.misc import *
from OrganMatching.algo import *
# Option lists rendered into the patient form's drop-downs.
blood_groups = ["A", "B", "AB", "O"]
rhesus_factors = ["+", "-"]
reports = ["Positive", "Negative"]
def index(request):
    """Login page: clear any stale session user, then show the login form."""
    # Dict-style pop() is a no-op when the key is absent, replacing the
    # original bare try/except around `del request.session['user']`.
    request.session.pop('user', None)
    return render(request, "OrganMatching/login.html")
def admin(request):
    # NOTE(review): render() does not normally raise, so the except branch is
    # effectively dead — this view performs no real authorisation check and
    # any visitor reaching it sees admin.html. Confirm intended gating
    # (e.g. checking request.session['user'] == "admin").
    try:
        return render(request, "OrganMatching/admin.html")
    except:
        return render(request, "OrganMatching/notadmin.html")
def submit(request):
    """Handle the login form: route the admin to the admin page, patients to
    the patient form, and re-render with an error on validation failure."""
    if request.method == 'GET':
        return render(request, "OrganMatching/lost.html")
    if request.method == 'POST':
        username = request.POST.get("Username")
        # Fix: this line was damaged to `<PASSWORD>("Password")` (dataset
        # anonymisation); the blank-field check below expects the posted value.
        password = request.POST.get("Password")
        if username == "" or password == "":
            # NOTE(review): this renders lost.html with the error; the other
            # error paths use login.html — confirm which page is intended.
            return render(request, "OrganMatching/lost.html", {"Username": username, "Error": "Both fields must be filled!"})
        ADMIN = "Shikhar"
        ############### UNCOMMENT FOR NO AUTHENTICATION ################
        user_id = username
        auth = True
        ################################################################
        if auth:
            if username == ADMIN:
                request.session['user'] = "admin"
                return redirect("admin")
            else:
                patient = get_content(user_id)
                donors = get_donors()
                if not donors:
                    return render(request, "OrganMatching/login.html", {"Username": username, "Error": "donors.csv has not been uploaded by the admin."})
                return render(request, "OrganMatching/index.html", {"Username": username, "User_ID": user_id, "patient": patient, "donors": donors, "blood_groups": blood_groups, "rhesus_factors": rhesus_factors, "reports": reports, "range":range(len(patient) - 6), "orgreq":patient[6:]})
        else:
            return render(request, "OrganMatching/login.html", {"Username": username, "Error": "Your credentials are incorrect!"})
def resultview(request):
    # Run the Gale-Shapley matching over the uploaded CSVs and render the
    # result page; any failure falls through to the not-admin page.
    try:
        final_list, donor_list = gale_shapley("static/donors.csv", "static/patients.csv")
        return render(request, "OrganMatching/result.html", {"final_list": final_list})
    except:
        # NOTE(review): bare except hides real errors (missing files, bad
        # CSVs); consider narrowing the exception type.
        return render(request, "OrganMatching/notadmin.html")
def resultcsv(request):
    # Same matching as resultview(), but streamed back as a downloadable CSV.
    try:
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="results.csv"'
        final_list, donor_list = gale_shapley("static/donors.csv", "static/patients.csv")
        writer = csv.writer(response)
        writer.writerows(final_list)
        return response
    except:
        # NOTE(review): bare except hides real errors; consider narrowing.
        return render(request, "OrganMatching/notadmin.html")
def upload(request):
    # Admin CSV upload: requires exactly two files (donors and patients).
    if request.method == 'GET':
        return render(request, "OrganMatching/lost.html")
    if request.method == 'POST':
        if len(request.FILES) != 2:
            return render(request, "OrganMatching/admin.html", {"Error": "Choose Both Files!"})
        # improvise() is a project helper; presumably it persists the two
        # uploaded files — confirm against its definition.
        improvise(request.FILES['file1'], request.FILES['file2'])
        return render(request, "OrganMatching/uploaded.html")
def saved(request):
    # Patient form handler: validate, persist to CSV, warn if under-age, or
    # re-render the form with the validation error.
    if request.method == 'GET':
        return render(request, "OrganMatching/lost.html")
    if request.method == 'POST':
        post_data = request.POST
        # is_correct() is a project helper; it returns the string "None" when
        # the submission passes validation.
        error = is_correct(post_data)
        if error == "None":
            edit_csv(post_data)
            warn = ""
            if int(post_data.get("Age")) < 18:
                warn = "Your age is below the legally permitted age, but the form was submitted!"
            return render(request, "OrganMatching/saved.html", {"Warn": warn})
        else:
            user_id = post_data.get("User_ID")
            username = post_data.get("Username")
            # Rebuild the patient record from the posted fields, including a
            # variable number of Organ_Requirement<i> entries.
            patient = [user_id, post_data.get("Name"), post_data.get("Blood_Report"), post_data.get("Age"), post_data.get("Blood_Group"), post_data.get("Rhesus_Factor")]
            for i in range(len(post_data) - 8):
                patient.append(post_data.get("Organ_Requirement" + str(i + 1)))
            donors = get_donors()
            return render(request, "OrganMatching/index.html", {"Username": username, "User_ID": user_id, "patient": patient, "donors": donors, "blood_groups": blood_groups, "rhesus_factors": rhesus_factors, "reports": reports, "range":range(len(patient) - 6), "orgreq":patient[6:], "Error": error})
| StarcoderdataPython |
6630565 | """Tests for adapter_filter module.
"""
import logging
import unittest
from catch.filter import adapter_filter as af
from catch.filter import candidate_probes as cp
from catch import genome
from catch import probe
from catch.utils import interval
__author__ = '<NAME> <<EMAIL>>'
class TestAdapterFilter(unittest.TestCase):
"""Tests the adapter filter output on contrived input.
"""
    def setUp(self):
        """Silence INFO logging and define the adapter sequences shared by tests."""
        # Disable logging
        logging.disable(logging.INFO)
        # Specify default adapter sequences
        self.ADAPTER_A_5END = 'ATACGCCATGCTGGGTCTCC'
        self.ADAPTER_A_3END = 'CGTACTTGGGAGTCGGCCAT'
        self.ADAPTER_B_5END = 'AGGCCCTGGCTGCTGATATG'
        self.ADAPTER_B_3END = 'GACCTTTTGGGACAGCGGTG'
    def get_filter_and_output(self, lcf_thres, mismatches, target_genomes,
                              input_probes, k, num_kmers_per_probe):
        """Build an AdapterFilter with the shared adapters and run it.

        Returns the filter instance and its output probes.
        NOTE(review): k and num_kmers_per_probe are accepted but unused —
        kmer_probe_map_k is hard-coded to 3; confirm intent.
        """
        f = af.AdapterFilter((self.ADAPTER_A_5END, self.ADAPTER_A_3END),
                             (self.ADAPTER_B_5END, self.ADAPTER_B_3END),
                             mismatches=mismatches,
                             lcf_thres=lcf_thres,
                             kmer_probe_map_k=3)
        output_probes = f.filter(input_probes, target_genomes)
        return (f, output_probes)
def assert_has_adapter(self, probe, adapter):
"""Assert that probe has a particular adapter.
Args:
probe: probe to check
adapter: check if 'probe' has this adapter
Returns:
whether 'probe' starts and ends with either an 'A' or 'B'
adapter, as specified by 'adapter'
"""
if adapter == 'A':
start = self.ADAPTER_A_5END
end = self.ADAPTER_A_3END
elif adapter == 'B':
start = self.ADAPTER_B_5END
end = self.ADAPTER_B_3END
else:
raise ValueError("Unknown adapter %s" % adapter)
self.assertTrue(self.probe.seq_str.startswith(start))
self.assertTrue(self.probe.seq_str.endswith(end))
    def make_probes_with_adapters(self, probe_str_a, probe_str_b):
        """Make probes with both 'A' and 'B' adapters.

        Args:
            probe_str_a: list of strings of the sequences of probes that
                should receive an 'A' adapter
            probe_str_b: list of strings of the sequences of probes that
                should receive a 'B' adapter

        Returns:
            list of probe.Probe instances built from the input, each wrapped
            in the corresponding adapter's 5' and 3' sequences
        """
        probes = []
        for p_str in probe_str_a:
            probes += [probe.Probe.from_str(p_str).with_prepended_str(
                self.ADAPTER_A_5END).with_appended_str(self.ADAPTER_A_3END)]
        for p_str in probe_str_b:
            probes += [probe.Probe.from_str(p_str).with_prepended_str(
                self.ADAPTER_B_5END).with_appended_str(self.ADAPTER_B_3END)]
        return probes
def convert_target_genomes(self, target_genomes):
    """Convert a nested list of genome strings into genome.Genome instances.

    Args:
        target_genomes: nested list of genomes, as strings

    Returns:
        nested list with the same grouping structure, each genome wrapped
        via genome.Genome.from_one_seq
    """
    return [[genome.Genome.from_one_seq(seq) for seq in group]
            for group in target_genomes]
def test_one_genome(self):
    """Single genome: adapters should alternate A/B along its tiling probes."""
    genomes = self.convert_target_genomes([['ABCDEFGHIJKLMNOPQRSTUVWXYZ']])
    # Candidate probes: 6 bp long with a 3 bp stride over every sequence.
    candidates = []
    for group in genomes:
        for g in group:
            candidates.extend(cp.make_candidate_probes_from_sequences(
                g.seqs, probe_length=6, probe_stride=3))
    f, output = self.get_filter_and_output(6, 0, genomes, candidates, 3, 10)
    expected = self.make_probes_with_adapters(
        ['ABCDEF', 'GHIJKL', 'MNOPQR', 'STUVWX'],
        ['DEFGHI', 'JKLMNO', 'PQRSTU', 'UVWXYZ'])
    self.assertCountEqual(output, expected)
def test_two_genomes(self):
    """Two genomes in separate groups: adapters alternate along each genome."""
    genomes = self.convert_target_genomes(
        [['ABCDEFGHIJKLMNOPQRSTUVWXYZ'], ['ZYXWVUTSRQPONMLKJIHGFEDCBA']])
    # Candidate probes: 6 bp long with a 3 bp stride over every sequence.
    candidates = []
    for group in genomes:
        for g in group:
            candidates.extend(cp.make_candidate_probes_from_sequences(
                g.seqs, probe_length=6, probe_stride=3))
    f, output = self.get_filter_and_output(6, 0, genomes, candidates, 3, 10)
    expected = self.make_probes_with_adapters(
        ['ABCDEF', 'GHIJKL', 'MNOPQR', 'STUVWX', 'ZYXWVU', 'TSRQPO',
         'NMLKJI', 'HGFEDC'],
        ['DEFGHI', 'JKLMNO', 'PQRSTU', 'UVWXYZ', 'WVUTSR', 'QPONML',
         'KJIHGF', 'FEDCBA'])
    self.assertCountEqual(output, expected)
def test_almost_identical_probe(self):
    """Four probes that align like:
        ------        ------
              ------
              ------
    where the two middle probes are identical up to one mismatch. The
    outer pair should receive 'A' adapters, the middle pair 'B' adapters.
    """
    genomes = self.convert_target_genomes(
        [['ABCDEFGHIJKLMNOP', 'ABCDEFGHXJKLMNOP']])
    candidates = [probe.Probe.from_str(s)
                  for s in ('ABCDEF', 'FGHIJK', 'FGHXJK', 'KLMNOP')]
    for allowed_mismatches in (0, 1):
        f, output = self.get_filter_and_output(6, allowed_mismatches,
                                               genomes, candidates, 3, 100)
        expected = self.make_probes_with_adapters(['ABCDEF', 'KLMNOP'],
                                                  ['FGHIJK', 'FGHXJK'])
        self.assertCountEqual(output, expected)
        # Also verify the underlying (A, B) vote tallies.
        votes = f._make_votes_across_target_genomes(candidates, genomes)
        if allowed_mismatches == 0:
            # Each middle probe matches exactly one of the two genomes.
            self.assertEqual(votes, [(2, 0), (0, 1), (0, 1), (2, 0)])
        if allowed_mismatches == 1:
            # With one mismatch allowed, both middle probes match both genomes.
            self.assertEqual(votes, [(2, 0), (0, 2), (0, 2), (2, 0)])
def test_misaligned(self):
    """Probes aligning to two genomes where the alignments to one genome
    are offset relative to the other.
    """
    genomes = self.convert_target_genomes(
        [['ABCDEFGHIJKLMNOPQR', 'XYZABCDEFGHIJKLMNOPQR']])
    candidates = [probe.Probe.from_str(s)
                  for s in ('XYZABC', 'ABCDEF', 'DEFGHI', 'GHIJKL',
                            'JKLMNO', 'MNOPQR')]
    f, output = self.get_filter_and_output(6, 0, genomes, candidates, 3, 10)
    # Assumes 'ABCDEF' receives the 'A' adapter and 'XYZABC' the 'B'
    # adapter, and so on; the flipped assignment would also be valid.
    expected = self.make_probes_with_adapters(
        ['ABCDEF', 'GHIJKL', 'MNOPQR'],
        ['XYZABC', 'DEFGHI', 'JKLMNO'])
    self.assertCountEqual(output, expected)
    # Also verify the underlying (A, B) vote tallies.
    votes = f._make_votes_across_target_genomes(candidates, genomes)
    self.assertEqual(votes, [(0, 1), (2, 0), (0, 2), (2, 0), (0, 2),
                             (2, 0)])
def test_three_genomes(self):
    """Two probes that are adjacent in one genome but overlap in two
    others; one should get the 'A' adapter and the other the 'B' adapter.
    """
    genomes = self.convert_target_genomes(
        [['ABCDEFGHEFKLMN', 'ABCDEFKLMN', 'ABCDEFKLMNO']])
    candidates = [probe.Probe.from_str(s) for s in ('ABCDEF', 'EFKLMN')]
    f, output = self.get_filter_and_output(6, 0, genomes, candidates, 3, 10)
    expected = self.make_probes_with_adapters(['ABCDEF'], ['EFKLMN'])
    self.assertCountEqual(output, expected)
    # Also verify the underlying (A, B) vote tallies.
    votes = f._make_votes_across_target_genomes(candidates, genomes)
    self.assertEqual(votes, [(3, 0), (1, 2)])
def test_with_mismatches(self):
    """Adapter assignment across seven genome variants with one mismatch
    allowed during alignment."""
    genomes = self.convert_target_genomes(
        [['ABCDEFGHIJKLMNO', 'ABCXEFGXIJKXMNO',
          'ABCDEFGYYJKLMNO', 'ABCDEXGHIJKLXNO',
          'ABCDEFGHIJKLMNX', 'AXCDEFGHIJKLMNO',
          'ABCDEFGHIYYLMNO']])
    candidates = [probe.Probe.from_str(s)
                  for s in ('ABCDEF', 'DEFGHI', 'GHIJKL', 'JKLMNO',
                            'DEFGYY', 'GYYJKL', 'IYYLMN')]
    f, output = self.get_filter_and_output(6, 1, genomes, candidates, 3, 10)
    expected = self.make_probes_with_adapters(
        ['ABCDEF', 'GHIJKL', 'GYYJKL', 'IYYLMN'],
        ['DEFGHI', 'JKLMNO', 'DEFGYY'])
    self.assertCountEqual(output, expected)
def tearDown(self):
    """Restore normal logging after each test."""
    logging.disable(logging.NOTSET)
| StarcoderdataPython |
1729057 | <gh_stars>1-10
import numpy as np
from typing import List
class Tuple(object):
    """A Greenwald-Khanna summary entry: a value together with its weight
    (g) and rank uncertainty (delta)."""

    # __slots__ avoids a per-instance __dict__; sketches hold many of these.
    __slots__ = ["val", "g", "delta"]

    def __init__(self, val, g, delta):
        self.val = val      # observed value
        self.g = g          # weight collapsed into this entry
        self.delta = delta  # uncertainty in the rank of 'val'

    def __repr__(self):
        return '%s[%s,%s]' % (self.val, self.g, self.delta)
class GKArray(object):
    """Greenwald-Khanna-style quantile sketch over a stream of weighted values.

    Maintains a sorted, compressed list of Tuple(val, g, delta) summaries.
    NOTE(review): ``eps`` defaults to None, but ``mergeInternal`` multiplies
    by it — callers must supply a numeric eps; confirm the intended default.
    """

    def __init__(self, eps=None):
        # Target relative rank-error bound of the sketch.
        self.eps = eps
        # Sorted list of Tuple summaries.
        self.tuples = []
        # Total weight (number of observations) merged so far.
        self.nSize = 0

    def __repr__(self):
        return str(self.tuples)

    def mergeInternal(self, addTuples: List[Tuple]):
        """Merge the value-sorted list 'addTuples' into this sketch.

        Performs a single merge pass over both sorted lists, collapsing
        adjacent summaries whose combined weight stays within the threshold.
        NOTE(review): Tuple objects from both inputs are mutated in place
        (their ``delta`` and ``g`` fields are adjusted).
        """
        # Total weight contributed by the incoming tuples.
        numAdded = sum([t.g for t in addTuples])
        oldSize = self.nSize  # NOTE(review): unused; kept as-is.
        self.nSize += numAdded
        tuples = self.tuples
        i1 = 0
        n1 = len(tuples)
        i2 = 0
        n2 = len(addTuples)
        # Compression threshold: adjacent entries whose combined
        # g + delta stays at or below this may be merged.
        threshold = 2.0*self.eps*self.nSize
        newTuples = []
        tLast = None
        i = 0
        while i1 < n1 or i2 < n2:
            if i > 0:
                # Most recently emitted tuple: candidate for collapsing.
                tLast = newTuples[i-1]
            if i1 == n1:
                # Existing list exhausted: drain the added list.
                t2 = addTuples[i2]
                tNext = t2
                i2 += 1
            elif i2 == n2:
                # Added list exhausted: drain the existing list.
                t1 = tuples[i1]
                tNext = t1
                i1 += 1
            else:
                t1 = tuples[i1]
                t2 = addTuples[i2]
                if t1.val <= t2.val:
                    tNext = t1
                    # Absorb the rank uncertainty contributed by the other
                    # stream's current entry.
                    tNext.delta += (t2.g + t2.delta - 1)
                    i1 += 1
                else:
                    tNext = t2
                    tNext.delta += (t1.g + t1.delta - 1)
                    i2 += 1
            # NOTE(review): collapse only starts once two entries have been
            # emitted (i > 1); verify this bound against the GK merge spec.
            if ((i > 1) and tLast.g + tNext.g + tNext.delta <= threshold):
                # Collapse: fold the previous entry's weight into this one.
                tNext.g += tLast.g
                newTuples[i-1] = tNext
            else:
                newTuples.append(tNext)
                i += 1
        self.tuples = newTuples

    def add_pairs(self, val_weight_pairs):
        """Insert an iterable of (value, weight) pairs into the sketch."""
        # Merge requires the incoming tuples sorted by value.
        sorted_vals = sorted(val_weight_pairs, key=lambda x: x[0])
        tuples = [Tuple(vp[0], vp[1], 0) for vp in sorted_vals]
        self.mergeInternal(tuples)

    def get_dict(self):
        """Return a {value: g-weight} view of the current summary entries."""
        return {t.val: t.g for t in self.tuples}
| StarcoderdataPython |
82066 | #
# Copyright 2022 The AI Flow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import shutil
from typing import Text, Dict, Any
from ai_flow.plugin_interface.blob_manager_interface import BlobManager
class LocalBlobManager(BlobManager):
    """
    LocalBlobManager is an implementation of BlobManager based on the local file system.
    LocalBlobManager contains configuration items:
        1. root_directory: The upload directory of the project
           (exposed by the base class as ``self.root_dir``).
    """

    def __init__(self, config: Dict[str, Any]):
        super().__init__(config)
        # ``root_dir`` is presumably populated by the BlobManager base class
        # from the 'root_directory' option — TODO confirm against base class.
        if not self.root_dir:
            raise Exception('`root_directory` option of blob manager config is not configured.')

    def upload(self, local_file_path: Text) -> Text:
        """
        Upload a given file to blob server. Uploaded file will be placed under self.root_dir.

        :param local_file_path: the path of file to be uploaded.
        :return: the uri of the uploaded file in blob server.
        """
        # exist_ok avoids the race-prone try/except around makedirs.
        os.makedirs(self.root_dir, exist_ok=True)
        dest_path = os.path.join(self.root_dir, os.path.basename(local_file_path))
        shutil.move(local_file_path, dest_path)
        return dest_path

    def download(self, remote_file_path: Text, local_dir: Text = None) -> Text:
        """
        Download file from remote blob server to local directory.
        Only files located in self.root_dir can be downloaded by BlobManager.

        :param remote_file_path: The path of file to be downloaded.
        :param local_dir: the local directory; if None, the remote path is
                          returned directly (it is already a local path).
        :return: the local uri of the downloaded file.
        """
        self._check_remote_path_legality(remote_file_path)
        if local_dir is None:
            return remote_file_path
        dest_path = os.path.join(local_dir, os.path.basename(remote_file_path))
        # Avoid copying a file onto itself.
        if remote_file_path != dest_path:
            shutil.copy(remote_file_path, dest_path)
        return dest_path

    def _check_remote_path_legality(self, file_path: Text):
        """
        Check if the file can be downloaded by blob manager.

        Security fix: the previous string-prefix check
        (``file_path.startswith(self.root_dir)``) accepted sibling
        directories such as '/data-evil' for root '/data'. Compare
        normalized absolute paths instead.

        :param file_path: The path of file to be checked.
        """
        root = os.path.abspath(self.root_dir)
        path = os.path.abspath(file_path)
        try:
            inside = os.path.commonpath([root, path]) == root
        except ValueError:
            # Raised on Windows for paths on different drives.
            inside = False
        if not inside:
            raise Exception("Cannot download {} from blob server".format(file_path))
| StarcoderdataPython |
8113202 | """
A simple and basic Python 3 https://aoe2.net/ API wrapper for sending `GET requests`.
Available on GitHub (+ documentation): https://github.com/sixP-NaraKa/aoe2net-api-wrapper
Additional data manipulation/extraction from the provided data by this API wrapper has to be done by you, the user.
See https://aoe2.net/#api & https://aoe2.net/#nightbot.
"""
import requests
import json as jsn
# api base urls
API_BASE_URL = "https://aoe2.net/api"
NIGHTBOT_BASE_URL = API_BASE_URL + "/nightbot"  # "https://aoe2.net/api/nightbot"

# request api base urls (api endpoints) - these endpoints return JSON bodies
STRINGS_URL = API_BASE_URL + "/strings"
LEADERBOARD_URL = API_BASE_URL + "/leaderboard"
LOBBIES_URL = API_BASE_URL + "/lobbies"
LAST_MATCH_URL = API_BASE_URL + "/player/lastmatch"
MATCH_HISTORY_URL = API_BASE_URL + "/player/matches"
RATING_HISTORY_URL = API_BASE_URL + "/player/ratinghistory"
MATCHES_URL = API_BASE_URL + "/matches"
MATCH_URL = API_BASE_URL + "/match"
NUMBERS_ONLINE_URL = API_BASE_URL + "/stats/players"

# request nightbot api base urls (api endpoints) - these endpoints return
# plain text; NOTE(review): the trailing '?' looks intentional and is
# harmless to requests' query-string handling - confirm before removing.
RANK_DETAILS_URL = NIGHTBOT_BASE_URL + "/rank?"
RECENT_OPPONENT_URL = NIGHTBOT_BASE_URL + "/opponent?"
CURRENT_MATCH_URL = NIGHTBOT_BASE_URL + "/match?"
CURRENT_CIVS_URL = NIGHTBOT_BASE_URL + "/civs?"
CURRENT_MAP_URL = NIGHTBOT_BASE_URL + "/map?"

# request headers sent with every GET request
headers = {'content-type': 'application/json;charset=UTF-8'}
# simple base exception class, to raise errors with
class Aoe2NetException(Exception):
    """Raised for AoE2.net API errors (unexpected status codes or malformed responses)."""
""" ----------------------------------------------- HELPER FUNCTIONS -----------------------------------------------"""
def _is_valid_kwarg(provided: dict, available: dict):
"""
Helper function to check if a user provided dictionary has the correct arguments,
compared to a dictionary with the actual available arguments.
Updates, if no difference found, the dictionary 'available'.
Parameters
----------
provided : `dict`
The user defined dictionary of optional additional arguments.
available : `dict`
The available optional additional arguments possible.
:raises KeyError:
invalid additional keyword argument supplied
"""
diff = provided.keys() - available.keys()
if diff: # if there are differences
msg = "invalid optional keyword argument passed: {}. Available arguments: {}".format(diff, list(available.keys()))
raise KeyError(msg)
available.update(provided)
return available
def _get_request_response(url: str, params: dict = None, json: bool = True):
    """
    Helper function to request data.

    For the NIGHTBOT_API calls, the returned data is not JSON, but plain text;
    those callers pass json=False and read response.text themselves.

    Parameters
    ----------
    url : `str`
        The request to call the API with.
    params : `dict`
        A dictionary of parameters that will be used for a GET request.
    json : `bool`
        Specifies if the request response should be returned in JSON format. Defaults to True.

    :return:
        the parsed JSON body (if 'json' is True) or the response object
    :raises requests.exceptions.RequestException:
        if an exception happens during the request handling
    :raises Aoe2NetException:
        if the status code of the response is not 200, or the body is not
        valid JSON
    """
    # Let request-level errors propagate unchanged: the original caught
    # RequestException only to re-raise a fresh RequestException(rer), which
    # collapsed every subclass (Timeout, ConnectionError, ...) into the base
    # type and discarded the original traceback.
    response = requests.get(url, params=params, headers=headers)
    if response.status_code != 200:
        msg = "Expected status code 200 - got {}.".format(response.status_code)
        raise Aoe2NetException(msg)
    if json:
        try:
            response = response.json()
        except jsn.JSONDecodeError as jde:
            # Chain the decode error so the root cause stays visible.
            raise Aoe2NetException(jde) from jde
    return response
""" ------------------------------------------- API REQUESTS (class API) -------------------------------------------"""
class API:
    """
    Wrapper for the https://aoe2.net/#api endpoints.

    Every method sends a GET request and returns the parsed JSON body when
    'json' is True (the default), otherwise the raw response object.

    Leaderboard IDs used throughout:
        0 -> Unranked, 1 -> 1v1 Deathmatch, 2 -> Team Deathmatch,
        3 -> 1v1 Random Map, 4 -> Team Random Map
    """

    @staticmethod
    def _check_count(count: int, limit: int):
        """Raise Aoe2NetException when 'count' exceeds the endpoint's limit."""
        if count > limit:
            raise Aoe2NetException("'count' has to be {} or less.".format(limit))

    @staticmethod
    def _check_player_identifiers(steam_id: str, profile_id: str):
        """Raise Aoe2NetException unless a steam_id or profile_id was given."""
        if not steam_id and not profile_id:
            raise Aoe2NetException("Either 'steam_id' or 'profile_id' required.")

    def get_strings(self, game: str = "aoe2de", json: bool = True):
        """
        Request the list of strings used by the API.

        :param game: "aoe2de" (Definitive Edition) or "aoe2hd" (HD Edition).
        :param json: return parsed JSON if True, else the response object.
        :return: the data in json format (if set), otherwise the plain response object.
        """
        return _get_request_response(url=STRINGS_URL, params={"game": game}, json=json)

    def get_leaderboard(self, leaderboard_id: int = 3, start: int = 1, count: int = 10,
                        json: bool = True, **kwargs):
        """
        Request entries of the given leaderboard.

        :param leaderboard_id: leaderboard to query (see class docstring); defaults to 3 (1v1 RM).
        :param start: first entry to extract; ignored if 'search', 'steam_id'
                      or 'profile_id' is supplied.
        :param count: number of entries to extract (max. 10000).
        :param json: return parsed JSON if True, else the response object.
        :keyword search: player name to search for (all matches returned).
        :keyword steam_id: steamID64 (ex: 76561199003184910); takes precedence
                           over both 'search' and 'profile_id'.
        :keyword profile_id: profile ID (ex: 459658); takes precedence over 'search'.
        :return: the data in json format (if set), otherwise the plain response object.
        :raises Aoe2NetException: 'count' has to be 10000 or less.
        """
        self._check_count(count, 10000)
        optionals = _is_valid_kwarg(kwargs, {"search": "", "steam_id": "", "profile_id": ""})
        params = {"game": "aoe2de", "leaderboard_id": leaderboard_id,
                  "start": start, "count": count}
        params.update(optionals)
        return _get_request_response(url=LEADERBOARD_URL, params=params, json=json)

    def get_open_lobbies(self, game: str = "aoe2de", json: bool = True):
        """
        Request all open lobbies.

        :param game: "aoe2de" (Definitive Edition) or "aoe2hd" (HD Edition).
        :param json: return parsed JSON if True, else the response object.
        :return: the data in json format (if set), otherwise the plain response object.
        """
        return _get_request_response(url=LOBBIES_URL, params={"game": game}, json=json)

    def get_last_match(self, steam_id: str = "", profile_id: str = "", json: bool = True):
        """
        Request the last match a player started playing; this is the current
        match if they are still in game. Either 'steam_id' or 'profile_id' required.

        :param steam_id: steamID64 (ex: 76561199003184910); takes precedence over 'profile_id'.
        :param profile_id: profile ID (ex: 459658).
        :param json: return parsed JSON if True, else the response object.
        :return: the data in json format (if set), otherwise the plain response object.
        :raises Aoe2NetException: Either 'steam_id' or 'profile_id' required.
        """
        self._check_player_identifiers(steam_id, profile_id)
        params = {"steam_id": steam_id, "profile_id": profile_id}
        return _get_request_response(url=LAST_MATCH_URL, params=params, json=json)

    def get_match_history(self, start: int = 0, count: int = 5,
                          steam_id: str = "", profile_id: str = "", json: bool = True):
        """
        Request the match history for a player. Either 'steam_id' or 'profile_id' required.

        :param start: first entry to extract (0 = most recent match).
        :param count: number of entries to extract (max. 1000).
        :param steam_id: steamID64; takes precedence over 'profile_id'.
        :param profile_id: profile ID.
        :param json: return parsed JSON if True, else the response object.
        :return: the data in json format (if set), otherwise the plain response object.
        :raises Aoe2NetException: 'count' has to be 1000 or less. ||
                                  Either 'steam_id' or 'profile_id' required.
        """
        self._check_count(count, 1000)
        self._check_player_identifiers(steam_id, profile_id)
        params = {"start": start, "count": count,
                  "steam_id": steam_id, "profile_id": profile_id}
        return _get_request_response(url=MATCH_HISTORY_URL, params=params, json=json)

    def get_rating_history(self, leaderboard_id: int = 3, start: int = 0, count: int = 100,
                           steam_id: str = "", profile_id: str = "", json: bool = True):
        """
        Request the rating history for a player. Either 'steam_id' or 'profile_id' required.

        :param leaderboard_id: leaderboard to query (see class docstring); defaults to 3 (1v1 RM).
        :param start: first entry to extract (0 = most recent); ignored if
                      'steam_id' or 'profile_id' is defined.
        :param count: number of entries to extract (max. 10000).
        :param steam_id: steamID64; takes precedence over 'profile_id'.
        :param profile_id: profile ID.
        :param json: return parsed JSON if True, else the response object.
        :return: the data in json format (if set), otherwise the plain response object.
        :raises Aoe2NetException: 'count' has to be 10000 or less. ||
                                  Either 'steam_id' or 'profile_id' required.
        """
        self._check_count(count, 10000)
        self._check_player_identifiers(steam_id, profile_id)
        params = {"leaderboard_id": leaderboard_id, "start": start, "count": count,
                  "steam_id": steam_id, "profile_id": profile_id}
        return _get_request_response(url=RATING_HISTORY_URL, params=params, json=json)

    def get_matches(self, count: int = 5, json: bool = True, **kwargs):
        """
        Request the global match history, optionally restricted to a time frame.
        If 'since' is not given, only the 'count' most recent matches are returned.

        :param count: number of entries to extract (max. 1000).
        :param json: return parsed JSON if True, else the response object.
        :keyword since: only show matches after this timestamp (ex: 1596775000).
        :return: the data in json format (if set), otherwise the plain response object.
        :raises Aoe2NetException: 'count' has to be 1000 or less.
        """
        self._check_count(count, 1000)
        optionals = _is_valid_kwarg(kwargs, {"since": ""})
        params = {"count": count}
        params.update(optionals)
        return _get_request_response(url=MATCHES_URL, params=params, json=json)

    def get_match(self, uuid: str = "", match_id: str = "", json: bool = True):
        """
        Request a single match (globally). Either 'uuid' or 'match_id' required.

        :param uuid: the match UUID; takes precedence over 'match_id'.
        :param match_id: the match ID.
        :param json: return parsed JSON if True, else the response object.
        :return: the data in json format (if set), otherwise the plain response object.
        :raises Aoe2NetException: Either 'uuid' or 'match_id' required.
        """
        if not uuid and not match_id:
            raise Aoe2NetException("Either 'uuid' or 'match_id' required.")
        params = {"uuid": uuid, "match_id": match_id}
        return _get_request_response(url=MATCH_URL, params=params, json=json)

    def get_num_online(self, game: str = "aoe2de", json: bool = True):
        """
        Request the current player numbers.

        :param game: "aoe2de" (Definitive Edition) or "aoe2hd" (HD Edition).
        :param json: return parsed JSON if True, else the response object.
        :return: the data in json format (if set), otherwise the plain response object.
        """
        return _get_request_response(url=NUMBERS_ONLINE_URL, params={"game": game}, json=json)
""" ------------------------------------ NIGHTBOT API REQUESTS (class Nightbot) ------------------------------------"""
class Nightbot:
    """
    Wrapper for the https://aoe2.net/#nightbot endpoints.

    These endpoints return plain text only; every method returns response.text.
    The API returns "Player not found" when no player matches the criteria.

    Leaderboard IDs: 0 -> Unranked, 1 -> 1v1 Deathmatch, 2 -> Team Deathmatch,
    3 -> 1v1 Random Map, 4 -> Team Random Map.
    Identifier precedence: 'steam_id' over 'profile_id' over 'search'.
    """

    @staticmethod
    def _check_identifiers(search: str, steam_id: str, profile_id: str):
        """Raise Aoe2NetException unless at least one identifier was given."""
        if not search and not steam_id and not profile_id:
            raise Aoe2NetException("Either 'search', 'steam_id' or 'profile_id' required.")

    def get_rank_details(self, search: str = "", steam_id: str = "",
                         profile_id: str = "", leaderboard_id: int = 3):
        """
        Request the rank details of a player on the given leaderboard.
        Either 'search', 'steam_id' or 'profile_id' required.

        :param search: player name; the highest rated match is returned.
        :param steam_id: steamID64 (ex: 76561199003184910).
        :param profile_id: profile ID (ex: 459658).
        :param leaderboard_id: leaderboard to query (see class docstring).
        :return: the response.text
        :raises Aoe2NetException: Either 'search', 'steam_id' or 'profile_id' required.
        """
        self._check_identifiers(search, steam_id, profile_id)
        params = {"flag": "false", "search": search, "steam_id": steam_id,
                  "profile_id": profile_id, "leaderboard_id": leaderboard_id}
        return _get_request_response(url=RANK_DETAILS_URL, params=params, json=False).text

    def get_recent_opp(self, search: str = "", steam_id: str = "",
                       profile_id: str = "", leaderboard_id: int = 3):
        """
        Request the rank details of the most recent opponent of a player (1v1 only).
        Either 'search', 'steam_id' or 'profile_id' required.

        :param search: player name; the highest rated match is returned.
        :param steam_id: steamID64 (ex: 76561199003184910).
        :param profile_id: profile ID (ex: 459658).
        :param leaderboard_id: leaderboard to query (used with 'search').
        :return: the response.text
        :raises Aoe2NetException: Either 'search', 'steam_id' or 'profile_id' required.
        """
        self._check_identifiers(search, steam_id, profile_id)
        params = {"flag": "false", "search": search, "steam_id": steam_id,
                  "profile_id": profile_id, "leaderboard_id": leaderboard_id}
        return _get_request_response(url=RECENT_OPPONENT_URL, params=params, json=False).text

    def get_current_match(self, search: str = "", steam_id: str = "",
                          profile_id: str = "", leaderboard_id: int = 3, **kwargs):
        """
        Request details about the last match (or current match if still in game)
        of a player. Either 'search', 'steam_id' or 'profile_id' required.

        :param search: player name; the highest rated match is returned.
        :param steam_id: steamID64 (ex: 76561199003184910).
        :param profile_id: profile ID (ex: 459658).
        :param leaderboard_id: leaderboard to query (used with 'search').
        :keyword color: include the colors the players picked in game (default False).
        :return: the response.text
        :raises Aoe2NetException: Either 'search', 'steam_id' or 'profile_id' required.
        """
        self._check_identifiers(search, steam_id, profile_id)
        optionals = _is_valid_kwarg(kwargs, {"color": False})
        params = {"flag": "false", "search": search, "steam_id": steam_id,
                  "profile_id": profile_id, "leaderboard_id": leaderboard_id}
        params.update(optionals)
        # The API expects the boolean as a lower-case "true"/"false" string.
        params["color"] = str(params["color"]).lower()
        return _get_request_response(url=CURRENT_MATCH_URL, params=params, json=False).text

    def get_current_civs(self, search: str = "", steam_id: str = "",
                         profile_id: str = "", leaderboard_id: int = 3):
        """
        Request the civilisations of the current (or last) match of a player.
        Either 'search', 'steam_id' or 'profile_id' required.

        :param search: player name; the highest rated match is returned.
        :param steam_id: steamID64 (ex: 76561199003184910).
        :param profile_id: profile ID (ex: 459658).
        :param leaderboard_id: leaderboard to query (used with 'search').
        :return: the response.text
        :raises Aoe2NetException: Either 'search', 'steam_id' or 'profile_id' required.
        """
        self._check_identifiers(search, steam_id, profile_id)
        params = {"search": search, "steam_id": steam_id,
                  "profile_id": profile_id, "leaderboard_id": leaderboard_id}
        return _get_request_response(url=CURRENT_CIVS_URL, params=params, json=False).text

    def get_current_map(self, search: str = "", steam_id: str = "",
                        profile_id: str = "", leaderboard_id: int = 3):
        """
        Request the current map name of a player.
        Either 'search', 'steam_id' or 'profile_id' required.

        :param search: player name; the highest rated match is returned.
        :param steam_id: steamID64 (ex: 76561199003184910).
        :param profile_id: profile ID (ex: 459658).
        :param leaderboard_id: leaderboard to query (used with 'search').
        :return: the response.text
        :raises Aoe2NetException: Either 'search', 'steam_id' or 'profile_id' required.
        """
        self._check_identifiers(search, steam_id, profile_id)
        params = {"search": search, "steam_id": steam_id,
                  "profile_id": profile_id, "leaderboard_id": leaderboard_id}
        return _get_request_response(url=CURRENT_MAP_URL, params=params, json=False).text
| StarcoderdataPython |
1680827 | <reponame>thewahome/msgraph-cli
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class CollectionOfActivityHistoryItem(msrest.serialization.Model):
    """Collection of activityHistoryItem.

    AutoRest-generated paged-collection wrapper; manual edits are lost when
    the client is regenerated.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param value: the page of activityHistoryItem resources.
    :type value: list[~cross_device_experiences.models.MicrosoftGraphActivityHistoryItem]
    :param odata_next_link: pagination link ('@odata.nextLink'), if any.
    :type odata_next_link: str
    """

    # msrest (de)serialization map: attribute -> wire key and type.
    # The empty key routes unmatched JSON properties into additional_properties.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'value': {'key': 'value', 'type': '[MicrosoftGraphActivityHistoryItem]'},
        'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(CollectionOfActivityHistoryItem, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties', None)
        self.value = kwargs.get('value', None)
        self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfDevice(msrest.serialization.Model):
    """One page of device resources.

    :param additional_properties: Wire-payload properties that match no
     declared attribute are deserialized into this dict.
    :type additional_properties: dict[str, object]
    :param value: The items on this page.
    :type value: list[~cross_device_experiences.models.MicrosoftGraphDevice]
    :param odata_next_link: Link to the next page, if any.
    :type odata_next_link: str
    """

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'value': {'key': 'value', 'type': '[MicrosoftGraphDevice]'},
        'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(CollectionOfDevice, self).__init__(**kwargs)
        # Absent keywords default to None via dict.get.
        for _attr in ('additional_properties', 'value', 'odata_next_link'):
            setattr(self, _attr, kwargs.get(_attr))
class CollectionOfUserActivity(msrest.serialization.Model):
    """One page of userActivity resources.

    :param additional_properties: Wire-payload properties that match no
     declared attribute are deserialized into this dict.
    :type additional_properties: dict[str, object]
    :param value: The items on this page.
    :type value: list[~cross_device_experiences.models.MicrosoftGraphUserActivity]
    :param odata_next_link: Link to the next page, if any.
    :type odata_next_link: str
    """

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'value': {'key': 'value', 'type': '[MicrosoftGraphUserActivity]'},
        'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(CollectionOfUserActivity, self).__init__(**kwargs)
        # Absent keywords default to None via dict.get.
        for _attr in ('additional_properties', 'value', 'odata_next_link'):
            setattr(self, _attr, kwargs.get(_attr))
class MicrosoftGraphEntity(msrest.serialization.Model):
    """Base Graph entity: an ``id`` plus a bag for unmatched payload keys.

    :param additional_properties: Wire-payload properties that match no
     declared attribute are deserialized into this dict.
    :type additional_properties: dict[str, object]
    :param id: Read-only entity identifier.
    :type id: str
    """

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(MicrosoftGraphEntity, self).__init__(**kwargs)
        # Absent keywords default to None via dict.get.
        self.additional_properties = kwargs.get('additional_properties')
        self.id = kwargs.get('id')
class MicrosoftGraphActivityHistoryItem(MicrosoftGraphEntity):
    """activityHistoryItem: one engagement session of a userActivity.

    ``started_date_time`` is required for timeline history; if
    ``active_duration_seconds`` is omitted it is computed server-side from
    ``started_date_time`` and ``last_active_date_time``.  ``created_date_time``
    and ``last_modified_date_time`` are set by the server.  ``status`` takes
    "active", "updated", "deleted", "ignored" or "unknownFutureValue"
    (:class:`~cross_device_experiences.models.MicrosoftGraphStatus`), and
    ``activity`` is the parent
    :class:`~cross_device_experiences.models.MicrosoftGraphUserActivity`.
    ``user_timezone`` is an optional Olson timezone ID; ``id`` comes from the
    entity base; unmatched payload keys land in ``additional_properties``.
    """

    _validation = {
        'active_duration_seconds': {'maximum': 2147483647, 'minimum': -2147483648},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'additional_properties': {'key': '', 'type': '{object}'},
        'active_duration_seconds': {'key': 'activeDurationSeconds', 'type': 'int'},
        'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'},
        'expiration_date_time': {'key': 'expirationDateTime', 'type': 'iso-8601'},
        'last_active_date_time': {'key': 'lastActiveDateTime', 'type': 'iso-8601'},
        'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
        'started_date_time': {'key': 'startedDateTime', 'type': 'iso-8601'},
        'status': {'key': 'status', 'type': 'str'},
        'user_timezone': {'key': 'userTimezone', 'type': 'str'},
        'activity': {'key': 'activity', 'type': 'MicrosoftGraphUserActivity'},
    }

    def __init__(self, **kwargs):
        # The Entity base consumes 'id'; everything else is picked up below.
        super(MicrosoftGraphActivityHistoryItem, self).__init__(**kwargs)
        for _attr in (
            'additional_properties',
            'active_duration_seconds',
            'created_date_time',
            'expiration_date_time',
            'last_active_date_time',
            'last_modified_date_time',
            'started_date_time',
            'status',
            'user_timezone',
            'activity',
        ):
            # Absent keywords default to None via dict.get.
            setattr(self, _attr, kwargs.get(_attr))
class MicrosoftGraphAlternativeSecurityId(msrest.serialization.Model):
    """alternativeSecurityId — all fields are for internal use only.

    :param additional_properties: Wire-payload properties that match no
     declared attribute are deserialized into this dict.
    :type additional_properties: dict[str, object]
    :param identity_provider: For internal use only.
    :type identity_provider: str
    :param key: For internal use only (base64 on the wire).
    :type key: bytes
    :param type: For internal use only (32-bit range enforced by validation).
    :type type: int
    """

    _validation = {
        'type': {'maximum': 2147483647, 'minimum': -2147483648},
    }

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'identity_provider': {'key': 'identityProvider', 'type': 'str'},
        'key': {'key': 'key', 'type': 'base64'},
        'type': {'key': 'type', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(MicrosoftGraphAlternativeSecurityId, self).__init__(**kwargs)
        # Absent keywords default to None via dict.get.
        for _attr in ('additional_properties', 'identity_provider', 'key', 'type'):
            setattr(self, _attr, kwargs.get(_attr))
class MicrosoftGraphCommand(MicrosoftGraphEntity):
    """command entity targeted at an app service on a device.

    String attributes: ``app_service_name``, ``error``,
    ``package_family_name``, ``permission_ticket``, ``post_back_uri``,
    ``status`` and ``type``.  ``payload`` is a free-form JSON request dict
    and ``responsepayload`` is the matching
    :class:`~cross_device_experiences.models.MicrosoftGraphPayloadResponse`.
    ``id`` comes from the entity base; unmatched payload keys land in
    ``additional_properties``.
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'additional_properties': {'key': '', 'type': '{object}'},
        'app_service_name': {'key': 'appServiceName', 'type': 'str'},
        'error': {'key': 'error', 'type': 'str'},
        'package_family_name': {'key': 'packageFamilyName', 'type': 'str'},
        'payload': {'key': 'payload', 'type': '{object}'},
        'permission_ticket': {'key': 'permissionTicket', 'type': 'str'},
        'post_back_uri': {'key': 'postBackUri', 'type': 'str'},
        'status': {'key': 'status', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'responsepayload': {'key': 'responsepayload', 'type': 'MicrosoftGraphPayloadResponse'},
    }

    def __init__(self, **kwargs):
        # The Entity base consumes 'id'; everything else is picked up below.
        super(MicrosoftGraphCommand, self).__init__(**kwargs)
        for _attr in (
            'additional_properties',
            'app_service_name',
            'error',
            'package_family_name',
            'payload',
            'permission_ticket',
            'post_back_uri',
            'status',
            'type',
            'responsepayload',
        ):
            # Absent keywords default to None via dict.get.
            setattr(self, _attr, kwargs.get(_attr))
class MicrosoftGraphDirectoryObject(MicrosoftGraphEntity):
    """directoryObject: base type for Azure AD directory entities.

    :param id: Read-only entity identifier (from the entity base).
    :type id: str
    :param additional_properties: Wire-payload properties that match no
     declared attribute are deserialized into this dict.
    :type additional_properties: dict[str, object]
    :param deleted_date_time: When the object was soft-deleted.
    :type deleted_date_time: ~datetime.datetime
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'additional_properties': {'key': '', 'type': '{object}'},
        'deleted_date_time': {'key': 'deletedDateTime', 'type': 'iso-8601'},
    }

    def __init__(self, **kwargs):
        # The Entity base consumes 'id'.
        super(MicrosoftGraphDirectoryObject, self).__init__(**kwargs)
        # Absent keywords default to None via dict.get.
        self.additional_properties = kwargs.get('additional_properties')
        self.deleted_date_time = kwargs.get('deleted_date_time')
class MicrosoftGraphDevice(MicrosoftGraphDirectoryObject):
    """Azure AD registered device (a directoryObject subtype).

    Covers registration identity (``device_id``, ``display_name``,
    ``operating_system``, ``operating_system_version`` — required per the
    generated schema), account and MDM state (``account_enabled``,
    ``is_compliant``, ``is_managed``, ``is_rooted``), on-premises sync fields
    (``on_premises_last_sync_date_time``, ``on_premises_sync_enabled``,
    ``extension_attributes``), hardware metadata (``manufacturer``, ``model``,
    ``kind``, ``platform``) and the navigation collections ``member_of``,
    ``registered_owners``, ``registered_users``, ``transitive_member_of``,
    ``extensions`` and ``commands``.  The full attribute set is exactly the
    keys of ``_attribute_map``; ``id`` and ``deleted_date_time`` come from the
    directoryObject base, and unmatched payload keys land in
    ``additional_properties``.  ``device_version`` is validated to the signed
    32-bit range.
    """

    _validation = {
        'device_version': {'maximum': 2147483647, 'minimum': -2147483648},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'deleted_date_time': {'key': 'deletedDateTime', 'type': 'iso-8601'},
        'additional_properties': {'key': '', 'type': '{object}'},
        'account_enabled': {'key': 'accountEnabled', 'type': 'bool'},
        'alternative_security_ids': {'key': 'alternativeSecurityIds', 'type': '[MicrosoftGraphAlternativeSecurityId]'},
        'approximate_last_sign_in_date_time': {'key': 'approximateLastSignInDateTime', 'type': 'iso-8601'},
        'compliance_expiration_date_time': {'key': 'complianceExpirationDateTime', 'type': 'iso-8601'},
        'device_category': {'key': 'deviceCategory', 'type': 'str'},
        'device_id': {'key': 'deviceId', 'type': 'str'},
        'device_metadata': {'key': 'deviceMetadata', 'type': 'str'},
        'device_ownership': {'key': 'deviceOwnership', 'type': 'str'},
        'device_version': {'key': 'deviceVersion', 'type': 'int'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'domain_name': {'key': 'domainName', 'type': 'str'},
        'enrollment_profile_name': {'key': 'enrollmentProfileName', 'type': 'str'},
        'enrollment_type': {'key': 'enrollmentType', 'type': 'str'},
        'extension_attributes': {'key': 'extensionAttributes', 'type': 'MicrosoftGraphOnPremisesExtensionAttributes'},
        'is_compliant': {'key': 'isCompliant', 'type': 'bool'},
        'is_managed': {'key': 'isManaged', 'type': 'bool'},
        'is_rooted': {'key': 'isRooted', 'type': 'bool'},
        'management_type': {'key': 'managementType', 'type': 'str'},
        'on_premises_last_sync_date_time': {'key': 'onPremisesLastSyncDateTime', 'type': 'iso-8601'},
        'on_premises_sync_enabled': {'key': 'onPremisesSyncEnabled', 'type': 'bool'},
        'operating_system': {'key': 'operatingSystem', 'type': 'str'},
        'operating_system_version': {'key': 'operatingSystemVersion', 'type': 'str'},
        'physical_ids': {'key': 'physicalIds', 'type': '[str]'},
        'profile_type': {'key': 'profileType', 'type': 'str'},
        'registration_date_time': {'key': 'registrationDateTime', 'type': 'iso-8601'},
        'system_labels': {'key': 'systemLabels', 'type': '[str]'},
        'trust_type': {'key': 'trustType', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'manufacturer': {'key': 'manufacturer', 'type': 'str'},
        'model': {'key': 'model', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'platform': {'key': 'platform', 'type': 'str'},
        'status': {'key': 'status', 'type': 'str'},
        'member_of': {'key': 'memberOf', 'type': '[MicrosoftGraphDirectoryObject]'},
        'registered_owners': {'key': 'registeredOwners', 'type': '[MicrosoftGraphDirectoryObject]'},
        'registered_users': {'key': 'registeredUsers', 'type': '[MicrosoftGraphDirectoryObject]'},
        'transitive_member_of': {'key': 'transitiveMemberOf', 'type': '[MicrosoftGraphDirectoryObject]'},
        'extensions': {'key': 'extensions', 'type': '[MicrosoftGraphExtension]'},
        'commands': {'key': 'commands', 'type': '[MicrosoftGraphCommand]'},
    }

    def __init__(self, **kwargs):
        # The directoryObject base consumes 'id' and 'deleted_date_time'.
        super(MicrosoftGraphDevice, self).__init__(**kwargs)
        for _attr in (
            'additional_properties',
            'account_enabled',
            'alternative_security_ids',
            'approximate_last_sign_in_date_time',
            'compliance_expiration_date_time',
            'device_category',
            'device_id',
            'device_metadata',
            'device_ownership',
            'device_version',
            'display_name',
            'domain_name',
            'enrollment_profile_name',
            'enrollment_type',
            'extension_attributes',
            'is_compliant',
            'is_managed',
            'is_rooted',
            'management_type',
            'on_premises_last_sync_date_time',
            'on_premises_sync_enabled',
            'operating_system',
            'operating_system_version',
            'physical_ids',
            'profile_type',
            'registration_date_time',
            'system_labels',
            'trust_type',
            'kind',
            'manufacturer',
            'model',
            'name',
            'platform',
            'status',
            'member_of',
            'registered_owners',
            'registered_users',
            'transitive_member_of',
            'extensions',
            'commands',
        ):
            # Absent keywords default to None via dict.get.
            setattr(self, _attr, kwargs.get(_attr))
class MicrosoftGraphExtension(MicrosoftGraphEntity):
    """extension: an open-extension entity with no declared fields of its own.

    :param id: Read-only entity identifier (from the entity base).
    :type id: str
    :param additional_properties: Wire-payload properties that match no
     declared attribute are deserialized into this dict.
    :type additional_properties: dict[str, object]
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'additional_properties': {'key': '', 'type': '{object}'},
    }

    def __init__(self, **kwargs):
        # The Entity base consumes 'id'.
        super(MicrosoftGraphExtension, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties')
class MicrosoftGraphImageInfo(msrest.serialization.Model):
    """imageInfo: icon metadata attached to an activity's visual elements.

    :param additional_properties: Wire-payload properties that match no
     declared attribute are deserialized into this dict.
    :type additional_properties: dict[str, object]
    :param add_image_query: Optional; tells the server it may render the
     image dynamically (e.g. a high-contrast variant).
    :type add_image_query: bool
    :param alternate_text: Optional; accessible alt-text for the image.
    :type alternate_text: str
    :param alternative_text:
    :type alternative_text: str
    :param icon_url: Optional; URI of an icon representing the source app.
    :type icon_url: str
    """

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'add_image_query': {'key': 'addImageQuery', 'type': 'bool'},
        'alternate_text': {'key': 'alternateText', 'type': 'str'},
        'alternative_text': {'key': 'alternativeText', 'type': 'str'},
        'icon_url': {'key': 'iconUrl', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(MicrosoftGraphImageInfo, self).__init__(**kwargs)
        for _attr in (
            'additional_properties',
            'add_image_query',
            'alternate_text',
            'alternative_text',
            'icon_url',
        ):
            # Absent keywords default to None via dict.get.
            setattr(self, _attr, kwargs.get(_attr))
class MicrosoftGraphOnPremisesExtensionAttributes(msrest.serialization.Model):
    """onPremisesExtensionAttributes: the 15 customizable extension slots.

    Attributes ``extension_attribute1`` … ``extension_attribute15`` are all
    optional strings mapping to the wire keys ``extensionAttribute1`` …
    ``extensionAttribute15``; unmatched payload keys land in
    ``additional_properties``.
    """

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'extension_attribute1': {'key': 'extensionAttribute1', 'type': 'str'},
        'extension_attribute10': {'key': 'extensionAttribute10', 'type': 'str'},
        'extension_attribute11': {'key': 'extensionAttribute11', 'type': 'str'},
        'extension_attribute12': {'key': 'extensionAttribute12', 'type': 'str'},
        'extension_attribute13': {'key': 'extensionAttribute13', 'type': 'str'},
        'extension_attribute14': {'key': 'extensionAttribute14', 'type': 'str'},
        'extension_attribute15': {'key': 'extensionAttribute15', 'type': 'str'},
        'extension_attribute2': {'key': 'extensionAttribute2', 'type': 'str'},
        'extension_attribute3': {'key': 'extensionAttribute3', 'type': 'str'},
        'extension_attribute4': {'key': 'extensionAttribute4', 'type': 'str'},
        'extension_attribute5': {'key': 'extensionAttribute5', 'type': 'str'},
        'extension_attribute6': {'key': 'extensionAttribute6', 'type': 'str'},
        'extension_attribute7': {'key': 'extensionAttribute7', 'type': 'str'},
        'extension_attribute8': {'key': 'extensionAttribute8', 'type': 'str'},
        'extension_attribute9': {'key': 'extensionAttribute9', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(MicrosoftGraphOnPremisesExtensionAttributes, self).__init__(**kwargs)
        # The 15 numbered slots share one shape, so generate the names;
        # absent keywords default to None via dict.get.
        self.additional_properties = kwargs.get('additional_properties')
        for _i in range(1, 16):
            _attr = 'extension_attribute%d' % _i
            setattr(self, _attr, kwargs.get(_attr))
class MicrosoftGraphPayloadResponse(MicrosoftGraphEntity):
    """payloadResponse: an entity with no declared fields of its own.

    :param id: Read-only entity identifier (from the entity base).
    :type id: str
    :param additional_properties: Wire-payload properties that match no
     declared attribute are deserialized into this dict.
    :type additional_properties: dict[str, object]
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'additional_properties': {'key': '', 'type': '{object}'},
    }

    def __init__(self, **kwargs):
        # The Entity base consumes 'id'.
        super(MicrosoftGraphPayloadResponse, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties')
class MicrosoftGraphUserActivity(MicrosoftGraphEntity):
"""userActivity.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param activation_url: Required. URL used to launch the activity in the best native experience
represented by the appId. Might launch a web-based app if no native app exists.
:type activation_url: str
:param activity_source_host: Required. URL for the domain representing the cross-platform
identity mapping for the app. Mapping is stored either as a JSON file hosted on the domain or
configurable via Windows Dev Center. The JSON file is named cross-platform-app-identifiers and
is hosted at root of your HTTPS domain, either at the top level domain or include a sub domain.
For example: https://contoso.com or https://myapp.contoso.com but NOT
https://myapp.contoso.com/somepath. You must have a unique file and domain (or sub domain) per
cross-platform app identity. For example, a separate file and domain is needed for Word vs.
PowerPoint.
:type activity_source_host: str
:param app_activity_id: Required. The unique activity ID in the context of the app - supplied
by caller and immutable thereafter.
:type app_activity_id: str
:param app_display_name: Optional. Short text description of the app used to generate the
activity for use in cases when the app is not installed on the user’s local device.
:type app_display_name: str
:param content_info: Json.
:type content_info: dict[str, object]
:param content_url: Optional. Used in the event the content can be rendered outside of a native
or web-based app experience (for example, a pointer to an item in an RSS feed).
:type content_url: str
:param created_date_time: Set by the server. DateTime in UTC when the object was created on the
server.
:type created_date_time: ~datetime.datetime
:param expiration_date_time: Set by the server. DateTime in UTC when the object expired on the
server.
:type expiration_date_time: ~datetime.datetime
:param fallback_url: Optional. URL used to launch the activity in a web-based app, if
available.
:type fallback_url: str
:param last_modified_date_time: Set by the server. DateTime in UTC when the object was modified
on the server.
:type last_modified_date_time: ~datetime.datetime
:param status: Possible values include: "active", "updated", "deleted", "ignored",
"unknownFutureValue".
:type status: str or ~cross_device_experiences.models.MicrosoftGraphStatus
:param user_timezone: Optional. The timezone in which the user's device used to generate the
activity was located at activity creation time; values supplied as Olson IDs in order to
support cross-platform representation.
:type user_timezone: str
:param visual_elements: visualInfo.
:type visual_elements: ~cross_device_experiences.models.MicrosoftGraphVisualInfo
:param history_items: Optional. NavigationProperty/Containment; navigation property to the
activity's historyItems.
:type history_items: list[~cross_device_experiences.models.MicrosoftGraphActivityHistoryItem]
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'activation_url': {'key': 'activationUrl', 'type': 'str'},
'activity_source_host': {'key': 'activitySourceHost', 'type': 'str'},
'app_activity_id': {'key': 'appActivityId', 'type': 'str'},
'app_display_name': {'key': 'appDisplayName', 'type': 'str'},
'content_info': {'key': 'contentInfo', 'type': '{object}'},
'content_url': {'key': 'contentUrl', 'type': 'str'},
'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'},
'expiration_date_time': {'key': 'expirationDateTime', 'type': 'iso-8601'},
'fallback_url': {'key': 'fallbackUrl', 'type': 'str'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
'status': {'key': 'status', 'type': 'str'},
'user_timezone': {'key': 'userTimezone', 'type': 'str'},
'visual_elements': {'key': 'visualElements', 'type': 'MicrosoftGraphVisualInfo'},
'history_items': {'key': 'historyItems', 'type': '[MicrosoftGraphActivityHistoryItem]'},
}
    def __init__(
        self,
        **kwargs
    ):
        """Populate the activity model from keyword arguments.

        Every field is optional here; omitted fields default to None.
        """
        super(MicrosoftGraphUserActivity, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties', None)
        self.activation_url = kwargs.get('activation_url', None)
        self.activity_source_host = kwargs.get('activity_source_host', None)
        self.app_activity_id = kwargs.get('app_activity_id', None)
        self.app_display_name = kwargs.get('app_display_name', None)
        self.content_info = kwargs.get('content_info', None)
        self.content_url = kwargs.get('content_url', None)
        self.created_date_time = kwargs.get('created_date_time', None)
        self.expiration_date_time = kwargs.get('expiration_date_time', None)
        self.fallback_url = kwargs.get('fallback_url', None)
        self.last_modified_date_time = kwargs.get('last_modified_date_time', None)
        self.status = kwargs.get('status', None)
        self.user_timezone = kwargs.get('user_timezone', None)
        self.visual_elements = kwargs.get('visual_elements', None)
        self.history_items = kwargs.get('history_items', None)
class MicrosoftGraphVisualInfo(msrest.serialization.Model):
    """visualInfo.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param attribution: imageInfo.
    :type attribution: ~cross_device_experiences.models.MicrosoftGraphImageInfo
    :param background_color: Optional. Background color used to render the activity in the UI -
     brand color for the application source of the activity. Must be a valid hex color.
    :type background_color: str
    :param content: Json.
    :type content: dict[str, object]
    :param description: Optional. Longer text description of the user's unique activity (example:
     document name, first sentence, and/or metadata).
    :type description: str
    :param display_text: Required. Short text description of the user's unique activity (for
     example, document name in cases where an activity refers to document creation).
    :type display_text: str
    """

    # Serialization metadata consumed by msrest (attribute -> JSON key + type).
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'attribution': {'key': 'attribution', 'type': 'MicrosoftGraphImageInfo'},
        'background_color': {'key': 'backgroundColor', 'type': 'str'},
        'content': {'key': 'content', 'type': '{object}'},
        'description': {'key': 'description', 'type': 'str'},
        'display_text': {'key': 'displayText', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """Populate the model from keyword arguments; omitted fields are None."""
        super(MicrosoftGraphVisualInfo, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties', None)
        self.attribution = kwargs.get('attribution', None)
        self.background_color = kwargs.get('background_color', None)
        self.content = kwargs.get('content', None)
        self.description = kwargs.get('description', None)
        self.display_text = kwargs.get('display_text', None)
class OdataError(msrest.serialization.Model):
    """Top-level OData error envelope returned by the service.

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param error: Required.
    :type error: ~cross_device_experiences.models.OdataErrorMain
    """

    _validation = {'error': {'required': True}}

    # msrest serialization metadata (attribute -> JSON key + type).
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'error': {'key': 'error', 'type': 'OdataErrorMain'},
    }

    def __init__(self, **kwargs):
        """Build from keyword arguments; 'error' is mandatory (KeyError if absent)."""
        super(OdataError, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties', None)
        self.error = kwargs['error']
class OdataErrorDetail(msrest.serialization.Model):
    """A single detail entry inside an OData error.

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param code: Required.
    :type code: str
    :param message: Required.
    :type message: str
    :param target:
    :type target: str
    """

    _validation = {'code': {'required': True}, 'message': {'required': True}}

    # msrest serialization metadata (attribute -> JSON key + type).
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Build from keyword arguments; 'code' and 'message' are mandatory."""
        super(OdataErrorDetail, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties', None)
        self.code = kwargs['code']
        self.message = kwargs['message']
        self.target = kwargs.get('target', None)
class OdataErrorMain(msrest.serialization.Model):
    """OdataErrorMain.

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param code: Required.
    :type code: str
    :param message: Required.
    :type message: str
    :param target:
    :type target: str
    :param details:
    :type details: list[~cross_device_experiences.models.OdataErrorDetail]
    :param innererror: The structure of this object is service-specific.
    :type innererror: dict[str, object]
    """

    _validation = {
        'code': {'required': True},
        'message': {'required': True},
    }

    # Serialization metadata consumed by msrest (attribute -> JSON key + type).
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'details': {'key': 'details', 'type': '[OdataErrorDetail]'},
        'innererror': {'key': 'innererror', 'type': '{object}'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """Populate from keyword arguments; 'code' and 'message' are mandatory
        (raise KeyError when absent); everything else defaults to None."""
        super(OdataErrorMain, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties', None)
        self.code = kwargs['code']
        self.message = kwargs['message']
        self.target = kwargs.get('target', None)
        self.details = kwargs.get('details', None)
        self.innererror = kwargs.get('innererror', None)
| StarcoderdataPython |
3509943 | # -*- coding: utf-8 -*-
import numpy as np
import copy, pdb
from collections import defaultdict
class Rel(object):
    """Joint-reliability estimator.

    Scores each tracked joint per frame by combining three terms:
    behavioral (spatio-temporal smoothness), kinematic (segment-length
    plausibility against anthropometric priors) and tracking (sensor
    tracking state), then smooths them over time with Gaussian weights.

    Fix: replaced Python-2-only ``xrange`` with ``range`` so the class also
    runs on Python 3 (behavior on Python 2 is unchanged).
    """
    def __init__(self):
        """Initialize segment-length priors, joint histories and the
        normalized Gaussian temporal-smoothing weights."""
        # kinematic segment length priors (unit: cm)
        self.kinseg = {}
        self.kinseg[0] = 13.4   # head2neck
        self.kinseg[1] = 8.3    # neck2spins
        self.kinseg[2] = 15.4   # spins2spinm
        self.kinseg[3] = 32.5   # spinm2spinb
        self.kinseg[4] = 16.65  # spins2shlder
        self.kinseg[5] = 33.2   # shlder2elbow
        self.kinseg[6] = 27.1   # elbow2wrist
        # target joint order (sensor joint indices that get scored)
        self.trg_jorder = [0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 20]
        self.jointary = defaultdict(list)  # per-joint 3D position history
        self.Rb = defaultdict(list)  # behavioral reliability history
        self.Rt = defaultdict(list)  # tracking reliability history
        self.Rk = defaultdict(list)  # kinematic reliability history
        # two-tap Gaussian weights, normalized to sum to 1
        sigma = 0.65
        self.gw = 1/(2*np.pi)**2/sigma*np.exp(-0.5*np.arange(2)**2/sigma**2)
        self.gw = self.gw*(1/sum(self.gw))
    def rel_behav(self, joint, th=0.03, fsize=3):  # behavior term
        """Behavioral reliability from the joint's last three positions.

        joint : list of 3D positions ordered [..., f-2, f-1, f]
        th    : motion threshold (unit: m)
        fsize : minimum history length required to score
        Returns a value in [0, 1]; 1 when the history is too short or the
        motion is consistent, shrinking linearly for large sudden jumps.
        """
        r = 1
        if len(joint) >= fsize:
            # range(1): only the newest frame is scored; kept as a loop so
            # the temporal window could be widened without restructuring.
            for k in range(1):
                dist2 = joint[-(k+1)]-joint[-(k+2)]
                dist1 = joint[-(k+1)]-joint[-(k+3)]
                n_dist2 = np.linalg.norm(dist2)
                n_dist1 = np.linalg.norm(dist1)
                if (n_dist1 < th):
                    # overall displacement over two frames is tiny: trust it
                    r = 1
                else:
                    if (n_dist2 > th):
                        # penalize large frame-to-frame jumps linearly
                        r = max(1-4*(n_dist2-th)/th, 0)
                    else:
                        r = 1
        return r
    def rel_kin(self, joints):  # kinematic term
        """Kinematic reliability per target joint.

        Compares measured segment lengths between joint pairs/triples
        (meters, scaled x100 to cm) against the ``kinseg`` priors; the
        relative deviation (capped at 1) is averaged per joint and
        inverted so 1 means perfectly plausible.
        """
        order1 = [9, 5, 20, 1, 2]
        order2 = [8, 6, 4, 20, 3]  # joints' order
        order3 = [10, 4, 8, 0, 20]
        refer1 = [5, 6, 4, 2, 0]   # kinseg's order for the A-B segments
        refer2 = [6, 5, 4, 3, 1]   # kinseg's order for the A-C segments
        segrel = defaultdict(lambda: int(0))  # summed relative deviations
        result = []
        cnts = np.zeros(21)  # how many segments contributed per joint
        for i in range(len(order1)):
            A = np.array([joints[order1[i]].Position.x, joints[order1[i]].Position.y, joints[order1[i]].Position.z])
            B = np.array([joints[order2[i]].Position.x, joints[order2[i]].Position.y, joints[order2[i]].Position.z])
            C = np.array([joints[order3[i]].Position.x, joints[order3[i]].Position.y, joints[order3[i]].Position.z])
            tmp = min(np.abs(np.linalg.norm(A-B)*100-self.kinseg[refer1[i]])/self.kinseg[refer1[i]], 1)
            segrel[order1[i]] += tmp
            segrel[order2[i]] += tmp
            tmp = min(np.abs(np.linalg.norm(A-C)*100-self.kinseg[refer2[i]])/self.kinseg[refer2[i]], 1)
            segrel[order1[i]] += tmp
            segrel[order3[i]] += tmp
            cnts[order1[i]] += 2
            cnts[order2[i]] += 1
            cnts[order3[i]] += 1
        for i in self.trg_jorder:
            result.append(1-(segrel[i]/cnts[i]))
        return result
    def rel_trk(self, joints):  # tracking term
        """Sensor tracking-state reliability for each target joint.

        States 2 (tracked) and 1 (inferred) both map to 1.0; anything
        else maps to 0.0.
        """
        trkrel = []
        for i in self.trg_jorder:
            if joints[i].TrackingState == 2:
                trkrel.append(1.0)
            elif joints[i].TrackingState == 1:
                trkrel.append(1.0)
            else:
                trkrel.append(0.0)
        return trkrel
    def rel_overall(self, Rb, Rk, Rt, order, flen=2):
        """Combine behavioral, kinematic and tracking reliabilities.

        For each joint, the per-frame score is the min of the three terms,
        Gaussian-weighted over the last ``flen`` frames.  Returns
        (dict keyed by joint, 21-element array); the array is empty when the
        histories are still shorter than ``flen``.
        """
        Relary = np.zeros(21)
        Rel = defaultdict(int)
        if (len(Rb[0]) >= flen) & (len(Rk[0]) >= flen) & (len(Rt[0]) >= flen):
            if order == self.trg_jorder:
                for j in order:
                    for i in range(flen):
                        Rel[j] += self.gw[i]*min(Rb[j][-(i+1)], Rk[j][-(i+1)], Rt[j][-(i+1)])
                        Relary[j] += self.gw[i]*min(Rb[j][-(i+1)], Rk[j][-(i+1)], Rt[j][-(i+1)])
            else:
                # NOTE(review): ImportError is an odd choice here (kept for
                # backward compatibility) -- ValueError would be conventional.
                raise ImportError('joints order not match !!')
        else:
            return Rel, np.array([])
        return Rel, Relary
    def run(self, jdic):
        """Append this frame's per-joint reliabilities and return the
        smoothed overall reliability (see ``rel_overall``).

        jdic: mapping from joint index to a sensor joint object exposing
        ``.Position.{x,y,z}`` and ``.TrackingState``.
        """
        rt = self.rel_trk(jdic)
        rk = self.rel_kin(jdic)
        for jj, ii in enumerate(self.trg_jorder):
            self.jointary[ii].append(np.array([jdic[ii].Position.x, jdic[ii].Position.y, jdic[ii].Position.z]))
            self.Rb[ii].append(self.rel_behav(self.jointary[ii]))
            self.Rt[ii].append(rt[jj])
            self.Rk[ii].append(rk[jj])
        return self.rel_overall(self.Rb, self.Rk, self.Rt, self.trg_jorder)
| StarcoderdataPython |
4999596 | <reponame>ericphanson/arxiv-search
import os
import json
import time
import pickle
import argparse
import dateutil.parser
from dateutil.tz import tzutc
from datetime import datetime, timedelta
from pytz import timezone
import copy
from random import shuffle, randrange, uniform
from flask.json import jsonify
from sqlite3 import dbapi2 as sqlite3
from hashlib import md5
from flask import Flask, request, session, url_for, redirect, \
render_template, abort, g, flash, _app_ctx_stack
from flask_limiter import Limiter
from werkzeug import check_password_hash, generate_password_hash
import re
from elasticsearch import Elasticsearch, RequestsHttpConnection
from elasticsearch.helpers import streaming_bulk, bulk, parallel_bulk
import elasticsearch
from itertools import islice
import certifi
from elasticsearch_dsl import Search, Q, A, Mapping
from elasticsearch_dsl import FacetedSearch, TermsFacet, RangeFacet, DateHistogramFacet
from elasticsearch_dsl.query import MultiMatch, Match, DisMax
from aws_requests_auth.aws_auth import AWSRequestsAuth
from cmreslogging.handlers import CMRESHandler
import requests
from requests_aws4auth import AWS4Auth
from pyparsing import Word, alphas, Literal, Group, Suppress, OneOrMore, oneOf
import threading
# -----------------------------------------------------------------------------
# Words excluded from more_like_this recommendation queries.
stop_words = ["the", "of", "and", "in", "a", "to", "we", "for", "mathcal", "can", "is", "this", "with", "by", "that", "as", "to"]
# All paths are resolved relative to the current working directory.
root_dir = os.path.join(".")
# Path helpers for the three on-disk areas (note: parameter name shadows the
# Python 2 builtin 'file'; kept for compatibility).
def key_dir(file): return os.path.join(root_dir,"server","keys",file)
def server_dir(file): return os.path.join(root_dir,"server", file)
def shared_dir(file): return os.path.join(root_dir,"shared", file)
database_path = os.path.join(root_dir,"server", 'user_db', 'as.db')
schema_path = os.path.join(root_dir,"server", 'user_db', 'schema.sql')
def strip_version(idstr):
    """Return the arxiv id with any trailing version suffix removed.

    e.g. '1511.08198v1' -> '1511.08198'; ids without a version pass through.
    The previous implementation split on the first 'v', which corrupted
    old-style ids whose archive name contains a 'v' (e.g. 'solv-int/9701001'
    became 'sol'); only a trailing 'v<digits>' is a version marker.
    """
    return re.sub(r'v\d+$', '', idstr)
# "1511.08198v1" is an example of a valid arxiv id that we accept
def isvalidid(pid):
    """Return a truthy re.Match iff *pid* looks like an arxiv id.

    Accepts new-style ('1511.08198', '1511.08198v1') and old-style
    ('hep-th/9901001v2') identifiers.  The pattern is now a raw string:
    '\\d' in a plain literal is an invalid escape sequence and warns on
    Python 3.12+.
    """
    return re.match(r'^([a-z]+(-[a-z]+)?/)?\d+(\.\d+)?(v\d+)?$', pid)
# database configuration
# Secret/credential material is read from flat files under server/keys at
# import time; note the open() handles below are never explicitly closed.
if os.path.isfile(key_dir('secret_key.txt')):
    SECRET_KEY = open(key_dir('secret_key.txt'), 'r').read()
else:
    SECRET_KEY = 'devkey, should be in a file'
# AWS_ACCESS_KEY = open(key_dir('AWS_ACCESS_KEY.txt'), 'r').read().strip()
# AWS_SECRET_KEY = open(key_dir('AWS_SECRET_KEY.txt'), 'r').read().strip()
ES_USER = open(key_dir('ES_USER.txt'), 'r').read().strip()
ES_PASS = open(key_dir('ES_PASS.txt'), 'r').read().strip()
# NOTE(review): double assignment 'es_host = es_host =' is redundant (harmless).
es_host = es_host = '0638598f91a536280b20fd25240980d2.us-east-1.aws.found.io'
# log_AWS_ACCESS_KEY = open(key_dir('log_AWS_ACCESS_KEY.txt'), 'r').read().strip()
# log_AWS_SECRET_KEY = open(key_dir('log_AWS_SECRET_KEY.txt'), 'r').read().strip()
CLOUDFRONT_URL = 'https://d3dq07j9ipgft2.cloudfront.net/'
# Category metadata shared with the client.
with open(shared_dir("all_categories.json"), 'r') as cats:
    CATS_JSON = json.load(cats)
ALL_CATEGORIES = [ cat['c'] for cat in CATS_JSON]
# jwskey = jwk.JWK.generate(kty='oct', size=256)
cache_key = open(key_dir('cache_key.txt'), 'r').read().strip()
# Feature flags.
AUTO_CACHE = False
user_features = True
user_interactivity = False
print('read in AWS keys')
# Flask app; config values are pulled from this module's UPPERCASE globals.
app = Flask(__name__, static_folder=os.path.join("..","static"))
app.config.from_object(__name__)
# limiter = Limiter(app, default_limits=["1000 per hour", "20 per minute"])
# -----------------------------------------------------------------------------
# utilities for database interactions
# -----------------------------------------------------------------------------
# to initialize the database: sqlite3 as.db < schema.sql
def connect_db():
    """Open a SQLite connection whose rows behave like dicts."""
    conn = sqlite3.connect(database_path)
    conn.row_factory = sqlite3.Row  # dict-like row access instead of tuples
    return conn
def query_db(query, args=(), one=False):
    """Run *query* on the request DB; return all rows, or just the first
    row (None when empty) if *one* is set."""
    rows = g.db.execute(query, args).fetchall()
    if one:
        return rows[0] if rows else None
    return rows
def get_user_id(username):
    """Look up the numeric user id for *username*; None when unknown."""
    row = query_db('select user_id from user where username = ?',
                   [username], one=True)
    return row[0] if row else None
def get_username(user_id):
    """Look up the username for *user_id*; None when unknown."""
    row = query_db('select username from user where user_id = ?',
                   [user_id], one=True)
    return row[0] if row else None
# -----------------------------------------------------------------------------
# connection handlers
# -----------------------------------------------------------------------------
@app.before_request
def before_request():
    """Per-request setup: open a DB connection and load the logged-in
    user (and their library paper ids) into flask.g."""
    g.libids = None
    # this will always request database connection, even if we dont end up using it ;\
    g.db = connect_db()
    # retrieve user object from the database if user_id is set
    g.user = None
    if 'user_id' in session:
        g.user = query_db('select * from user where user_id = ?',
                          [session['user_id']], one=True)
    # Opportunistically warm the per-user search cache.
    added = addUserSearchesToCache()
    if added:
        print('addUser fired from before_request')
    # g.libids = None
    if g.user:
        # Prefer the session-cached library ids; rebuild from the DB otherwise.
        if 'libids' in session:
            g.libids = session['libids']
        else:
            update_libids()
def update_libids():
    """Rebuild the logged-in user's library paper-id cache (session + flask.g)."""
    uid = session['user_id']
    rows = query_db('''select * from library where user_id = ?''', [uid])
    stripped = [strip_version(row['paper_id']) for row in rows]
    session['libids'] = stripped
    g.libids = stripped
@app.teardown_request
def teardown_request(exception):
    """Close the request-scoped DB connection, if one was opened."""
    conn = getattr(g, 'db', None)
    if conn is not None:
        conn.close()
#------------------------------------------------------
# Pass data to client
#------------------------------------------------------
def render_date(timestr):
    """Render a parseable timestamp string as e.g. '7 Mar 2018'."""
    dt = dateutil.parser.parse(timestr)
    return '%s %s %s' % (dt.day, dt.strftime('%b'), dt.year)
def encode_hit(p, send_images=True, send_abstracts=True):
    """Convert an ES paper document *p* into the dict the client consumes.

    NOTE(review): send_abstracts is accepted but never read -- the abstract
    is included whenever the source document has one.
    """
    pid = str(p['rawid'])
    # versioned id, e.g. '1511.08198v2'
    idvv = '%sv%d' % (p['rawid'], p['paper_version'])
    struct = {}
    if 'havethumb' in p:
        struct['havethumb'] = p['havethumb']
    struct['title'] = p['title']
    struct['pid'] = idvv
    struct['rawpid'] = p['rawid']
    struct['category'] = p['primary_cat']
    struct['authors'] = [a for a in p['authors']]
    struct['link'] = p['link']
    if 'abstract' in p:
        struct['abstract'] = p['abstract']
    # print(p.to_dict())
    # exit()
    if send_images:
        # Thumbnail is served from CloudFront; '/' removed for old-style ids.
        # struct['img'] = '/static/thumbs/' + idvv.replace('/','') + '.pdf.jpg'
        struct['img'] = CLOUDFRONT_URL + 'thumbs/' + pid.replace('/','') + '.pdf.jpg'
    struct['tags'] = [t for t in p['cats']]
    # struct['tags'] = [t['term'] for t in p['tags']]
    # render time information nicely
    struct['published_time'] = render_date(p['updated'])
    struct['originally_published_time'] = render_date(p['published'])
    # fetch amount of discussion on this paper
    struct['num_discussion'] = 0
    # arxiv comments from the authors (when they submit the paper)
    # cc = p.get('arxiv_comment', '')
    if 'arxiv_comment' in p:
        cc = p['arxiv_comment']
    else:
        cc = ""
    if len(cc) > 100:
        cc = cc[:100] + '...' # crop very long comments
    struct['comment'] = cc
    return struct
def add_user_data_to_hit(struct):
    """Mark whether this hit's paper is in the current user's library (0/1)."""
    library = set(g.libids) if g.libids else set()
    struct['in_library'] = 1 if struct['rawpid'] in library else 0
    return struct
def getResults(search):
    """Execute *search* (with query-level caching) and return (records, meta).

    The query hash is checked against cached_queries under its lock; on a
    miss the search is executed, its metadata extracted and its hit ids
    cached.  Hit documents are then materialized from cached_docs and
    annotated with per-user library state.
    """
    search_dict = search.to_dict()
    query_hash = make_hash(search_dict)
    print(query_hash)
    # query_hash = 0
    have = False
    with cached_queries_lock:
        if query_hash in cached_queries:
            d = cached_queries[query_hash]
            list_of_ids = d["list_of_ids"]
            meta = d["meta"]
            have = True
    # temp disable caching
    # print("remember, caching disabled for testing")
    # have = False
    if not have:
        es_response = search.execute()
        meta = get_meta_from_response(es_response)
        list_of_ids = process_query_to_cache(query_hash, es_response, meta)
    with cached_docs_lock:
        records = []
        for _id in list_of_ids:
            doc = cached_docs[_id]
            # NOTE(review): doc.update mutates the shared cached document, so
            # score/explain_sentence from one query can leak into later ones.
            if list_of_ids[_id]:
                if "score" in list_of_ids[_id]:
                    doc.update({'score' : list_of_ids[_id]["score"]})
                if "explain_sentence" in list_of_ids[_id]:
                    doc.update({'explain_sentence' : list_of_ids[_id]["explain_sentence"]})
            records.append(doc)
    records = [add_user_data_to_hit(r) for r in records]
    return records, meta
# def test_hash_speed():
# {'size': 10, 'query': {'match_all': {}}, 'sort': [{'updated': {'order': 'desc'}}], 'from': 0}
# -----------------------------------------------------------------------------
# Build and filter query
# -----------------------------------------------------------------------------
def cat_filter(groups_of_cats):
    """AND together one filter per group; within a group categories are OR'd."""
    combined = Q()
    for group in groups_of_cats:
        if not group:
            continue  # empty group contributes nothing
        if len(group) == 1:
            combined = combined & Q('term', cats=group[0])
        else:
            # perform an OR filter among the categories in this group
            combined = combined & Q('terms', cats=group)
    return combined
def prim_filter(prim_cat):
    """Filter on the primary category; "any" disables the filter."""
    if prim_cat == "any":
        return Q()
    return Q('term', primary_cat=prim_cat)
def time_filter(time):
    """Build a date filter on 'updated'.

    *time* is "all" (no filter), one of the named windows handled by
    getTimeFilterQuery, or a {'start': ..., 'end': ...} mapping.
    """
    filt = Q()
    if time == "all":
        return filt
    if time in ("3days", "week", "day", "month", "year"):
        return filt & getTimeFilterQuery(time)
    filt = filt & Q('range', updated={'gte': time['start'] })
    return filt & Q('range', updated={'lte': time['end'] })
def ver_filter(v1):
    """Restrict results to first-version papers when *v1* is truthy."""
    return (Q() & Q('term', paper_version=1)) if v1 else Q()
def lib_filter(only_lib):
    """Optionally restrict results to papers in the current user's library."""
    filt = Q()
    if only_lib:
        pids = ids_from_library()
        if pids:
            # match by document id against the user's saved paper ids
            filt = Q('bool', filter=[Q('terms', _id=pids)])
    return filt
def extract_query_params(query_info):
    """Translate the client's query dict into an ES search plus filters.

    Returns (search, Q_cat, Q_prim, Q_time, Q_v1, Q_lib); *search* is None
    when the request is unsatisfiable (library-based search with an empty
    library).  Text search, similar-to and recommend-from-library queries
    are combined into one bool query; with no query at all, results are
    sorted by recency.

    NOTE(review): auth_query and pair_fields are extracted but never used
    below -- presumably leftovers from a removed feature.
    """
    query_info = sanitize_query_object(query_info)
    search = Search(using=es, index='arxiv_pointer')
    # Optional tuning knobs for the more_like_this recommendation queries.
    tune_dict = None
    weights = None
    pair_fields = None
    if 'rec_tuning' in query_info:
        if query_info['rec_tuning'] is not None:
            rec_tuning = query_info['rec_tuning']
            weights = rec_tuning.pop('weights', None)
            pair_fields= rec_tuning.pop('pair_fields', None)
            tune_dict = rec_tuning
    # add query
    auth_query = None
    query_text = None
    sim_to_ids = None
    rec_lib = False
    bad_search = False
    lib_ids = ids_from_library()
    if 'rec_lib' in query_info:
        rec_lib = query_info['rec_lib']
    if query_info['query'].strip() != '':
        query_text = query_info['query'].strip()
    if 'author' in query_info:
        if query_info['author'].strip() != '':
            auth_query = query_info['author'].strip()
    if 'sim_to' in query_info:
        sim_to_ids = query_info['sim_to']
    # Request score explanations only for plain searches (not recommendations).
    if (not sim_to_ids) and (not rec_lib):
        search = search.extra(explain=True)
    queries = []
    # if query_text:
    #     queries.append(get_simple_search_query(query_text, weights = weights))
    if rec_lib:
        if lib_ids:
            queries.append(get_sim_to_query(lib_ids, tune_dict = tune_dict, weights = weights))
        else:
            # can't recommend from an empty library
            bad_search = True
    if sim_to_ids:
        queries.append(get_sim_to_query(sim_to_ids, tune_dict = tune_dict, weights = weights))
    if query_text:
        # search = search.sort('_score')
        # print("sorting by score")
        if len(queries) > 0:
            # text query is mandatory; recommendation queries only boost
            q = Q("bool", must=get_simple_search_query(query_text), should = queries)
            search = search.query(q)
        else:
            q = get_simple_search_query(query_text)
            search = search.query(q)
    else:
        if len(queries) > 0:
            if len(queries) == 1:
                q = queries[0]
            else:
                q = Q("bool", should = queries)
            search = search.query(q)
        else:
            # no query at all: newest first
            search = search.sort('-updated')
    # if rec_lib and lib_ids:
    #     re_q = get_sim_to_query(lib_ids, tune_dict = {'max_query_terms' : 25, 'minimum_should_match': '1%'})
    #     search = search.extra(rescore={'window_size': 100, "query": {"rescore_query": re_q.to_dict()}})
    # print('%s queries' % len(queries))
    # if not (queries):
    #     search = search.sort('-updated')
    # elif len(queries)==1:
    #     print(queries)
    #     q = queries[0]
    #     search = search.query(q)
    # elif len(queries)>1:
    #     if query_text:
    #         q = Q("bool", must=get_simple_search_query(query_text), should = queries, disable_coord =True)
    #         search = search.query(q)
    # print('search dict:')
    # print(search.to_dict())
    # get filters
    Q_lib = Q()
    if 'only_lib' in query_info:
        Q_lib = lib_filter(query_info['only_lib'])
        # print('lib_ids = %s' % lib_ids)
        if query_info['only_lib'] and (not lib_ids):
            bad_search = True
    Q_cat = Q()
    if 'category' in query_info:
        Q_cat = cat_filter(query_info['category'])
    Q_prim = Q()
    if 'primaryCategory' in query_info:
        Q_prim = prim_filter(query_info['primaryCategory'])
    Q_time = Q()
    if 'time' in query_info:
        Q_time = time_filter(query_info['time'])
    Q_v1 = Q()
    if 'v1' in query_info:
        Q_v1= ver_filter(query_info['v1'])
    if bad_search:
        search = None
    return search, Q_cat, Q_prim, Q_time, Q_v1, Q_lib
def get_weighted_list_of_fields(weights):
    """Return the document fields to search.

    Per-field boosting is currently disabled, so *weights* is accepted for
    interface compatibility but ignored; the plain field list is returned.
    """
    return ['fulltext', 'title', 'abstract', 'all_authors']
def get_simple_search_query(string, weights = None):
    """Build a simple_query_string query over the searchable fields plus _id."""
    fields = get_weighted_list_of_fields(weights) + ['_id']
    return Q("simple_query_string", query=string, default_operator="AND", fields=fields)
def get_sim_to_query(pids, tune_dict = None, weights = None):
    """Build a more_like_this query seeded by the papers in *pids*."""
    like_docs = [makepaperdict(strip_version(v)) for v in pids]
    extra = dict(tune_dict) if tune_dict else {}
    return Q("more_like_this", stop_words=stop_words, like=like_docs,
             fields=get_weighted_list_of_fields(weights), include=False, **extra)
def build_query(query_info):
    """Construct the full ES search (query plus AND-combined filters)."""
    search, q_cat, q_prim, q_time, q_v1, q_lib = extract_query_params(query_info)
    if search:
        search = search.filter(q_cat & q_prim & q_time & q_v1 & q_lib)
    return search
def add_counts_aggs(search, Q_cat, Q_prim, Q_time, Q_v1, Q_lib):
    """Attach the facet-count aggregations (primary cat, year, category,
    time window, library membership) to *search* and return it."""
    Q_lib_on = lib_filter(True)
    # define and add the aggregations, each filtered by all the filters except
    # variables corresponding to what the aggregation is binning over
    prim_agg = A('terms', field='primary_cat')
    prim_filt = A('filter', filter=(Q_cat & Q_time & Q_v1 & Q_lib) )
    search.aggs.bucket("prim_filt",prim_filt).bucket("prim_agg", prim_agg)
    year_filt = A('filter', filter = (Q_cat & Q_prim & Q_v1 & Q_lib))
    year_agg = A('date_histogram', field='published', interval="year")
    search.aggs.bucket('year_filt', year_filt).bucket('year_agg', year_agg)
    in_filt = A('filter', filter=(Q_cat & Q_prim & Q_time & Q_v1 & Q_lib))
    in_agg = A('terms', field='cats')
    search.aggs.bucket('in_filt', in_filt).bucket('in_agg',in_agg)
    time_filt = A('filter', filter = (Q_cat & Q_prim & Q_v1 & Q_lib))
    cutoffs = getTimesForFilters()
    # one bucket per named window; each range is open-ended up to "now"
    time_agg = A('date_range', field='updated', ranges = [{"to" : "now", "key" : "alltime"}, \
                                                          {"from" : cutoffs["year"], "key" : "year"}, \
                                                          {"from" : cutoffs["month"], "key" : "month"}, \
                                                          {"from" : cutoffs["week"], "key" : "week"}, \
                                                          {"from" : cutoffs["3days"], "key" : "3days"}, \
                                                          {"from" : cutoffs["day"], "key" : "day"}])
    search.aggs.bucket('time_filt', time_filt).bucket('time_agg', time_agg)
    lib_filt = A('filter', filter=(Q_cat & Q_prim & Q_time & Q_v1 & Q_lib))
    lib_agg = A('filters', filters= {"in_lib" : Q_lib_on, "out_lib": ~(Q_lib_on)})
    search.aggs.bucket('lib_filt', lib_filt).bucket('lib_agg', lib_agg)
    return search
def build_slow_meta_query(query_info):
    """Build a search carrying the sampled significant-terms author aggregation."""
    search, q_cat, q_prim, q_time, q_v1, q_lib = extract_query_params(query_info)
    if not search:
        return None
    filt = A('filter', filter=(q_cat & q_prim & q_time & q_v1 & q_lib))
    sampler = A('sampler', shard_size=200)
    authors = A('significant_terms', field='authors')
    search.aggs.bucket('sig_filt', filt).bucket('sampler_agg', sampler).bucket('auth_agg', authors)
    return search
def build_explain_query(query_info, id):
    """Build a search that explains the score of document *id* for this query."""
    search, q_cat, q_prim, q_time, q_v1, q_lib = extract_query_params(query_info)
    if search:
        search = search.extra(explain=True).filter('term', _id=id)
    return search
def build_meta_query(query_info):
    """Build a search carrying the fast facet-count aggregations."""
    search, q_cat, q_prim, q_time, q_v1, q_lib = extract_query_params(query_info)
    if not search:
        return None
    return add_counts_aggs(search, q_cat, q_prim, q_time, q_v1, q_lib)
def parse_author_name(name_in):
    """Strip parenthesized affiliations from an author name.

    Removes any '(...)' group (including an unterminated trailing one) and
    any leading fragment ending in ')' left over from a split on commas,
    then trims surrounding whitespace.

    The patterns are now raw strings: '\\(' in a plain literal is an
    invalid escape sequence and warns on Python 3.12+ (runtime bytes are
    unchanged).
    """
    name_out = re.sub(r'(\(.*$)|(\(.*?\))', '', name_in)
    name_out = re.sub(r'^.*?\)', '', name_out)
    return name_out.strip()
def get_meta_from_response(response):
    """Flatten the aggregation buckets of an ES response into a plain dict.

    Each aggregation is optional; only those present on the response appear
    as keys in the returned metadata (auth_data, keyword_data, lib_data,
    date_hist_data, prim_data, in_data, time_filter_data).
    """
    meta = {}
    if "aggregations" in response:
        if "sig_filt" in response.aggregations:
            if "sampler_agg" in response.aggregations.sig_filt:
                if "auth_agg" in response.aggregations.sig_filt.sampler_agg:
                    # significant authors: cleaned name -> significance score
                    auth_data = {}
                    for buck in response.aggregations.sig_filt.sampler_agg.auth_agg.buckets:
                        name = parse_author_name(buck.key)
                        score = buck.score
                        if name != '':
                            auth_data[name] = score
                    meta["auth_data"] = auth_data
                if "keywords_agg" in response.aggregations.sig_filt.sampler_agg:
                    keyword_data = {}
                    for buck in response.aggregations.sig_filt.sampler_agg.keywords_agg.buckets:
                        keyword = buck.key
                        keyword_data[keyword] = buck.score
                    meta["keyword_data"] = keyword_data
        if "lib_filt" in response.aggregations:
            bucks = response.aggregations.lib_filt.lib_agg.buckets
            lib_data = {"in_lib" : bucks.in_lib.doc_count, "out_lib" : bucks.out_lib.doc_count}
            meta["lib_data"] = lib_data
        if "year_filt" in response.aggregations:
            # per-year histogram: epoch seconds (ES keys are in millis) -> count
            date_hist_data = {}
            for x in response.aggregations.year_filt.year_agg.buckets:
                timestamp = round(x.key/1000)
                num_results = x.doc_count
                date_hist_data[timestamp] = num_results
            meta["date_hist_data"] = date_hist_data
        if "prim_filt" in response.aggregations:
            prim_data = {}
            for prim in response.aggregations.prim_filt.prim_agg.buckets:
                cat = prim.key
                num_results = prim.doc_count
                prim_data[cat] = num_results
            meta["prim_data"] = prim_data
        if "in_filt" in response.aggregations:
            in_data = {}
            for buck in response.aggregations.in_filt.in_agg.buckets:
                cat = buck.key
                num_results = buck.doc_count
                in_data[cat] = num_results
            meta["in_data"] = in_data
        if "time_filt" in response.aggregations:
            time_filter_data = {}
            for buck in response.aggregations.time_filt.time_agg.buckets:
                time_range=buck.key
                num_results=buck.doc_count
                time_filter_data[time_range] = num_results
            meta["time_filter_data"] = time_filter_data
    return meta
@app.route('/_explainscore', methods=['POST'])
def _getexplanation():
    """Return the ES score explanation for one paper under the posted query.

    Fixes two defects in the original:
    * it passed the ``id`` BUILTIN to build_explain_query instead of a
      paper id taken from the request payload, so the _id term filter
      could never match anything;
    * it discarded the result of ``search.source()`` -- elasticsearch_dsl
      Search objects are immutable, so the call was a no-op.
    """
    data = request.get_json()
    query_info = data['query']
    # NOTE(review): payload key assumed; confirm what the client actually posts.
    paper_id = data.get('pid', data.get('id'))
    search = build_explain_query(query_info, paper_id)
    if not search:
        return jsonify({})
    # only the explanation is needed, not the stored fields
    search = search.source(includes=[])
    search = search[0:1]
    results = search.execute()
    print(results)
    try:
        hit = results.hits[0]
        expl = hit.meta['explanation']
    except Exception:
        print("no hits for explain query")
        expl = {}
    print('expl:')
    print(expl)
    return jsonify(expl)
def testexpl(query_info, id):
    """Debug helper: print the score explanation for paper *id*.

    Fix: the original discarded the result of ``search.source()`` --
    elasticsearch_dsl Search objects are immutable, so the intended field
    exclusion never happened; the clone is now assigned back.
    """
    search = build_explain_query(query_info, id)
    if not search:
        return jsonify({})
    # only the explanation is needed, not the stored fields
    search = search.source(includes=[])
    search = search[0:1]
    results = search.execute()
    print(results)
    try:
        hit = results.hits[0]
        expl = hit.meta['explanation']
    except Exception:
        print("no hits for explain query")
        expl = {}
    print('expl:')
    print(expl)
@app.route('/_getmeta', methods=['POST'])
def _getmeta():
    """Return the fast facet-count metadata for the posted query."""
    query_info = request.get_json()['query']
    search = build_meta_query(query_info)
    if not search:
        return jsonify({})
    # size 0: only aggregations are needed, no hits
    _, meta = getResults(search[0:0])
    return jsonify(meta)
@app.route('/_getslowmeta', methods=['POST'])
def _getslowmeta():
    """Return the slow significant-terms metadata (feature currently disabled)."""
    slow_meta = False
    if not slow_meta:
        return jsonify({'Sorry I turned off slow meta' : "it's to slow..."})
    data = request.get_json()
    query_info = data['query']
    search = build_slow_meta_query(query_info)
    if not search:
        return jsonify({})
    # size 0: only aggregations are needed, no hits
    search = search[0:0]
    papers, meta = getResults(search)
    print("slow meta arrived")
    return jsonify(meta)
def testmeta(query_info):
    """Debug helper: print the fast facet-count metadata for a query."""
    search = build_meta_query(query_info)
    search = search[0:0]  # aggregations only
    papers, meta = getResults(search)
    print("meta:")
    print(meta)
def testslowmeta(query_info):
    """Debug helper: print the slow significant-terms metadata for a query."""
    search = build_slow_meta_query(query_info)
    search = search[0:0]  # aggregations only
    papers, meta = getResults(search)
    print("slowmeta:")
    print(meta)
@app.route('/_getpapers', methods=['POST'])
def _getpapers():
    """Main search endpoint: run the posted query and return a page of papers.

    Expects JSON with start_at, num_get, dyn and query; returns the encoded
    hits plus paging info.  Requests and (for logged-in users) searches are
    logged to Elasticsearch-backed loggers.
    """
    print("getting papers")
    data = request.get_json()
    start = data['start_at']
    number = data['num_get']
    dynamic = data['dyn']
    query_info = data['query']
    #need to build the query from the info given here
    search = build_query(query_info)
    if not search:
        # unsatisfiable query (e.g. library search with empty library)
        return jsonify(dict(papers=[],dynamic=dynamic, start_at=start, num_get=number, tot_num_papers=0))
    # restrict the returned fields to what encode_hit consumes
    search = search.source(includes=['havethumb','rawid','paper_version','title','primary_cat', 'authors', 'link', 'abstract', 'cats', 'updated', 'published','arxiv_comment'])
    search = search[start:start+number]
    search.search_type="dfs_query_then_fetch"
    tot_num_papers = search.count()
    # print(tot_num_papers)
    log_dict = {}
    log_dict.update(search= search.to_dict())
    log_dict.update(client_ip = request.remote_addr)
    log_dict.update(client_route = request.access_route)
    if 'X-Real-IP' in request.headers:
        log_dict.update(client_x_real_ip = request.headers['X-Real-IP'])
    access_log.info("ES search request", extra=log_dict )
    if 'user_id' in session:
        uid = session['user_id']
        user_log.info('User fired search', extra=dict(search =search.to_dict(), uid = uid, library = ids_from_library() ))
    # access_log.info(msg="ip %s sent ES search fired: %s" % search.to_dict())
    print(search.to_dict())
    papers, meta = getResults(search)
    # testexpl(query_info,papers[0]['rawpid'])
    # scored_papers = 0
    # tot_score = 0
    # max_score = 0
    # for p in papers:
    #     if "score" in p:
    #         scored_papers +=1
    #         tot_score += p["score"]
    #         if p["score"] > max_score:
    #             max_score = p["score"]
    # if scored_papers > 0:
    #     avg_score = tot_score/scored_papers
    #     print("avg_score")
    #     print(avg_score)
    #     print("max_score")
    #     print(max_score)
    # print('done papers')
    # testmeta(query_info)
    # testslowmeta(query_info)
    return jsonify(dict(papers=papers,dynamic=dynamic, start_at=start, num_get=number, tot_num_papers=tot_num_papers))
#----------------------------------------------------------------
# Sanitize data from the client
#----------------------------------------------------------------
# from
# https://gist.github.com/eranhirs/5c9ef5de8b8731948e6ed14486058842
def sanitize_string(text):
    """Escape Lucene query syntax in user-supplied text.

    Backslash-escapes Lucene special characters, rewrites the AND/OR/NOT
    operators as their +/|/- symbol forms, and escapes the last quote when
    the quote count is odd (an unbalanced quote breaks the query parser).
    Returns the sanitized string.
    """
    # Escape special characters
    # http://lucene.apache.org/core/old_versioned_docs/versions/2_9_1/queryparsersyntax.html#Escaping Special Characters
    # BUG FIX: the replacement used to be r"\1", which re-inserted the
    # character unchanged — no escaping actually happened. It must be
    # r"\\\1" (a literal backslash followed by the matched character).
    specials = '\\+-&|!(){}[]^~*?:/'
    text = re.sub('([{}])'.format(re.escape(specials)), r"\\\1", text)
    # AND, OR and NOT are used by lucene as logical operators; rewrite them
    # as the equivalent +, | and - prefix operators.
    operator_symbols = {'AND': '+', 'OR': '|', 'NOT': '-'}
    for word, symbol in operator_symbols.items():
        text = re.sub(r'\s*\b({})\b\s*'.format(word), r" {} ".format(symbol), text)
    # Escape the last double quote when the total count is odd.
    # BUG FIX: the replacement used to be r'\1\"\2', which produces a plain
    # quote; r'\1\\"\2' emits the intended backslash-quote.
    quote_count = text.count('"')
    if quote_count % 2 == 1:
        text = re.sub(r'(.*)"(.*)', r'\1\\"\2', text)
    return text
def san_dict(d):
    """Return `d` unchanged when it is a dict, otherwise an empty dict."""
    return d if isinstance(d, dict) else {}
def san_dict_value(dictionary, key, typ, valid_options):
    """Drop `key` from `dictionary` unless its value is a `typ` instance
    contained in `valid_options`. Mutates and returns the dict."""
    if key in dictionary:
        value = dictionary[key]
        # Short-circuit keeps the membership test type-safe.
        if not isinstance(value, typ) or value not in valid_options:
            dictionary.pop(key, None)
    return dictionary
def san_dict_bool(dictionary, key):
    """Drop `key` from `dictionary` unless its value is a real bool."""
    if key in dictionary and not isinstance(dictionary[key], bool):
        dictionary.pop(key, None)
    return dictionary
def san_dict_str(dictionary, key):
    """Escape the string value at `key`; drop the entry when it is not a str."""
    if key in dictionary:
        value = dictionary[key]
        if isinstance(value, str):
            dictionary[key] = sanitize_string(value)
        else:
            dictionary.pop(key, None)
    return dictionary
def san_dict_num(dictionary, key):
    """Drop `key` from `dictionary` unless its value is numeric (int or float)."""
    if key in dictionary and not isinstance(dictionary[key], (int, float)):
        dictionary.pop(key, None)
    return dictionary
def san_dict_int(dictionary, key):
    """Coerce a float value at `key` to a rounded int; drop non-int values."""
    if key not in dictionary:
        return dictionary
    value = dictionary[key]
    if isinstance(value, float):
        value = int(round(value))
        dictionary[key] = value
    if not isinstance(value, int):
        dictionary.pop(key, None)
    return dictionary
def san_dict_keys(dictionary, valid_keys):
    """Coerce `dictionary` to a dict, then keep only the whitelisted keys."""
    dictionary = san_dict(dictionary)
    return {key: dictionary[key] for key in valid_keys if key in dictionary}
def valid_list_of_cats(group):
    """True when `group` is a list whose every entry is a known category.

    An empty list is considered valid (vacuous truth).
    """
    return isinstance(group, list) and all(g in ALL_CATEGORIES for g in group)
def sanitize_pid_list(list_of_pids):
    """Keep only string entries that name a paper present in the index."""
    # isvalid is only evaluated for string entries (short-circuit).
    return [p for p in list_of_pids if isinstance(p, str) and isvalid(p)]
def san_dict_list_pids(dictionary, key):
    """Sanitize a list of paper ids at `key`; drop the entry when not a list."""
    if key in dictionary:
        value = dictionary[key]
        if isinstance(value, list):
            dictionary[key] = sanitize_pid_list(value)
        else:
            dictionary.pop(key, None)
    return dictionary
def sanitize_rec_tuning_object(rec_tuning):
    """Whitelist and type-check the recommendation-tuning options sent by the
    client. Invalid entries are silently dropped; the sanitized dict is
    returned."""
    valid_keys = ['weights', 'max_query_terms', 'min_doc_freq', 'max_doc_freq', 'minimum_should_match', 'boost_terms','pair_fields']
    rec_tuning = san_dict_keys(rec_tuning, valid_keys)
    if 'weights' in rec_tuning:
        # Per-field boost weights: each entry must be numeric.
        w = rec_tuning['weights']
        w_valid_keys = ['fulltext', 'title', 'abstract', 'all_authors']
        w = san_dict_keys(w, w_valid_keys)
        for key in w_valid_keys:
            w = san_dict_num(w,key)
        rec_tuning['weights'] = w
    rec_tuning = san_dict_int(rec_tuning, 'max_query_terms' )
    rec_tuning = san_dict_int(rec_tuning, 'min_doc_freq' )
    rec_tuning = san_dict_int(rec_tuning, 'max_doc_freq' )
    # special for max_doc_freq: values below 1 would match nothing, drop them
    if 'max_doc_freq' in rec_tuning:
        if rec_tuning['max_doc_freq'] < 1:
            rec_tuning.pop('max_doc_freq', None)
    # minimum_should_match accepts a number or a percentage string like "-25%"
    if 'minimum_should_match' in rec_tuning:
        m = rec_tuning['minimum_should_match']
        if isinstance(m, int) or isinstance(m, float):
            rec_tuning = san_dict_int(rec_tuning,'minimum_should_match' )
        elif isinstance(m,str):
            # Only a signed one/two digit percentage is accepted.
            m = re.fullmatch(r'-?\d{1,2}?%',m)
            if not m:
                rec_tuning.pop('minimum_should_match', None)
        else:
            rec_tuning.pop('minimum_should_match', None)
    rec_tuning = san_dict_num(rec_tuning, 'boost_terms' )
    rec_tuning = san_dict_bool(rec_tuning, 'pair_fields' )
    return rec_tuning
def sanitize_query_object(query_info):
    """Whitelist, type-check and escape every field of a client search query.

    Invalid entries are dropped; the sanitized dict is returned.
    """
    valid_keys = ['query', 'rec_lib', 'category', 'time', 'primaryCategory', 'author','v1', 'only_lib', 'sim_to', 'rec_tuning']
    query_info = san_dict_keys(query_info, valid_keys)
    if 'rec_tuning' in query_info:
        query_info['rec_tuning'] = sanitize_rec_tuning_object(query_info['rec_tuning'])
    if 'category' in query_info:
        cats = query_info['category']
        if not isinstance(cats, list):
            query_info.pop('category')
        else:
            # BUG FIX: the old code called cats.remove() while iterating cats,
            # which skips elements after a removal, and it iterated even when
            # 'category' had just been popped for not being a list (crashing
            # on e.g. a string value). Build a filtered copy instead.
            query_info['category'] = [group for group in cats if valid_list_of_cats(group)]
    query_info = san_dict_value(query_info, 'primaryCategory', str, ALL_CATEGORIES)
    query_info = san_dict_str(query_info, 'query')
    query_info = san_dict_str(query_info, 'author')
    query_info = san_dict_list_pids(query_info, 'sim_to')
    query_info = san_dict_bool(query_info, 'rec_lib')
    query_info = san_dict_bool(query_info, 'only_lib')
    query_info = san_dict_bool(query_info, 'v1')
    if 'time' in query_info:
        time = query_info['time']
        if isinstance(time, dict):
            # Explicit range: both millisecond timestamps must be present.
            time = san_dict_keys(time, ['start','end'])
            time = san_dict_int(time, 'start')
            time = san_dict_int(time, 'end')
            if not (('start' in time) and ('end' in time)):
                query_info.pop('time', None)
            else:
                # BUG FIX: the sanitized copy was previously discarded,
                # leaving the raw client dict in query_info.
                query_info['time'] = time
        else:
            valid_times = ["3days" , "week" , "day" , "all" , "month" , "year"]
            query_info = san_dict_value(query_info, 'time', str, valid_times)
    return query_info
#--------------------------------------------------------
# Caching
#--------------------------------------------------------
def make_hash(o):
    """
    Hash a dictionary, list, tuple or set to any depth, provided the leaves
    are hashable. Dicts hash order-independently; lists/tuples/sets become
    tuples of their elements' hashes.
    """
    if isinstance(o, (set, tuple, list)):
        return tuple(make_hash(element) for element in o)
    if not isinstance(o, dict):
        return hash(o)
    # Deep-copy before hashing the values so the caller's dict is untouched.
    hashed_items = {k: make_hash(v) for k, v in copy.deepcopy(o).items()}
    return hash(tuple(frozenset(sorted(hashed_items.items()))))
def getExplainSentence(explanation):
    """Render an ES score explanation as a short '<total> = frag frag ...' line.

    Returns "" for the constant-score (unscored) case. At most four fragments
    are shown, sorted by descending contribution, the rest elided as "...".
    """
    if explanation['description'] == 'ConstantScore(*:*)^0.0':
        return ""
    total_score = explanation['value']
    frags, _reasons = parseItem(explanation, [], [], '')
    frags.sort(key=lambda pair: pair[0], reverse=True)
    phrases = [text for _score, text in frags]
    limit = 4
    if len(phrases) > limit:
        phrases = phrases[:limit - 1]
        phrases.append("...")
    if phrases:
        # Strip the leading operator character from the first fragment.
        phrases[0] = phrases[0][1:]
    return "{0:.2f}".format(total_score) + ' = ' + ' '.join(phrases)
def parseItem(item, sentfrags, reasons, op):
    """Recursively walk an ES explanation node, accumulating score fragments.

    Returns the (sentfrags, reasons) pair extended with this subtree's
    contributions. Nodes without a readable 'description' are skipped.
    """
    if item is None:
        return sentfrags, reasons
    try:
        desc = item['description']
    except Exception:
        # Non-mapping nodes or missing keys are silently ignored.
        return sentfrags, reasons
    if desc in ('sum of:', 'max of:'):
        # Combinator node: recurse into the children with the matching op.
        op = '+' if desc == 'sum of:' else 'v'
        for child in item["details"]:
            sentfrags, reasons = parseItem(child, sentfrags, reasons, op)
        return sentfrags, reasons
    new_frags, new_reasons = parseReasons(desc, item['value'], op)
    return sentfrags + new_frags, reasons + new_reasons
def parseReasons(desc, value, op):
    """Extract a 'weight(field:word in ...)' term from a leaf description.

    Returns ([(value, human_text)], [{'word','field','value'}]) for a match,
    or two empty lists otherwise.
    """
    frags = []
    reasons = []
    try:
        match_obj = re.search(r'weight\(\w+?:.+? in', desc)
        if match_obj:
            inner = match_obj.group(0)[len('weight('):]
            parts = inner.split(":")
            field = parts[0]
            word = parts[1][:-len(' in')]
            sentence = op + ' ' + "{0:.2f}".format(value) + \
                ' (for occurrences of ' + word + ' in the ' + field + ')'
            frags.append((value, sentence))
            reasons.append({'word': word, "field": field, "value": value})
    except Exception:
        print("error")
    return frags, reasons
def process_query_to_cache(query_hash, es_response, meta):
    """Store an executed ES response in the module-level caches.

    Each hit is encoded once into `cached_docs` (keyed by paper id), and
    `query_hash` is mapped to the id -> score-info ordering plus `meta`
    under `cached_queries`. Returns the id -> score-info mapping.
    """
    list_of_ids = {}
    for record in es_response:
        score_object = {}
        _id = record.meta.id
        if "score" in record.meta:
            score_object["score"] = record.meta.score
        if "explanation" in record.meta:
            # Pre-render the human-readable score breakdown once.
            score_object["explain_sentence"] = getExplainSentence(record.meta.explanation)
        list_of_ids[_id] = score_object
        with cached_docs_lock:
            if _id not in cached_docs:
                cached_docs[_id] = encode_hit(record)
    with cached_queries_lock:
        cached_queries[query_hash] = dict(list_of_ids=list_of_ids,meta=meta)
    return list_of_ids
def async_add_to_cache(search):
    """Warm the query cache for `search` on a background daemon thread.

    No-op when the query's hash is already present in `cached_queries`.
    """
    search_dict = search.to_dict()
    query_hash = make_hash(search_dict)
    # Only the membership check is done under the lock; the actual caching
    # happens on the worker thread.
    with cached_queries_lock:
        check_in = query_hash in cached_queries
    if not check_in:
        t = threading.Thread(target=add_to_cache, args=(query_hash, search), daemon=True)
        t.start()
def add_to_cache(query_hash, search):
    """Execute `search` and record the response under `query_hash`.

    The ES round-trip is bounded by `es_query_semaphore` to cap concurrent
    connections; cache insertion happens outside the semaphore.
    """
    with es_query_semaphore:
        es_response = search.execute()
    meta = get_meta_from_response(es_response)
    process_query_to_cache(query_hash, es_response, meta)
def addDefaultSearchesToCache():
    # Placeholder: pre-warming the cache with default searches is disabled.
    return False
def addUserSearchesToCache():
    # Placeholder: pre-warming the cache with per-user searches is disabled.
    return False
@app.route('/_invalidate_cache')
def _invalidate_cache():
    """Drop every module-level cache; requires the shared secret as ?secret=.

    Always redirects back to the main page, whether or not the secret matched.
    """
    secret = request.args.get('secret', False)
    if secret == cache_key:
        print('successfully invalidated cache')
        flash('successfully invalidated cache')
        global cached_queries
        with cached_queries_lock:
            cached_queries = {}
        global list_of_users_cached
        with list_of_users_lock:
            list_of_users_cached = []
        global cached_docs
        with cached_docs_lock:
            cached_docs = {}
        # Repopulate with the default searches (currently a no-op stub).
        addDefaultSearchesToCache()
    return redirect(url_for('intmain'))
#-------------------------------------------------
# Search functions
#-------------------------------------------------
def countpapers():
    """Total number of papers in the arxiv_pointer index."""
    return Search(using=es, index="arxiv_pointer").count()
def makepaperdict(pid):
    """Build the ES 'like'-document pointer for the paper with id `pid`."""
    return {
        "_index": 'arxiv_pointer',
        "_type": 'paper',
        "_id": pid,
    }
def add_papers_similar_query(search, pidlist, extra_text = None):
    """Attach a more-like-this query for the papers in `pidlist`.

    `extra_text`, when given, is added as another 'like' document. Marks the
    session as not recency-sorted and returns `search` with the query applied.
    """
    session['recent_sort'] = False
    dlist = [ makepaperdict(strip_version(v)) for v in pidlist ]
    option = 1
    if option == 1:
        if extra_text:
            dlist.append(extra_text)
        q = Q("more_like_this", like=dlist, fields=['fulltext', 'title', 'abstract', 'all_authors'], include=False)
    else:
        # Experimental alternative: blend MLT with a plain text query.
        # NOTE(review): this branch references q2 even when extra_text is
        # falsy (NameError) — it is currently unreachable because option == 1.
        q1 = Q("more_like_this", like=dlist, fields=['fulltext', 'title', 'abstract', 'all_authors'], include=False, boost=.5)
        if extra_text:
            q2 = get_simple_search_query(extra_text)
        q = Q("bool", should = [q1,q2], disable_coord =True)
    # BUG FIX: the original duplicated this entire if/else after the return;
    # that unreachable copy has been removed.
    return search.query(q)
# def get_simple_search_query(string):
# return Q("simple_query_string", query=string, default_operator = "AND", \
# fields=['title','abstract', 'fulltext', 'all_authors', '_id'])
def ids_from_library():
    """Ids of the current user's library papers, or None when empty/absent."""
    return g.libids or None
def add_rec_query(search, extra_text = None):
    """Attach a recommendation query seeded by the current user's library."""
    seed_pids = g.libids if g.libids else []
    return add_papers_similar_query(search, seed_pids, extra_text)
#---------------------------------------------
# Endpoints
#----------------------------------------
def default_context(**kwargs):
    """Collect the keyword arguments into the template context dict."""
    return kwargs
@app.route("/")
def intmain():
ctx = default_context()
return render_template('main.html', **ctx)
def getpaper(pid):
    """ES search matching exactly the paper whose _id is `pid`."""
    s = Search(using=es, index="arxiv_pointer")
    return s.query("match", _id=pid)
def isvalid(pid):
    """True when at least one indexed paper has id `pid`."""
    return getpaper(pid).count() > 0
@app.route('/libtoggle', methods=['POST'])
def review():
    """Toggle a paper in the logged-in user's library.

    Expects JSON {'pid': id-with-version}. Returns {'on': bool} with the new
    membership state, or the string 'FAIL' when not logged in / unknown paper.
    Also invalidates the per-user cache entry for this user.
    """
    # make sure user is logged in
    if not g.user:
        return 'FAIL' # fail... (not logged in). JS should prevent from us getting here.
    data = request.get_json()
    idvv = data['pid'] # includes version
    pid = strip_version(idvv)
    if not isvalid(pid):
        return 'FAIL' # we don't know this paper. wat
    uid = session['user_id'] # id of logged in user
    # check this user already has this paper in library
    record = query_db('''select * from library where
                    user_id = ? and paper_id = ?''', [uid, pid], one=True)
    # print(record)
    ret = "FAIL"
    if record:
        # record exists, erase it.
        g.db.execute('''delete from library where user_id = ? and paper_id = ?''', [uid, pid])
        g.db.commit()
        #print('removed %s for %s' % (pid, uid))
        ret = False
    else:
        # record does not exist, add it.
        g.db.execute('''insert into library (paper_id, user_id, update_time) values (?, ?, ?)''',
                [pid, uid, int(time.time())])
        g.db.commit()
        #print('added %s for %s' % (pid, uid))
        ret = True
    # The user's cached search results are now stale; force a rebuild.
    with list_of_users_lock:
        if uid in list_of_users_cached:
            list_of_users_cached.remove(uid)
    update_libids()
    addUserSearchesToCache()
    if ret:
        user_log.info('User added paper to their library', extra=dict(uid = uid, pid =pid, added_paper =True, removed_paper = False, library = ids_from_library() ))
    else:
        user_log.info('User removed paper from their library', extra=dict(uid = uid, pid =pid, added_paper =False, removed_paper = True, library = ids_from_library() ))
    return jsonify(dict(on=ret))
@app.route('/login', methods=['POST'])
def login():
    """ logs in the user. if the username doesn't exist creates the account """
    if not request.form['username']:
        flash('You have to enter a username')
    elif not request.form['password']:
        flash('You have to enter a password')
    elif get_user_id(request.form['username']) is not None:
        # username already exists, fetch all of its attributes
        user = query_db('''select * from user where
            username = ?''', [request.form['username']], one=True)
        if check_password_hash(user['pw_hash'], request.form['password']):
            # password is correct, log in the user
            session['user_id'] = get_user_id(request.form['username'])
            # Warm this user's cached searches (currently a no-op stub).
            added = addUserSearchesToCache()
            if added:
                print('addUser fired')
            flash('User ' + request.form['username'] + ' logged in.')
        else:
            # incorrect password
            flash('User ' + request.form['username'] + ' already exists, wrong password.')
    else:
        # create account and log in
        creation_time = int(time.time())
        g.db.execute('''insert into user (username, pw_hash, creation_time) values (?, ?, ?)''',
            [request.form['username'],
            generate_password_hash(request.form['password']),
            creation_time])
        user_id = g.db.execute('select last_insert_rowid()').fetchall()[0][0]
        g.db.commit()
        session['user_id'] = user_id
        flash('New account %s created' % (request.form['username'], ))
    return redirect(url_for('intmain'))
@app.route('/logout')
def logout():
    """Clear the entire session (not just user_id) and return to the main page."""
    # session.pop('user_id', None)
    session.clear()
    # flash('You were logged out')
    return redirect(url_for('intmain'))
# @app.route('/static/<path:path>')
# def send_static(path):
# return send_from_directory('static', path)
# test change
#--------------------------------
# Times and time filters
#--------------------------------
def getNextArXivPublishCutoff(time):
    """Next arXiv publication cutoff (14:00, Monday-Friday) at or after `time`.

    Saturdays and Sundays roll forward to Monday 14:00; a Friday afternoon
    rolls forward across the weekend.
    """
    base = time.replace(hour=14, minute=0, second=0, microsecond=0)
    dow = time.weekday()
    if dow <= 3:  # Monday-Thursday: today 14:00, or tomorrow once past it
        return base if base >= time else base + timedelta(days=1)
    if dow == 4:  # Friday: today 14:00, or Monday once past it
        return base if base >= time else base + timedelta(days=3)
    if dow == 5:  # Saturday -> Monday 14:00
        return base + timedelta(days=2)
    return base + timedelta(days=1)  # Sunday -> Monday 14:00
def getLastArXivPublishCutoff(time):
    """Most recent arXiv publication cutoff (14:00, Monday-Friday) at or before `time`.

    Saturdays and Sundays roll back to Friday 14:00; a Monday morning rolls
    back across the weekend.
    """
    base = time.replace(hour=14, minute=0, second=0, microsecond=0)
    dow = time.weekday()
    if 1 <= dow <= 4:  # Tuesday-Friday: today 14:00, or yesterday if not reached
        return base if base <= time else base + timedelta(days=-1)
    if dow == 0:  # Monday: today 14:00, or back to Friday if not reached
        return base if base <= time else base + timedelta(days=-3)
    if dow == 5:  # Saturday -> Friday 14:00
        return base + timedelta(days=-1)
    return base + timedelta(days=-2)  # Sunday -> Friday 14:00
def getLastCutoffs():
    """Return the ten most recent arXiv publish cutoffs, newest first.

    The list is memoized in module globals and only rebuilt once the next
    publish cutoff has passed.
    """
    tz = timezone('America/New_York')
    now = datetime.now(tz)
    global arxiv_invalidation_time
    if now > arxiv_invalidation_time:
        # Cache expired: rebuild by walking backwards one cutoff at a time
        # (step back an hour so the same cutoff is not returned twice).
        cutoffs = []
        cutoffs.append(getLastArXivPublishCutoff(now))
        for j in range(1,10):
            cutoffs.append(getLastArXivPublishCutoff(cutoffs[-1] + timedelta(hours=-1)))
        arxiv_invalidation_time = getNextArXivPublishCutoff(now)
        global arxiv_cutoffs
        arxiv_cutoffs= cutoffs
    else:
        cutoffs = arxiv_cutoffs
    # print('--------')
    # for c in cutoffs:
    #     rendered_str = '%s %s %s' % (c.day, c.strftime('%b'), c.year)
    #     print(rendered_str)
    # print('--------')
    return cutoffs
def getTimesForFilters():
    """Map each named time filter to its cutoff timestamp in milliseconds."""
    legend = {'day': 1, '3days': 3, 'week': 5, 'month': 30, 'year': 365}
    return {name: get_time_for_days_ago(days) for name, days in legend.items()}
def get_time_for_days_ago(days):
    """Millisecond timestamp of the publication cutoff `days` days back.

    Short windows (< 10 days) index into the exact arXiv cutoff schedule;
    longer windows use an 18:00-aligned approximation.
    """
    now = datetime.now(tzutc())
    if days < 10:
        cutoffs= getLastCutoffs()
        cutoff = cutoffs[days+1]
    else:
        # account for published vs announced: shift one extra day back,
        # then snap to the previous 18:00 boundary.
        back = now + timedelta(days=-1) +timedelta(days=-1*days)
        back18 = back.replace(hour=18,minute=0,second=0,microsecond=0)
        if back > back18:
            back = back18
        if back < back18:
            back = back18 + timedelta(days=-1)
        cutoff = back+ timedelta(seconds = -1)
    time_cutoff = round(cutoff.timestamp()* 1000)
    return time_cutoff
def getTimeFilterQuery(ttstr) :
    """ES range query selecting papers updated within the named time window.

    Unknown window names yield a match-all query.
    """
    legend = {'day': 1, '3days': 3, 'week': 5, 'month': 30, 'year': 365}
    if ttstr not in legend:
        return Q()
    cutoff_ms = get_time_for_days_ago(legend[ttstr])
    return Q('range', updated={'gte': cutoff_ms})
def applyTimeFilter(search, ttstr):
    """Post-filter `search` to the requested time window."""
    time_query = getTimeFilterQuery(ttstr)
    return search.post_filter(time_query)
# -----------------------------------------------------------------------------
# int main
# -----------------------------------------------------------------------------
if __name__ == "__main__":
now = datetime.now(tzutc())
arxiv_invalidation_time = now
arxiv_cutoffs= []
getLastCutoffs()
parser = argparse.ArgumentParser()
# parser.add_argument('-p', '--prod', dest='prod', action='store_true', help='run in prod?')
parser.add_argument('-r', '--num_results', dest='num_results', type=int, default=200, help='number of results to return per query')
parser.add_argument('--port', dest='port', type=int, default=8500, help='port to serve on')
args = parser.parse_args()
print(args)
if not os.path.isfile(database_path):
print('did not find as.db, trying to create an empty database from schema.sql...')
print('this needs sqlite3 to be installed!')
os.system('sqlite3 ' + database_path + ' < ' + schema_path)
# os.system('chmod a+rwx as.db')
print('connecting to elasticsearch...')
context = elasticsearch.connection.create_ssl_context(cafile=certifi.where())
es = Elasticsearch(
["https://%s:%s@%s:9243" % (ES_USER,ES_PASS,es_host)], scheme="https", ssl_context=context, timeout=30)
# print(es.info())
# m = Mapping.from_es('arxiv', 'paper', using=es)
# print(m.authors)
# APP_NAME = 'Python Server'
# ES_LOG_USER = open(key_dir('ES_LOG_USER.txt'), 'r').read().strip()
# ES_LOG_PASS = open(key_dir('ES_LOG_PASS.txt'), 'r').read().strip()
# es_host = '0638598f91a536280b20fd25240980d2.us-east-1.aws.found.io'
# ES_log_handler = CMRESHandler(hosts=[{'host': es_host, 'port': 9243}],
# auth_type=CMRESHandler.AuthType.BASIC_AUTH,
# auth_details=(ES_LOG_USER,ES_LOG_PASS),
# es_index_name="python_logger",
# index_name_frequency=CMRESHandler.IndexNameFrequency.MONTHLY,
# es_additional_fields={'App': APP_NAME},
# use_ssl=True)
cached_docs = {}
cached_queries = {}
max_connections = 2
es_query_semaphore = threading.BoundedSemaphore(value=max_connections)
cached_queries_lock = threading.Lock()
cached_docs_lock = threading.Lock()
list_of_users_cached = []
list_of_users_lock = threading.Lock()
addDefaultSearchesToCache()
# start
# if args.prod:
# run on Tornado instead, since running raw Flask in prod is not recommended
print('starting tornado!')
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.log import enable_pretty_logging
from tornado.log import logging
from tornado.options import options
from tornado import autoreload
class RequestFormatter(logging.Formatter):
def format(self, record):
record.url = request.url
record.remote_addr = request.remote_addr
return super().format(record)
formatter = RequestFormatter(
'[%(asctime)s] %(remote_addr)s requested %(url)s\n'
'%(levelname)s in %(module)s: %(message)s')
# app.debug = False
options.log_file_prefix = "tornado.log"
# options.logging = "debug"
enable_pretty_logging()
# logging.debug("testlog")
logging.basicConfig(level=logging.INFO)
access_log = logging.getLogger("tornado.access")
access_log.setLevel(logging.INFO)
user_log = logging.getLogger("user_log")
user_fh = logging.FileHandler('user_info.log')
user_log.addHandler(user_fh)
# access_log.addHandler(ES_log_handler)
app_log = logging.getLogger("tornado.application")
gen_log = logging.getLogger("tornado.general")
# app_log.addHandler(ES_log_handler)
# gen_log.addHandler(ES_log_handler)
# user_log.addHandler(ES_log_handler)
http_server = HTTPServer(WSGIContainer(app))
http_server.listen(args.port, address='127.0.0.1')
autoreload.start()
for dir, _, files in os.walk(server_dir('templates')):
[autoreload.watch(dir + '/' + f) for f in files if not f.startswith('.')]
for dir, _, files in os.walk(os.path.join('..','static')):
[autoreload.watch(dir + '/' + f) for f in files if not f.startswith('.')]
IOLoop.instance().start()
# # else:
# print('starting flask!')
# app.debug = True
# app.run(port=args.port, host='127.0.0.1')
| StarcoderdataPython |
42317 | <reponame>GGelatin/TekkenBot
#!/usr/bin/env python3
# Copyright (c) 2019, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from collections import OrderedDict
from itertools import chain
from win32.defines import (
BYTE, DOUBLE, DWORD, FLOAT, LONG, LONGLONG, QWORD, SBYTE, SHORT, SIZE_OF,
WORD
)
# struct format strings (little-endian), keyed by ctypes type and ordered
# smallest to largest so range lookups find the tightest-fitting type first.
C_UNSIGNED_INT_TYPES_FORMAT = OrderedDict(
    [
        (BYTE, '<B'), (WORD, '<H'), (DWORD, '<I'), (QWORD, '<Q')
    ]
)
C_SIGNED_INT_TYPES_FORMAT = OrderedDict(
    [
        (SBYTE, '<b'), (SHORT, '<h'), (LONG, '<i'), (LONGLONG, '<q')
    ]
)
C_FLOAT_TYPES_FORMAT = OrderedDict(
    [
        (FLOAT, '<f'), (DOUBLE, '<d')
    ]
)
# All three tables merged into a single type -> format lookup.
C_ALL_TYPES_FORMAT = OrderedDict(
    chain(
        C_UNSIGNED_INT_TYPES_FORMAT.items(),
        C_SIGNED_INT_TYPES_FORMAT.items(),
        C_FLOAT_TYPES_FORMAT.items()
    )
)
def __init_limits():
    """Precompute (ctype, struct format, (min, max)) rows for every supported C type.

    Returns the three tables as a tuple: (signed ints, unsigned ints, floats),
    preserving the declaration order of the format dictionaries.
    """
    def rows(type_formats):
        # One row per C type: the type, its struct format and its value limits.
        # (This replaces three identical copies of the same comprehension.)
        return [
            (c_type, struct_format, __limits(c_type))
            for c_type, struct_format in type_formats.items()
        ]

    return (
        rows(C_SIGNED_INT_TYPES_FORMAT),
        rows(C_UNSIGNED_INT_TYPES_FORMAT),
        rows(C_FLOAT_TYPES_FORMAT),
    )
def __limits(c_int_type):
    """Return the inclusive (min, max) value range of a ctypes integer type.

    NOTE(review): this is also invoked for FLOAT/DOUBLE via __init_limits,
    where it yields integer-style bounds derived from the byte size (e.g.
    +/-2**31 for a 4-byte float) rather than true floating-point ranges —
    confirm this heuristic is intended.
    """
    # A type is signed when casting -1 into it keeps a negative value.
    signed = c_int_type(-1).value < c_int_type(0).value
    bit_size = SIZE_OF(c_int_type) * 8
    signed_limit = 2 ** (bit_size - 1)
    if signed:
        return (-signed_limit, signed_limit - 1)
    return (0, 2 * signed_limit - 1)
# Limit tables precomputed at import time; consumed by __get_size_format.
__C_SIGNED_INT_LIMITS, __C_UNSIGNED_INT_LIMITS, __C_FLOAT_LIMITS = (
    __init_limits()
)
def get_size(value, signed=False):
    """Byte size of the smallest C type able to represent `value`."""
    size_and_format = __get_size_format(value, signed=signed)
    return size_and_format[0]
def get_struct_format(value, signed=False):
    """struct format string of the smallest C type able to represent `value`."""
    size_and_format = __get_size_format(value, signed=signed)
    return size_and_format[1]
def __get_size_format(value, signed=False):
    """Resolve (byte size, struct format) for the smallest C type holding `value`.

    Integers pick a signed table when `signed` is True or the value is
    negative; floats use the float table. Returns None when no type fits.
    Raises NotImplementedError for any other value type (including str).
    """
    def locate(limit_table):
        # First row whose inclusive (min, max) range contains `value`.
        for c_type, struct_format, (low, high) in limit_table:
            if low <= value <= high:
                return (SIZE_OF(c_type), struct_format)
        return None

    if isinstance(value, int):
        if not signed:
            signed = value < 0
        table = __C_SIGNED_INT_LIMITS if signed else __C_UNSIGNED_INT_LIMITS
        return locate(table)
    if isinstance(value, float):
        return locate(__C_FLOAT_LIMITS)
    raise NotImplementedError
| StarcoderdataPython |
8018154 | from typing import Any
from cloud.amazon.common.base_service_generated_instance import BaseSGI
from properties_and_methods import CachedProperty
from types_extensions import void, const, list_type, dict_type
class AmazonS3Bucket(BaseSGI):
    """Handle to a single S3 bucket.

    Every operation is delegated to the owning service object (``parent``);
    this class only injects the bucket's default parameters into each call.
    """

    def __init__(self, bucket_name: str, parent, exception_level: int) -> void:
        super().__init__(exception_level=exception_level)
        self.bucket_name: const(str) = bucket_name
        self.parent = parent

    def set_exception_level(self, new_level: int) -> void:
        """Change the exception level and drop the cached defaults built from it."""
        self.exception_level = new_level
        self.invalidate_cached_property_defaults()

    @CachedProperty
    def defaults(self):
        # Parameters injected into every delegated bucket operation.
        return {
            'bucket_name': self.bucket_name,
            'apply_format_to_bucket': False,
            'exception_level': self.exception_level,
        }

    def _build_default_params(self, kwargs_dict: dict_type[str, Any]) -> dict_type[str, Any]:
        # Caller-supplied kwargs first; the bucket defaults win on conflicts.
        merged = dict(kwargs_dict)
        merged.update(self.defaults)
        return merged

    def get_objects(self, mode: str = 'mapping', **kwargs) -> dict:
        params = self._build_default_params(kwargs)
        return self.parent.get_objects_in_bucket(mode=mode, **params)

    def put_object(self, object_path: str, **kwargs) -> bool:
        params = self._build_default_params(kwargs)
        return self.parent.put_object_in_bucket(object_path=object_path, **params)

    def delete_object(self, object_name: str, **kwargs) -> void:
        params = self._build_default_params(kwargs)
        return self.parent.delete_object_from_bucket(object_name=object_name, **params)

    def delete_objects(self, object_names: list_type[str], **kwargs) -> void:
        params = self._build_default_params(kwargs)
        return self.parent.delete_objects_from_bucket(object_names=object_names, **params)

    def get_all_object_versions(self, object_name: str, **kwargs) -> list_type[dict_type[str, str]]:
        params = self._build_default_params(kwargs)
        return self.parent.get_all_object_versions(object_name=object_name, **params)

    def download_object(self, object_name: str, destination: str, **kwargs) -> void:
        params = self._build_default_params(kwargs)
        return self.parent.download_object_from_bucket(
            object_name=object_name, destination=destination, **params
        )
| StarcoderdataPython |
11282650 | <filename>sumNnos.py
# Read n from stdin and print the sum 1 + 2 + ... + n (Gauss closed form).
n = int(input())
triangular = n * (n + 1) // 2
print(triangular)
| StarcoderdataPython |
3376132 | import warnings
from complexity_considerations_package.binary_layer import BinaryConv2D
import config
if config.tf:
from tensorflow.keras.layers import (GlobalAveragePooling2D, GlobalMaxPooling2D, Dense,
multiply, add, Permute, Conv2D,
Reshape, BatchNormalization, ELU, MaxPooling2D, Dropout, Lambda)
import tensorflow.keras.backend as K
else:
from keras.layers import (GlobalAveragePooling2D, GlobalMaxPooling2D, Dense,
multiply, add, Permute, Conv2D,
Reshape, BatchNormalization, ELU, MaxPooling2D, Dropout, Lambda)
import keras.backend as K
from tensorflow.keras.regularizers import l2
__authors__ = "<NAME>, <NAME> and <NAME>"
__copyright__ = "Machine Listeners Valencia"
__credits__ = ["Machine Listeners Valencia"]
__license__ = "MIT License"
__version__ = "0.5.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Dev"
__date__ = "2020"
def _obtain_input_shape(input_shape,
                        default_size,
                        min_size,
                        data_format,
                        require_flatten,
                        weights=None):
    """Internal utility to compute/validate a model's tensor shape.
    # Arguments
        input_shape: Either None (will return the default network input shape),
            or a user-provided shape to be validated.
        default_size: Default input width/height for the model.
        min_size: Minimum input width/height accepted by the model.
        data_format: Image data format to use ('channels_first' or
            'channels_last').
        require_flatten: Whether the model is expected to
            be linked to a classifier via a Flatten layer.
        weights: One of `None` (random initialization)
            or 'imagenet' (pre-training on ImageNet).
            If weights='imagenet' input channels must be equal to 3.
    # Returns
        An integer shape tuple (may include None entries).
    # Raises
        ValueError: In case of invalid argument values.
    """
    # Build the default shape, keeping a user-supplied channel count when
    # random initialization allows it (imagenet weights force 3 channels).
    if weights != 'imagenet' and input_shape and len(input_shape) == 3:
        if data_format == 'channels_first':
            if input_shape[0] not in {1, 3}:
                warnings.warn(
                    'This model usually expects 1 or 3 input channels. '
                    'However, it was passed an input_shape with {input_shape}'
                    ' input channels.'.format(input_shape=input_shape[0]))
            default_shape = (input_shape[0], default_size, default_size)
        else:
            if input_shape[-1] not in {1, 3}:
                warnings.warn(
                    'This model usually expects 1 or 3 input channels. '
                    'However, it was passed an input_shape with {n_input_channels}'
                    ' input channels.'.format(n_input_channels=input_shape[-1]))
            default_shape = (default_size, default_size, input_shape[-1])
    else:
        if data_format == 'channels_first':
            default_shape = (3, default_size, default_size)
        else:
            default_shape = (default_size, default_size, 3)
    # ImageNet weights with a classifier head require exactly the default shape.
    if weights == 'imagenet' and require_flatten:
        if input_shape is not None:
            if input_shape != default_shape:
                raise ValueError('When setting `include_top=True` '
                                 'and loading `imagenet` weights, '
                                 '`input_shape` should be {default_shape}.'.format(default_shape=default_shape))
        return default_shape
    if input_shape:
        # Validate the user-provided shape: rank 3, the right channel count
        # for imagenet weights, and spatial dims of at least min_size.
        if data_format == 'channels_first':
            if input_shape is not None:
                if len(input_shape) != 3:
                    raise ValueError(
                        '`input_shape` must be a tuple of three integers.')
                if input_shape[0] != 3 and weights == 'imagenet':
                    raise ValueError('The input must have 3 channels; got '
                                     '`input_shape={input_shape}`'.format(input_shape=input_shape))
                if ((input_shape[1] is not None and input_shape[1] < min_size) or
                        (input_shape[2] is not None and input_shape[2] < min_size)):
                    raise ValueError('Input size must be at least {min_size}x{min_size};'
                                     ' got `input_shape={input_shape}`'.format(min_size=min_size,
                                                                               input_shape=input_shape))
        else:
            if input_shape is not None:
                if len(input_shape) != 3:
                    raise ValueError(
                        '`input_shape` must be a tuple of three integers.')
                if input_shape[-1] != 3 and weights == 'imagenet':
                    raise ValueError('The input must have 3 channels; got '
                                     '`input_shape={input_shape}`'.format(input_shape=input_shape))
                if ((input_shape[0] is not None and input_shape[0] < min_size) or
                        (input_shape[1] is not None and input_shape[1] < min_size)):
                    raise ValueError('Input size must be at least {min_size}x{min_size};'
                                     ' got `input_shape={input_shape}`'.format(min_size=min_size,
                                                                               input_shape=input_shape))
    else:
        if require_flatten:
            input_shape = default_shape
        else:
            # Fully-convolutional use: spatial dims may stay undefined.
            if data_format == 'channels_first':
                input_shape = (3, None, None)
            else:
                input_shape = (None, None, 3)
    if require_flatten:
        # A Flatten layer needs every dimension to be statically known.
        if None in input_shape:
            raise ValueError('If `include_top` is True, '
                             'you should specify a static `input_shape`. '
                             'Got `input_shape={input_shape}`'.format(input_shape=input_shape))
    return input_shape
def _tensor_shape(tensor):
    """Return the static shape of *tensor*, backend-agnostically.

    Uses ``Tensor.get_shape()`` when the global ``config.tf`` flag says we are
    on the TensorFlow backend; otherwise falls back to the ``_keras_shape``
    attribute set by standalone Keras.
    """
    if config.tf:
        return tensor.get_shape()
    # NOTE(review): assumes a standalone-Keras tensor; raises AttributeError
    # for anything else — confirm callers only pass Keras tensors.
    return getattr(tensor, '_keras_shape')
def squeeze_excite_block(input_tensor, index, ratio=16, trident=None):
    """ Channel-wise squeeze-and-excitation gate.
    Args:
        input_tensor: input Keras tensor
        index: integer used to build unique layer names
        ratio: reduction ratio of the bottleneck Dense layer
        trident: optional suffix appended to layer names
    Returns: a Keras tensor (input scaled channel-wise by the learned gate)
    References
    -   [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    """
    suffix = trident if trident is not None else ''
    channel_axis = -1 if K.image_data_format() != "channels_first" else 1
    n_filters = _tensor_shape(input_tensor)[channel_axis]

    # Squeeze: global average pool, then a 2-layer bottleneck MLP producing
    # one sigmoid gate per channel.
    gate = GlobalAveragePooling2D()(input_tensor)
    gate = Reshape((1, 1, n_filters))(gate)
    gate = Dense(n_filters // ratio, activation='relu', kernel_initializer='he_normal',
                 use_bias=False, name='dense_ratio_' + str(index) + suffix)(gate)
    gate = Dense(n_filters, activation='sigmoid', kernel_initializer='he_normal',
                 use_bias=False, name='dense_sigmoid_' + str(index) + suffix)(gate)
    if K.image_data_format() == 'channels_first':
        gate = Permute((3, 1, 2))(gate)
    # Excite: rescale the input channels by the gate.
    return multiply([input_tensor, gate])
def spatial_squeeze_excite_block(input_tensor, index, binary_layer=False, trident=None):
    """ Spatial squeeze-and-excitation gate.
    Args:
        input_tensor: input Keras tensor
        index: integer used to build unique layer names
        binary_layer: use a BinaryConv2D instead of a float Conv2D
        trident: optional suffix appended to layer names
    Returns: a Keras tensor (input scaled per spatial location)
    References
    -   [Concurrent Spatial and Channel Squeeze & Excitation in Fully Convolutional Networks](https://arxiv.org/abs/1803.02579)
    """
    suffix = trident if trident is not None else ''
    gate_name = 'conv1d_' + str(index) + suffix
    # A 1x1 convolution collapses the channels into one sigmoid map that
    # gates every spatial position of the input.
    if binary_layer is True:
        gate = BinaryConv2D(1, kernel_size=1, activation='sigmoid', use_bias=False,
                            kernel_initializer='he_normal',
                            name=gate_name)(input_tensor)
    else:
        gate = Conv2D(1, (1, 1), activation='sigmoid', use_bias=False,
                      kernel_initializer='he_normal',
                      name=gate_name)(input_tensor)
    return multiply([input_tensor, gate])
def channel_spatial_squeeze_excite(input_tensor, index, ratio=16, binary_layer=False, trident=None):
    """ Concurrent channel + spatial squeeze-and-excitation (scSE).

    Applies both the channel-wise and the spatial SE gates to the same input
    and sums the two gated tensors.
    Args:
        input_tensor: input Keras tensor
        index: integer used to build unique layer names
        ratio: reduction ratio for the channel-SE bottleneck
        binary_layer: forwarded to the spatial-SE branch
        trident: optional suffix appended to layer names
    Returns: a Keras tensor
    References
    -   [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    -   [Concurrent Spatial and Channel Squeeze & Excitation in Fully Convolutional Networks](https://arxiv.org/abs/1803.02579)
    """
    channel_gated = squeeze_excite_block(input_tensor, index, ratio, trident=trident)
    spatial_gated = spatial_squeeze_excite_block(input_tensor, index,
                                                 binary_layer=binary_layer, trident=trident)
    return add([channel_gated, spatial_gated])
def conv_standard_post(inp, nfilters, ratio, index, pre_act=False, shortcut='conv', binary_layer=False, trident=None):
    """ Residual double-convolution block with scSE attention, as presented in
    https://ieeexplore.ieee.org/abstract/document/9118879
    :param inp: input tensor
    :param nfilters: number of filters of the convolutional layers
    :param ratio: reduction ratio for the squeeze-excitation module
    :param pre_act: if True use pre-activation ordering (BN-ELU-Conv-BN-Conv),
        otherwise Conv-BN-ELU-Conv-BN
    :param shortcut: 'conv' projects the shortcut with a 1x1 conv + BN;
        'global_avg'/'global_max' replace it with a globally pooled map
        broadcast back to the spatial size (see pad_matrix_global)
    :param binary_layer: use BinaryConv2D instead of float Conv2D everywhere
    :param trident: optional suffix appended to every layer name
    :return: tensor
    """
    x1 = inp  # kept aside for the residual shortcut
    bn_name = 'bn_' + str(index)
    elu_name = 'elu_' + str(index)
    conv_name = 'conv_' + str(index)
    trident_suffix = '' if trident is None else trident
    if pre_act:
        # Pre-activation ordering: BN -> ELU -> conv_a -> BN -> conv_b.
        x = BatchNormalization(name=bn_name + '_a' + trident_suffix)(inp)
        x = ELU(name=elu_name + trident_suffix)(x)
        if binary_layer is True:
            x = BinaryConv2D(nfilters, kernel_size=3, use_bias=False, padding='same',
                             name=conv_name + '_a' + trident_suffix)(x)
        else:
            x = Conv2D(nfilters, 3, padding='same', name=conv_name + '_a' + trident_suffix)(x)
        x = BatchNormalization(name=bn_name + '_b' + trident_suffix)(x)
        if binary_layer is True:
            x = BinaryConv2D(nfilters, kernel_size=3, use_bias=False, padding='same',
                             name=conv_name + '_b' + trident_suffix)(x)
        else:
            x = Conv2D(nfilters, 3, padding='same', name=conv_name + '_b' + trident_suffix)(x)
    else:
        # Post-activation ordering: conv_a -> BN -> ELU -> conv_b -> BN.
        if binary_layer is True:
            x = BinaryConv2D(nfilters, kernel_size=3, use_bias=False, padding='same',
                             name=conv_name + '_a' + trident_suffix)(inp)
        else:
            x = Conv2D(nfilters, 3, padding='same', name=conv_name + '_a' + trident_suffix)(inp)
        x = BatchNormalization(name=bn_name + '_a' + trident_suffix)(x)
        x = ELU(name=elu_name + trident_suffix)(x)
        if binary_layer is True:
            x = BinaryConv2D(nfilters, kernel_size=3, use_bias=False, padding='same',
                             name=conv_name + '_b' + trident_suffix)(x)
        else:
            x = Conv2D(nfilters, 3, padding='same', name=conv_name + '_b' + trident_suffix)(x)
        x = BatchNormalization(name=bn_name + '_b' + trident_suffix)(x)
    if shortcut == 'conv':
        # Project the shortcut to `nfilters` channels so it can be added.
        if binary_layer is True:
            x1 = BinaryConv2D(nfilters, kernel_size=1, use_bias=False, padding='same',
                              name=conv_name + '_shortcut' + trident_suffix)(x1)
        else:
            x1 = Conv2D(nfilters, 1, padding='same', name=conv_name + '_shortcut' + trident_suffix)(x1)
        x1 = BatchNormalization(name=bn_name + '_shortcut' + trident_suffix)(x1)
    elif shortcut == 'global_avg' or shortcut == 'global_max':
        # Shortcut becomes the globally pooled input broadcast back to HxW.
        x1 = Lambda(pad_matrix_global, arguments={'type': shortcut},
                    name='lambda_padding_' + str(index) + trident_suffix)(x1)
    # Residual add (channel-tiling inside module_addition if needed),
    # activation, scSE attention, then a second residual add.
    x = module_addition(x, x1, index, 'a' + trident_suffix)
    x = ELU(name=elu_name + '_after_addition' + trident_suffix)(x)
    x = channel_spatial_squeeze_excite(x, index, ratio=ratio, binary_layer=binary_layer, trident=trident)
    x = module_addition(x, x1, index, 'b' + trident_suffix)
    return x
def network_module(inp, nfilters, ratio, pool_size, dropout_rate, index, pre_act=False, shortcut='conv',
                   binary_layer=False, trident=None):
    """ One encoder stage: residual SE conv block -> max-pool -> dropout.

    Implementation presented in https://ieeexplore.ieee.org/abstract/document/9118879
    :param inp: input tensor
    :param nfilters: number of filters of the convolutional layers
    :param ratio: reduction ratio for the squeeze-excitation module
    :param pool_size: size of the max-pooling window
    :param dropout_rate: rate for dropout
    :param index: integer used to build unique layer names
    :param pre_act: pre-activation flag, forwarded to conv_standard_post
    :param shortcut: shortcut type, forwarded to conv_standard_post
    :param binary_layer: forwarded to conv_standard_post
    :param trident: optional suffix appended to layer names
    :return: tensor
    """
    suffix = trident if trident is not None else ''
    features = conv_standard_post(inp, nfilters, ratio, index, pre_act=pre_act,
                                  shortcut=shortcut, binary_layer=binary_layer,
                                  trident=trident)
    features = MaxPooling2D(pool_size=pool_size, name='pool_' + str(index) + suffix)(features)
    return Dropout(dropout_rate, name='dropout_' + str(index) + suffix)(features)
def module_addition(inp1, inp2, index, suffix):
    """ Element-wise addition that tolerates mismatched channel counts.

    When *inp2* has fewer channels than *inp1*, its channels are tiled
    (repeat_elements on axis 3) by the integer ratio before the add.
    :param inp1: reference tensor (defines the target channel count)
    :param inp2: tensor to add, possibly with fewer channels
    :param index: integer used to build unique layer names
    :param suffix: extra name suffix for the tiling Lambda layer
    :return: tensor
    """
    channels_1 = K.int_shape(inp1)[3]
    channels_2 = K.int_shape(inp2)[3]
    if channels_1 == channels_2:
        return add([inp1, inp2])
    tiled = Lambda(lambda y: K.repeat_elements(y, rep=int(channels_1 // channels_2), axis=3),
                   name='lambda_add_' + str(index) + '_' + str(suffix))(inp2)
    return add([inp1, tiled])
def pad_matrix_global(inp, type='global_avg'):
    """ Broadcast a global pooling of *inp* back to its spatial size.

    Pools *inp* globally (average or max), then repeats the pooled vector
    h*w times and reshapes/permutes it to (batch, h, w, channels) so it can
    act as a shortcut branch.
    :param inp: input tensor of shape (batch, h, w, channels)
    :param type: 'global_avg' or 'global_max'
    :return: tensor of the same spatial size as *inp*
    :raises ValueError: for an unknown *type*
    """
    h = K.int_shape(inp)[1]
    w = K.int_shape(inp)[2]
    if type == 'global_avg':
        pooled = GlobalAveragePooling2D()(inp)
    elif type == 'global_max':
        pooled = GlobalMaxPooling2D()(inp)
    else:
        # Previously an unknown value fell through and crashed later with an
        # unrelated NameError; fail fast with a clear message instead.
        raise ValueError("type must be 'global_avg' or 'global_max', got %r" % (type,))
    rep = K.repeat(pooled, h * w)
    rep = Reshape((K.int_shape(pooled)[1], h, w))(rep)
    return K.permute_dimensions(rep, (0, 2, 3, 1))
def freq_split(inp, n_split_freqs, f_split_freqs):
    """Split a rank-4 tensor/array along the frequency axis (axis 1).

    Generalized from the original two-/three-way split to any number of
    chunks; behaviour for n_split_freqs in (2, 3) is unchanged.  The
    original silently returned None for other values — now it fails fast.

    :param inp: rank-4 input, frequency on axis 1
    :param n_split_freqs: number of chunks to produce (>= 2)
    :param f_split_freqs: ascending boundary indices; only the first
        n_split_freqs - 1 entries are used
    :return: list of n_split_freqs slices covering axis 1
    :raises ValueError: if n_split_freqs < 2 or too few boundaries are given
    """
    if n_split_freqs < 2:
        raise ValueError("n_split_freqs must be at least 2")
    if len(f_split_freqs) < n_split_freqs - 1:
        raise ValueError("f_split_freqs must provide n_split_freqs - 1 boundaries")
    # Boundaries [0, b1, ..., b_{n-1}, end); None means "to the end".
    bounds = [0] + list(f_split_freqs[:n_split_freqs - 1]) + [None]
    return [inp[:, bounds[k]:bounds[k + 1], :, :] for k in range(n_split_freqs)]
| StarcoderdataPython |
1609497 | import os
import dash
import dash_table
import dash_core_components as dcc
import dash_html_components as html
import dash_daq as daq
import pandas as pd
from dash.dependencies import Input, Output
# Reading data for the statistics table (expects data.csv next to this script).
df = pd.read_csv('data.csv')
app = dash.Dash(__name__)
# Needed because of Heroku deployment, see https://github.com/plotly/dash-daq/issues/25
app.scripts.config.serve_locally = True
# Dark colour palette shared by the layout and the data table below.
colors = {
    'background': '#323232',
    'background_dark': '#1e1e1e',
    'text': '#FFFFFF'
}
# Exposed for WSGI servers (e.g. gunicorn on Heroku).
server = app.server
# Page layout: title, interval timer, gauge, statistics table, donation QR.
app.layout = html.Div([
    html.Div([
        html.H1('BITCOIN HEATMETER'),
    ], className='row', style={'textAlign': 'center'}),
    html.Div([
        html.Label('How interesting is Bitcoin to the world?'),
    ], style={'textAlign': 'center'}),
    # Fires every 10 s and drives the update_gauge callback below.
    dcc.Interval(
        id='interval-component',
        interval=1*10000,  # in milliseconds
        n_intervals=0
    ),
    # https://dash.plot.ly/dash-daq/gauge
    html.Div([
        daq.Gauge(
            id='bitcoin-gauge-chart',
            value=2,
            max=10,
            min=0,
            units="MPH",
            color={"gradient": True, "ranges": {
                "green": [0, 6], "yellow": [6, 8], "red": [8, 10]}},
        )
    ], className='row', style={'textAlign': 'center'}),
    html.Div([
        html.Label('24h interest since 2019-04-01'),
    ], className='row', style={'textAlign': 'center'}),
    # Statistics table fed from data.csv (loaded at import time above).
    html.Div([
        dash_table.DataTable(
            id='table',
            columns=[{"name": i, "id": i} for i in df.columns],
            data=df.to_dict("rows"),
            style_header={
                'backgroundColor': colors['background_dark'],
                'fontWeight': 'bold'
            },
            style_cell={
                'backgroundColor': colors['background'],
                'color': 'white',
                'minWidth': '30px', 'width': '50px', 'maxWidth': '90px'
            },
            style_cell_conditional=[
                {
                    'if': {'column_id': c},
                    'textAlign': 'center'
                } for c in df.columns
            ],
        )
    ], className='row four columns offset-by-four'),
    html.Div([
        html.Label('Donate: BTC address'),
        html.Img(
            src='/assets/qrcode.png',
            style={'width': '120px'}
        )
    ], className='row', style={'textAlign': 'center'}),
], style={'backgroundColor': colors['background'], 'color': colors['text']})
# Callback wired to the interval timer: refreshes the gauge's value, maximum
# and colour bands on every tick.
@app.callback([
    Output('bitcoin-gauge-chart', 'value'),
    Output('bitcoin-gauge-chart', 'max'),
    Output('bitcoin-gauge-chart', 'color'),
], [Input('interval-component', 'n_intervals'), ]
)
def update_gauge(n_intervals):
    """Return (value, max, color) for the gauge.

    *n_intervals* is supplied by dcc.Interval and unused; value/max are
    currently hard-coded placeholders.
    """
    # Renamed from `max`/`min`: don't shadow the builtins.  The leftover
    # debug print was removed as well.
    gauge_max = 100
    gauge_min = 0
    value = 50
    # Colour bands: green up to 40 %, yellow to 70 %, red above.
    threshold_1 = gauge_max - round(gauge_max * 0.6)
    threshold_2 = gauge_max - round(gauge_max * 0.3)
    color = {"gradient": True, "ranges": {
        "green": [gauge_min, threshold_1],
        "yellow": [threshold_1, threshold_2],
        "red": [threshold_2, gauge_max]}}
    return value, gauge_max, color
# Run the Dash development server when executed directly.
if __name__ == '__main__':
    app.run_server(debug=True)
| StarcoderdataPython |
11314690 | <gh_stars>0
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.test import unittest
from libcloud.container.drivers.joyent import JoyentContainerDriver
from libcloud.test.secrets import CONTAINER_PARAMS_DOCKER
from libcloud.test.container.test_docker import (
DockerContainerDriverTestCase,
DockerMockHttp,
)
class JoyentContainerDriverTestCase(DockerContainerDriverTestCase, unittest.TestCase):
    """Runs the generic Docker driver test suite against the Joyent driver."""

    def setUp(self):
        # Build one mocked driver per supported Docker API flavour.
        self.drivers = []
        for api_version in ("linux_124", "mac_124"):
            JoyentContainerDriver.connectionCls.conn_class = DockerMockHttp
            DockerMockHttp.type = None
            DockerMockHttp.use_param = "a"
            joyent_driver = JoyentContainerDriver(*CONTAINER_PARAMS_DOCKER)
            joyent_driver.version = api_version
            self.drivers.append(joyent_driver)
| StarcoderdataPython |
12853897 | # Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Optional
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, MarianMTModel, \
MarianTokenizer
from .config import ConfigLanguageModel
class Model:
    """Lazy wrapper around a MarianMT translation model from HuggingFace.

    Construction is cheap; the heavy tokenizer/model download happens in
    :meth:`load`, which must be called before :meth:`translate`.
    """

    def __init__(self, conf: ConfigLanguageModel, models_path: str):
        self._conf: ConfigLanguageModel = conf
        self._models_path: str = models_path
        # Populated by load().
        self._tokenizer: Optional[MarianTokenizer] = None
        self._model: Optional[MarianMTModel] = None

    def load(self) -> None:
        """Download (if needed) and instantiate the tokenizer and model."""
        logging.info(f'[{self._conf.model}] - Loading tokenizer...')
        self._tokenizer = AutoTokenizer.from_pretrained(
            self._conf.model, cache_dir=self._models_path)
        logging.info(f'[{self._conf.model}] - Loading model...')
        self._model = AutoModelForSeq2SeqLM.from_pretrained(
            self._conf.model, cache_dir=self._models_path)
        logging.info(f'[{self._conf.model}] - Loaded.')

    def translate(self, text: str) -> str:
        """Translate *text* and return the decoded best hypothesis."""
        batch = self._tokenizer(text, return_tensors="pt", padding=True)
        generated = self._model.generate(**batch)
        return self._tokenizer.decode(generated[0], skip_special_tokens=True)
| StarcoderdataPython |
1638979 | import WeiBanAPI
import json
import time # time.sleep延时
import os # 兼容文件系统
import random
tenantCode = '61050002'  # UESTC tenant ID on the WeiBan platform
def main():
    """Log in via QR code, walk every course category and mark all
    unfinished courses as completed, sleeping randomly between requests."""
    # Show the license text before doing anything.
    licenseFile = open('.' + os.sep + 'LICENSE', encoding='utf-8')
    print(licenseFile.read())
    licenseFile.close()
    # Log in
    # Start with an empty cookie.
    cookie = ''
    loginResponse = WeiBanAPI.qrLogin()
    try:
        print('登录成功,userName:' + loginResponse['data']['userName'])
        time.sleep(2)
    except BaseException:
        print('登录失败')
        print(loginResponse)  # TODO: this access does not account for network errors etc.
        exit(0)
    # Request, parse and print the user's profile.
    try:
        print('请求用户信息')
        stuInfoResponse = WeiBanAPI.getStuInfo(loginResponse['data']['userId'],
                                               tenantCode,
                                               cookie)
        print('用户信息:' + stuInfoResponse['data']['realName'] + '\n'
              + stuInfoResponse['data']['orgName']
              + stuInfoResponse['data']['specialtyName']
              )
        time.sleep(2)
    except BaseException:
        print('解析用户信息失败,将尝试继续运行,请注意运行异常')
    # Request overall course progress.
    try:
        getProgressResponse = WeiBanAPI.getProgress(loginResponse['data']['preUserProjectId'],
                                                    tenantCode,
                                                    cookie)
        print('课程总数:' + str(getProgressResponse['data']['requiredNum']) + '\n'
              + '完成课程:' +
              str(getProgressResponse['data']['requiredFinishedNum']) + '\n'
              + '结束时间' + str(getProgressResponse['data']['endTime']) + '\n'
              + '剩余天数' + str(getProgressResponse['data']['lastDays'])
              )
        time.sleep(2)
    except BaseException:
        print('解析课程进度失败,将尝试继续运行,请注意运行异常')
    # Request the list of course categories.
    # NOTE(review): if this request fails, getListCategoryResponse is left
    # unbound and the loop below raises NameError — confirm intended.
    try:
        getListCategoryResponse = WeiBanAPI.getListCategory(loginResponse['data']['preUserProjectId'],
                                                            '3',
                                                            tenantCode,
                                                            loginResponse['data']['userId'],
                                                            loginResponse['data']['token'])
        time.sleep(2)
    except BaseException:
        print('请求课程列表失败')
    print('解析课程列表并发送完成请求')
    # Walk every category, then every course inside it.
    for Category in getListCategoryResponse['data']:
        print('\n----章节码:' + Category['categoryCode'] +
              '章节内容:' + Category['categoryName'])
        try:
            getListCourseResponse = WeiBanAPI.getListCourse(loginResponse['data']['preUserProjectId'],
                                                            '3',
                                                            Category['categoryCode'],
                                                            '',
                                                            loginResponse['data']['userId'],
                                                            tenantCode,
                                                            loginResponse['data']['token'])
            time.sleep(2)
        except BaseException:
            print('请求课程列表失败')
        for j in getListCourseResponse['data']:
            print('课程内容:' + j['resourceName'] +
                  '\nuserCourseId:' + j['userCourseId'])
            if (j['finished'] == 1):
                print('已完成')
            else:
                # Mark the course studied and finished, then wait a random
                # delay to look less like a bot.
                print('发送完成请求')
                WeiBanAPI.doStudy(
                    loginResponse['data']['preUserProjectId'], j['resourceId'], tenantCode)
                WeiBanAPI.finishCourse(j['userCourseId'], tenantCode, cookie)
                delayInt = WeiBanAPI.getRandomTime()
                print('\n随机延时' + str(delayInt))
                time.sleep(delayInt)
if __name__ == '__main__':
    main()
| StarcoderdataPython |
4928994 | <reponame>saurabh6790/community_erpnext_com
"""
Configuration for docs
Add properties
1. `source_link`
2. `docs_base_url`
3. `context`
"""
# Metadata consumed by the Frappe docs generator.
source_link = "https://github.com/frappe/community_erpnext_com"
docs_base_url = "https://frappe.github.io/community_erpnext_com"
headline = "Connects service seekers and providers"
sub_heading = "ERPNext Community Portal allows users to post jobs, find service providers and post jobs"
def get_context(context):
    """Inject the page title used when rendering the docs home page."""
    context.title = "ERPNext Community Portal"
| StarcoderdataPython |
5135959 | <reponame>lacie-life/YoctoPi
#
# SPDX-License-Identifier: MIT
#
import os
from oeqa.runtime.case import OERuntimeTestCase
from oeqa.core.decorator.depends import OETestDepends
from oeqa.runtime.decorator.package import OEHasPackage
class GObjectIntrospectionTest(OERuntimeTestCase):
    """Verifies PyGObject works on the target image via a GLib call."""

    @OETestDepends(["ssh.SSHTest.test_ssh"])
    @OEHasPackage(["python3-pygobject"])
    def test_python(self):
        # Run a one-liner on the target that round-trips a string through
        # GLib.markup_escape_text; proves the GI bindings import and run.
        script = """from gi.repository import GLib; print(GLib.markup_escape_text("<testing&testing>"))"""
        status, output = self.target.run("python3 -c '%s'" % script)
        self.assertEqual(status, 0, msg="Python failed (%s)" % (output))
        self.assertEqual(output, "<testing&testing>", msg="Unexpected output (%s)" % output)
| StarcoderdataPython |
384040 | <reponame>hhhaaahhhaa/s3prl
import os
from s3prl.utility.download import _urls_to_filepaths
from .expert import UpstreamExpert as _UpstreamExpert
def mos_wav2vec2_local(ckpt, *args, **kwargs):
    """
    The model from local ckpt
        ckpt (str): PATH to a wav2vec2-based MOS-prediction checkpoint
    """
    assert os.path.isfile(ckpt)
    # Tell the expert which upstream backbone this checkpoint belongs to.
    kwargs["upstream"] = "wav2vec2"
    return _UpstreamExpert(ckpt, *args, **kwargs)
def mos_wav2vec2_url(ckpt, refresh=False, *args, **kwargs):
    """
    The model from URL
        ckpt (str): URL
        refresh (bool): re-download the checkpoint even if cached
    """
    # Forward `refresh`; previously it was accepted but ignored, so a cached
    # (possibly stale) checkpoint was always reused.
    return mos_wav2vec2_local(_urls_to_filepaths(ckpt, refresh=refresh), *args, **kwargs)
def mos_wav2vec2(refresh=False, *args, **kwargs):
    """
    The default wav2vec2 MOS model, downloaded from a fixed URL
        refresh (bool): re-download the cached checkpoint
    """
    kwargs[
        "ckpt"
    ] = "https://www.dropbox.com/s/s9zpouk5svu1a4l/wav2vec2-dev-SRCC-best.ckpt?dl=0"
    return mos_wav2vec2_url(refresh=refresh, *args, **kwargs)
def mos_tera_local(ckpt, *args, **kwargs):
    """
    The model from local ckpt
        ckpt (str): PATH to a TERA-based MOS-prediction checkpoint
    """
    assert os.path.isfile(ckpt)
    # Tell the expert which upstream backbone this checkpoint belongs to.
    kwargs["upstream"] = "tera"
    return _UpstreamExpert(ckpt, *args, **kwargs)
def mos_tera_url(ckpt, refresh=False, *args, **kwargs):
    """
    The model from URL
        ckpt (str): URL
        refresh (bool): re-download the checkpoint even if cached
    """
    # Forward `refresh`; previously it was accepted but ignored.
    return mos_tera_local(_urls_to_filepaths(ckpt, refresh=refresh), *args, **kwargs)
def mos_tera(refresh=False, *args, **kwargs):
    """
    The default TERA MOS model, downloaded from a fixed URL
        refresh (bool): re-download the cached checkpoint
    """
    kwargs[
        "ckpt"
    ] = "https://www.dropbox.com/s/w4jk5bujaoosk69/tera-dev-SRCC-best.ckpt?dl=0"
    return mos_tera_url(refresh=refresh, *args, **kwargs)
def mos_apc_local(ckpt, *args, **kwargs):
    """
    The model from local ckpt
        ckpt (str): PATH to an APC-based MOS-prediction checkpoint
    """
    assert os.path.isfile(ckpt)
    # Tell the expert which upstream backbone this checkpoint belongs to.
    kwargs["upstream"] = "apc"
    return _UpstreamExpert(ckpt, *args, **kwargs)
def mos_apc_url(ckpt, refresh=False, *args, **kwargs):
    """
    The model from URL
        ckpt (str): URL
        refresh (bool): re-download the checkpoint even if cached
    """
    # Forward `refresh`; previously it was accepted but ignored.
    return mos_apc_local(_urls_to_filepaths(ckpt, refresh=refresh), *args, **kwargs)
def mos_apc(refresh=False, *args, **kwargs):
    """
    The default APC MOS model, downloaded from a fixed URL
        refresh (bool): re-download the cached checkpoint
    """
    kwargs[
        "ckpt"
    ] = "https://www.dropbox.com/s/ulng31as15hsvz1/apc-dev-SRCC-best.ckpt?dl=0"
    return mos_apc_url(refresh=refresh, *args, **kwargs)
| StarcoderdataPython |
1666303 | <gh_stars>0
def test_conduit06():
    # Conduit_TC_006_Reg
    # Logout
    # Preconditions:
    # 1- the host machine is reachable
    # 2- Conduit is running on the host
    # 3- Chrome version: 91.0.4472.77 (official build) (64-bit)
    # 4- OS: Windows 10
    # 5- Logged-in user: Email: <EMAIL> Password: <PASSWORD>$
    # 6- Requires TC_001 and TC_002 to have passed
    # Requirement: Req.id: R03
    from selenium import webdriver
    import time
    from selenium.webdriver.chrome.options import Options
    from webdriver_manager.chrome import ChromeDriverManager
    options = Options()
    options.add_argument('--headless')
    driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
    # Load the page
    driver.get("http://localhost:1667")
    # Credentials used for signing in
    email_data = ["<EMAIL>"]
    password_data = ["<PASSWORD>$"]
    # Sign-in helper
    def login(email_l, password_l):
        sign_in = driver.find_element_by_xpath('//a[@href="#/login"]')
        sign_in.click()
        email = driver.find_element_by_xpath('//input[@placeholder="Email"]')
        email.send_keys(email_l)
        password = driver.find_element_by_xpath('//input[@placeholder="Password"]')
        password.send_keys(password_l)
        button = driver.find_element_by_xpath('//button[@class="btn btn-lg btn-primary pull-xs-right"]')
        button.click()
        time.sleep(2)
    # Sign-out helper
    def logout():
        log_out = driver.find_element_by_xpath('//a[@active-class="active"]')
        log_out.click()
        time.sleep(2)
    # Sign in with the test credentials
    login(email_data[0], password_data[0])
    test_user1 = driver.find_element_by_xpath('//li[@class="nav-item"]//a[@href="#/@testuser1/"]')
    # Verify the signed-in user's name is displayed
    assert test_user1.text == "testuser1"
    assert test_user1.is_displayed()
    time.sleep(3)
    # Sign out
    logout()
    # Verify the user is no longer signed in
    test_user1_check = driver.find_elements_by_xpath('//li[@class="nav-item"]//a[@href="#/@testuser1/"]')
    assert len(test_user1_check) == 0
    driver.close()
4993433 | <gh_stars>1-10
from django.shortcuts import render
from .models import *
from .Se import *
from django.http import HttpResponse, JsonResponse
from rest_framework.decorators import api_view, permission_classes
from rest_framework.views import APIView
from rest_framework import exceptions
# from SAcore.utils.auth import Authentication
from django.utils import timezone
from rest_framework.pagination import PageNumberPagination
from rest_framework.parsers import FileUploadParser, MultiPartParser
from django.contrib.auth import authenticate, login
import datetime
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.decorators import user_passes_test
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.utils import six
from django.core.mail import send_mail
from django.contrib.auth import get_user_model
from rest_framework.permissions import IsAuthenticated
User = get_user_model()
# Create your views here.
class ProfileView(APIView):
    '''
    Profile-related endpoints (get/update profile, apply to become an expert).
    '''
    permission_classes = (IsAuthenticated,)
    # Return the current user's profile
    def get(self, request, *args, **kwargs):
        user = request.user
        user_se = UserSerializer(user)
        return JsonResponse(user_se.data)
    # Update the current user's profile
    def post(self, request, *args, **kwargs):
        user = request.user
        data = request.data['data']
        user_se = UserSerializer(user, data=data)
        if user_se.is_valid():
            user_se.save()
            return JsonResponse(user_se.data)
        return JsonResponse(user_se.errors, status=400)
    # Applying to become an expert is confirmed via an e-mail token
    # Apply to become an expert: mail a verification code to the author's
    # registered address; VerifyView completes the flow.
    def put(self, request, *args, **kwargs):
        user = request.user
        uid = request.data['uid']
        # try:
        au=Author.nodes.get(uid=uid)
        email=au.email
        token=account_activation_token.make_token(user)
        AuthorToken.objects.get_or_create(email=email,token=token)
        send_mail(
            subject='科技资源交易平台验证邮件',
            message='欢迎来到科技资源交易平台,您的验证码为:%s,请输入验证码完成验证'%(token),
            from_email='<EMAIL>',
            recipient_list=[email],
        )
        return JsonResponse({'email': email})
        # except Exception as e:
        #     return JsonResponse({'msg': "申请失败"}, status=400)
# Generate the token used for e-mail verification
class TokenGenerator(PasswordResetTokenGenerator):
    """Token generator whose hash covers the user's pk, timestamp and
    active flag, so tokens invalidate when the account state changes."""
    def _make_hash_value(self, user, timestamp):
        return (
            six.text_type(user.pk) + six.text_type(timestamp) +
            six.text_type(user.is_active)
        )
# Module-level singleton shared by the views above and below.
account_activation_token = TokenGenerator()
class VerifyView(APIView):
    """Completes the expert-application flow: checks the e-mailed token and,
    on success, promotes the user to expert ('E') linked to the Author node."""
    def post(self, request, *args, **kwargs):
        data=request.data['data']
        email=data['email']
        token=data['token']
        username=data['username']
        try:
            at=AuthorToken.objects.get(email=email)
        except AuthorToken.MultipleObjectsReturned:
            # More than one token for this e-mail: already verified before.
            return JsonResponse({"msg":"此专家已在本网站上认证过"})
        au=Author.nodes.get(email=email)
        if token == at.token:
            # Promote the account and bind it to the Author graph node.
            u=User.objects.get(username=username)
            u.Type='E'
            u.uid=au.uid
            u.save()
            return JsonResponse({'msg':"申请成功"},status=200)
        else:
            return JsonResponse({'msg':"验证码错误"},status=400)
# After verification the user acts as an expert; this still needs work
class AuthorView(APIView):
    '''
    Expert-information endpoints: profile lookup, e-mail change,
    resource publishing (papers "P1" and patents "P2").
    '''
    # Authentication (disabled for GET so profiles are public)
    # permission_classes = (IsAuthenticated,)
    # File parser for uploads
    parser_classes = (MultiPartParser,)
    # Return an expert's public profile
    def get(self, request, *args, **kwargs):
        uid = request.GET['uid']
        au = Author.nodes.get_or_none(uid=uid)
        # NOTE(review): au may be None here, making au.serialize raise — confirm.
        res = au.serialize
        return JsonResponse(res, status=200)
    # Change the expert's e-mail; could be combined with e-mail verification
    # NOTE(review): @permission_classes is DRF's function-view decorator;
    # using it on a method is unusual — confirm it enforces anything here.
    @permission_classes(IsAuthenticated,)
    def post(self, request, *args, **kwargs):
        user = request.user
        email = request.data['email']
        token=account_activation_token.make_token(user)
        AuthorToken.objects.create(email=email,token=token).save()
        send_mail(
            subject='科技资源交易平台验证邮件',
            message='欢迎来到科技资源交易平台,您的验证码为:%s,请输入验证码完成验证'%(token),
            from_email='<EMAIL>',
            recipient_list=email,
        )
        return JsonResponse({'email':email})
    # Publish a resource: create the graph node, link all co-authors,
    # store the attachment, then connect every pair of authors as coworkers.
    @permission_classes(IsAuthenticated,)
    def put(self, request, *args, **kwargs):
        user=request.user
        data = request.data
        attach_file = request.FILES['file']
        t=data["type"]
        authors_text = data['authors']
        authors_names = authors_text.split(',')
        au_list=[]
        if t=="P1":
            # Paper: link the uploader as primary author plus all co-authors.
            r=Paper(name=data['name'],
                    abstract=data['abstract'],
                    keywords=data['keywords'],
                    price=data['price'],).save()
            r.author1.connect(Author.nodes.get(uid=user.uid))
            for name in authors_names:
                au = Author.nodes.get_or_none(name=name)
                if au is None:
                    au=Author(name=name).save()
                au_list.append(au)
                r.authors.connect(au)
                au.publishes.connect(r)
                au.save()
            Resource.objects.create(Type="P1",name=r.name,files=attach_file,uid=r.uid).save()
        elif t=="P2":
            # Patent: link each name as an inventor.
            r=Patent(name=data['name'],
                     patent_id=data['patent_id'],
                     applicant_date=data['applicant_date']).save()
            for name in authors_names:
                au = Author.nodes.get_or_none(name=name)
                if au is None:
                    au=Author(name=name).save()
                au_list.append(au)
                r.inventor.connect(au)
                au.invent.connect(r)
                au.save()
            Resource.objects.create(Type="P2",name=r.name,files=attach_file,uid=r.uid).save()
        # Record where the uploaded file lives on the graph node too.
        r.resource_url="static/files/"+attach_file.name
        r.save()
        # Connect every pair of distinct authors as coworkers.
        for i in range(len(au_list)):
            au=au_list[i]
            for au1 in au_list:
                if au1!=au:
                    au.coworkers.connect(au1)
            au.save()
        return JsonResponse({'msg': "上传成功"})
class RegisterView(APIView):
    '''
    Registration view.
    '''
    def post(self, request, *args, **kwargs):
        """Create a new account; rejects duplicate usernames with 400."""
        data = request.data['data']
        username = data['username']
        user = User.objects.filter(username=username).first()
        if user:
            return JsonResponse({'msg': "该用户名已被注册, 换一个试试吧"}, status=400)
        else:
            # Fixed: the password assignment was corrupted (a redacted
            # placeholder token, a syntax error); read it from the payload
            # like the other fields.
            password = data['password']
            User.objects.create_user(username=username, password=password)
            return JsonResponse({'msg': "注册成功!"}, status=200)
# The search function needs rewriting
class SearchView(APIView):
    '''
    Simple search over papers ("P1") and patents ("P2") by title or author.
    '''
    # Authentication (currently disabled)
    # authentication_classes = [Authentication,]
    # Return up to 50 matches for the given field/content pair.
    # NOTE(review): user input is interpolated straight into the Cypher
    # query — injection risk; switch to query parameters.
    def post(self, request, *args, **kwargs):
        Type=request.data['type']
        field = request.data['field']
        string=request.data['content']
        result=[]
        ret={}
        if Type=='P1':
            t="Paper"
        else:
            t="Patent"
        if field=='Title':
            # Match on the node's own name.
            query="match(n:"+t+") where n.name =~'.*"+string+".*' return n limit 50"
            results, meta = db.cypher_query(query)
            # NOTE: local `re` shadows the stdlib `re` module within this method.
            if Type=='P1':
                re = [Paper.inflate(row[0]) for row in results]
            else:
                re = [Patent.inflate(row[0]) for row in results]
            for pa in re:
                result.append(pa.serialize)
        elif field=='Author':
            if Type=='P1':
                # Papers: search both the primary-author and co-author edges.
                query="match(n:"+t+")-[r:AUTHOR1]-(a:Author) where a.name =~'.*"+string+".*' return n limit 50"
                results, meta = db.cypher_query(query)
                re = [Paper.inflate(row[0]) for row in results]
                for pa in re:
                    result.append(pa.serialize)
                query="match(n:"+t+")-[r:AUTHORS]-(a:Author) where a.name =~'.*"+string+".*' return n limit 50"
                results, meta = db.cypher_query(query)
                re = [Paper.inflate(row[0]) for row in results]
                for pa in re:
                    result.append(pa.serialize)
            else:
                # Patents: search the inventor edge.
                query="match(n:"+t+")-[r:INVENTOR]-(a:Author) where a.name =~'.*"+string+".*' return n limit 50"
                results, meta = db.cypher_query(query)
                re = [Patent.inflate(row[0]) for row in results]
                for pa in re:
                    result.append(pa.serialize)
        ret['result']=result
        return JsonResponse(ret)
class AdvanceSearchView(APIView):
    """Advanced search: filter by title, author and (papers only) year range."""
    def post(self, request, *args, **kwargs):
        title = request.data['title']
        author = request.data['author']
        Type = request.data['type']
        time_low = request.data['time_low']
        time_high = request.data['time_high']
        # Empty bounds mean "unbounded" on that side.
        if len(time_low) == 0:
            time_low = -100
        if len(time_high) == 0:
            time_high = 10000
        result = []
        ret = {}
        # NOTE(review): user input is interpolated straight into the Cypher
        # query — injection risk; switch to query parameters.
        if Type == 'P2':
            # Fixed: the query text was assembled through accidental
            # adjacent-literal/backslash continuations that embedded stray
            # quote characters and padding into the Cypher string; rebuilt
            # cleanly with the same clauses.  Patents carry no year
            # property, so the time range is not applied here.
            query = ("match(n:Patent)-[r:INVENTOR]-(a:Author) "
                     "where n.name =~'.*" + title + ".*' "
                     "and a.name =~'.*" + author + ".*' "
                     "return n limit 50")
            results, meta = db.cypher_query(query)
            matches = [Patent.inflate(row[0]) for row in results]
            for pa in matches:
                result.append(pa.serialize)
        else:
            # Papers: search both the primary-author and co-author edges,
            # constrained to the requested year range.
            for rel in ('AUTHOR1', 'AUTHORS'):
                query = ("match(n:Paper)-[r:" + rel + "]-(a:Author) "
                         "where n.name =~'.*" + title + ".*' "
                         "and a.name =~'.*" + author + ".*' "
                         "and n.year >= " + str(time_low) + " "
                         "and n.year <= " + str(time_high) + " "
                         "return n limit 50")
                results, meta = db.cypher_query(query)
                matches = [Paper.inflate(row[0]) for row in results]
                for pa in matches:
                    result.append(pa.serialize)
        ret['result'] = result
        return JsonResponse(ret)
# Resource fetching still needs rework
class DetailView(APIView):
    '''
    Detail view for a single resource, plus the viewer's starred/bought flags.
    '''
    # authentication_classes = [Authentication,]
    def post(self, request, *args, **kwargs):
        ret = {}
        t = request.data['Type']
        uid = request.data['uid']
        # try:
        # Look the node up in the graph; "P1" = paper, anything else = patent.
        if t == "P1":
            r1 = Paper.nodes.get(uid=uid)
        else:
            r1 = Patent.nodes.get(uid=uid)
        ret = {**ret, **r1.serialize}
        # except Exception as e:
        #     return JsonResponse({'msg': "该资源不存在"}, status=400)
        # Anonymous viewers get both flags False; authenticated viewers get
        # their actual starred/bought state.
        if request.user.is_authenticated:
            r1 = request.user.star_list.filter(uid=uid).first()
            b1 = request.user.buyed_list.filter(uid=uid).first()
            if r1:
                ret['starred'] = True
            else:
                ret['starred'] = False
            if b1:
                ret['bought'] = True
            else:
                ret['bought'] = False
        else:
            ret['starred'] = False
            ret['bought'] = False
        return JsonResponse(ret)
# Resource.get still needs rework
class StarView(APIView):
    '''
    Favourites ("star") view: list, bulk-add, bulk-remove.
    '''
    permission_classes = (IsAuthenticated,)
    # List all of the current user's starred resources, serialized from the graph.
    def get(self, request, *args, **kwargs):
        user = request.user
        result={}
        star=[]
        res = user.star_list.all().order_by("uid")
        for p in res:
            if p.Type=="P1":
                star.append(Paper.nodes.get(uid=p.uid).simple_serialize)
            else:
                star.append(Patent.nodes.get(uid=p.uid).serialize)
        result['star']=star
        return JsonResponse(result)
    # Resource.get still needs rework
    # Star several items at once; item_list and type_list are parallel
    # comma-separated strings.
    def post(self, request, *args, **kwargs):
        user = request.user
        data = request.data['data']
        star_items = data['item_list']
        star_items_list=star_items.split(",")
        type_list=data['type_list']
        type_list=type_list.split(",")
        for i in star_items_list:
            # try:
            # Create the Resource row on first star, caching name/type
            # from the graph node.
            r1,created = Resource.objects.get_or_create(uid=i)
            if created:
                r1.Type=type_list[star_items_list.index(i)]
                if r1.Type=="P1":
                    r1.name=Paper.nodes.get(uid=i).name
                else:
                    r1.name=Patent.nodes.get(uid=i).name
                r1.save()
            user.star_list.add(r1)
            user.save()
            # except Exception as e:
            #     return JsonResponse({"msg": "收藏失败"}, status=400)
        return JsonResponse({"msg": "收藏成功"}, status=200)
    # Unstar several items at once
    def delete(self, request, *args, **kwargs):
        user = request.user
        data = request.data['data']
        star_items = data['item_list']
        star_items_list=star_items.split(",")
        for i in star_items_list:
            # try:
            r1 = user.star_list.get(uid=i)
            user.star_list.remove(r1)
            user.save()
            # except Exception as e:
            #     return JsonResponse({"msg": "取消收藏失败"}, status=400)
        return JsonResponse({"msg": "已取消收藏"}, status=200)
class BuyedView(APIView):
    '''
    Purchased-resources view: list what the user bought, and buy papers.
    '''
    # List all purchased resources
    permission_classes = (IsAuthenticated,)
    def get(self, request, *args, **kwargs):
        """Return the serialized list of resources the user has bought."""
        user = request.user
        result = {}
        buy = []
        res = user.buyed_list.all()
        for p in res:
            # Fixed typo: `simplie_serialize` does not exist on the graph
            # models; use the same accessors StarView uses.
            if p.Type == "P1":
                buy.append(Paper.nodes.get(uid=p.uid).simple_serialize)
            else:
                buy.append(Patent.nodes.get(uid=p.uid).serialize)
        result['buy'] = buy
        return JsonResponse(result)
    # Buy a resource (papers only)
    def post(self, request, *args, **kwargs):
        """Charge the user for paper `uid` and record it as bought."""
        user = request.user
        uid = request.data['uid']
        try:
            p = Paper.nodes.get(uid=uid)
        except Exception as e:
            return JsonResponse({"msg": "资源不存在"}, status=404)
        if user.balance < p.price:
            return JsonResponse({'msg': "余额不足"}, status=400)
        try:
            user.balance -= p.price
            # Fixed: get_or_create returns an (object, created) tuple; the
            # tuple itself was added to the m2m relation, which raised and
            # made every purchase report "交易失败".
            r1, _created = Resource.objects.get_or_create(uid=uid, Type="P1")
            user.buyed_list.add(r1)
            user.save()
        except Exception as e:
            return JsonResponse({'msg': "交易失败"}, status=401)
        return JsonResponse({"msg": "交易成功", "balance": user.balance}, status=200)
# The payload returned when fetching an avatar still needs confirmation/rework
class AvatorView(APIView):
    '''
    Avatar endpoints: fetch an author's avatar URL, upload a new one.
    '''
    # Fetch the avatar URL for the given author uid, or the default image.
    def get(self, request, *args, **kwargs):
        uid = request.GET['uid']
        try:
            a = Avator.objects.get(uid=uid)
            return JsonResponse({"url": "static/author_avator/" + a.avator.name.split('/')[-1]})
        except Avator.DoesNotExist:
            # Fixed: the Django model exception is `DoesNotExist`; the
            # previous `DoesNotExists` raised AttributeError instead of
            # returning the default avatar.
            # NOTE(review): this key ("avator") differs from the success
            # branch ("url") — confirm what the client expects.
            return JsonResponse({"avator": "static/author_avator/default.jpg"})
    # Upload an avatar; experts additionally get their author uid attached.
    @permission_classes(IsAuthenticated,)
    def post(self, request, *args, **kwargs):
        user = request.user
        if user.Type == 'E':
            request.data['uid'] = user.uid
        a = request.FILES['avator']
        user.avator = "static/author_avator/" + a.name
        user.save()
        se = AuthorAvatorSerializer(data=request.data)
        if se.is_valid():
            se.save()
            return JsonResponse(se.data)
        return JsonResponse(se.errors, status=400)
class RechargeView(APIView):
    '''
    充值视图 (recharge view): redeem a one-time recharge-card key for balance.
    '''
    permission_classes = (IsAuthenticated,)

    # 充值
    def post(self, request, *args, **kwargs):
        user = request.user
        key = request.data['key']
        card = RechargeCard.objects.filter(token=key).first()
        if not card:
            return JsonResponse({'msg': "无效的充值卡"}, status=400)
        try:
            user.balance += card.amout
            # Fixed: persist the new balance; the original never called
            # user.save(), so the credited amount was silently lost while
            # the card was still consumed.
            user.save()
            card.delete()
        except Exception:
            return JsonResponse({'msg': "充值失败"}, status=400)
        return JsonResponse({'msg': "充值成功"}, status=200)
class CoGraphView(APIView):
    """Build a co-author graph around a seed author for a force-directed
    chart: POST {'uid'} -> {'nodes': [...], 'links': [...]}, capped at
    roughly 20 nodes."""

    def post(self, request, *args, **kwargs):
        uid=request.data['uid']
        base=Author.nodes.get(uid=uid)
        # uids already placed in the graph; the seed author becomes node 0.
        user_set=set((uid,))
        base_id=0
        nodes=[]
        links=[]
        # NOTE(review): `l` is built but never used after this point.
        l={}
        l["sourceWeight"]=1
        l["targetWeight"]=1
        n={"id":base_id,"name":base.name,"value":1}
        nodes.append(n)
        # First ring: direct coworkers of the seed, each linked to node 0.
        # Node ids are assigned in insertion order via len(user_set)-1.
        for co in base.coworkers.all():
            if co.uid not in user_set:
                user_set.add(co.uid)
                new={}
                new["id"]=len(user_set)-1
                new["name"]=co.name
                new["value"]=1
                nodes.append(new)
                new_link={}
                new_link["sourceWeight"]=1
                new_link["targetWeight"]=1
                new_link["source"]=base_id
                new_link["target"]=new["id"]
                links.append(new_link)
            if len(nodes)>20:
                break
        # Second ring: expand each first-ring author while the ~20-node
        # budget lasts; once full, only add links between known authors.
        for n in nodes[1:]:
            base=Author.nodes.get(name=n["name"])
            base_id=n["id"]
            for co in base.coworkers.all():
                if co.uid not in user_set:
                    if len(nodes)<20:
                        user_set.add(co.uid)
                        new={}
                        new["id"]=len(user_set)-1
                        new["name"]=co.name
                        new["value"]=1
                        nodes.append(new)
                        new_link={}
                        new_link["sourceWeight"]=1
                        new_link["targetWeight"]=1
                        new_link["source"]=base_id
                        new_link["target"]=new["id"]
                        links.append(new_link)
                    else:
                        # NOTE(review): this inner `n` shadows the outer loop
                        # variable; the outer loop resumes from the shadowed
                        # value — confirm this is intended.
                        for n in nodes:
                            if n["name"]==co.name:
                                new_link={}
                                new_link["sourceWeight"]=1
                                new_link["targetWeight"]=1
                                new_link["source"]=base_id
                                new_link["target"]=n["id"]
                                links.append(new_link)
        res={}
        res["nodes"]=nodes
        res["links"]=links
        return JsonResponse(res)
class InterestedView(APIView):
    '''
    "Interested in patent" messaging: POST sends a message to all of a
    patent's registered inventors, GET lists messages the current user has
    sent, PUT marks a message as read.
    '''

    # NOTE(review): @permission_classes on an APIView method is a no-op
    # (it is meant for @api_view functions); kept for parity.
    @permission_classes(IsAuthenticated,)
    def post(self, request, *args, **kwargs):
        send_user = request.user
        pid = request.data["pid"]
        message = request.data["message"]
        patent = Patent.nodes.get(uid=pid)
        name = patent.name
        # Stays True only if no inventor has a site account.
        not_delivered = True
        for inv in patent.inventor.all():
            try:
                rec = User.objects.get(uid=inv.uid)
                interested.objects.create(patent_id=pid, send_user=send_user,
                                          message=message, receive_user=rec,
                                          patent_title=name)
                not_delivered = False
            except User.DoesNotExist:
                continue
        if not_delivered:
            return JsonResponse({"msg": "很抱歉此专利发明者尚未在本站认证"})
        else:
            return JsonResponse({"msg": "消息发送成功!"}, status=200)

    @permission_classes(IsAuthenticated,)
    def get(self, request, *args, **kwargs):
        user = request.user
        result = []
        for ins in interested.objects.filter(send_user=user):
            in_se = {
                "patent_id": ins.patent_id,
                "patent_title": ins.patent_title,
            }
            # Resolve the receiver's display name by account type:
            # plain users go through the graph Author node, experts store
            # a username directly.
            if user.Type == "U":
                in_se["receive_user"] = Author.nodes.get(uid=ins.receive_user.uid).name
            elif user.Type == "E":
                in_se["receive_user"] = ins.receive_user.username
            in_se["message"] = ins.message
            in_se["status"] = ins.status
            in_se["message_id"] = ins.id
            result.append(in_se)
        return JsonResponse({"result": result})

    def put(self, request, *args, **kwargs):
        id = request.data["id"]
        try:
            ins = interested.objects.get(pk=id)
            ins.status = True
            ins.save()
        # Fixed: the attribute is DoesNotExist; the original referenced
        # interested.DoesNotExists, which raised AttributeError instead of
        # returning the "not found" message.
        except interested.DoesNotExist:
            return JsonResponse({"msg": "此信息不存在"})
        return JsonResponse({"msg": "此信息已读"})
class ReplyView(APIView):
    """Reply flow for "interested" messages: POST replies to an existing
    message, GET lists messages received by the current user."""

    # NOTE(review): @permission_classes is meant for @api_view functions;
    # on an APIView method it does not enforce anything — confirm intent.
    @permission_classes(IsAuthenticated)
    def post(self, request, *args, **kwargs):
        send_user=self_user=request.user if False else request.user
        # Load the message being replied to; its sender becomes the receiver.
        reply_message=interested.objects.get(pk=request.data['id'])
        interested.objects.create(patent_id=reply_message.patent_id,send_user=send_user,receive_user=reply_message.send_user,message=request.data['message'],patent_title=reply_message.patent_title)
        return JsonResponse({"msg":"消息回复成功!"})

    @permission_classes(IsAuthenticated)
    def get(self, request, *args, **kwargs):
        receive_user=request.user
        ret={}
        result=[]
        in_list=interested.objects.filter(receive_user=receive_user)
        for ins in in_list:
            in_se={}
            in_se["patent_id"]=ins.patent_id
            in_se["patent_title"]=ins.patent_title
            in_se["message"]=ins.message
            in_se["status"]=ins.status
            in_se["message_id"]=ins.id
            # Sender display name depends on account type: experts store a
            # username, plain users resolve through the graph Author node.
            if receive_user.Type=="E":
                in_se["send_user"]=ins.send_user.username
            elif receive_user.Type=="U":
                in_se["send_user"]=Author.nodes.get(uid=ins.send_user.uid).name
            result.append(in_se)
        ret["result"]=result
        return JsonResponse(ret)
| StarcoderdataPython |
3366255 | <filename>wwwhero/models.py
import random
from random import randint
from datetime import timedelta
from django.contrib.auth.models import User
from django.core.validators import MinValueValidator
from django.db import models, transaction
from django.utils import timezone
from wwwhero.exceptions import LevelUpCooldownError, MaxLevelError
class Character(models.Model):
    """A playable hero owned by a user; names are unique per user."""

    MAX_LEVEL = 20

    user = models.ForeignKey(User, on_delete=models.CASCADE)
    name = models.CharField(max_length=64, null=False)
    level = models.PositiveSmallIntegerField(default=1)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def level_up(self):
        """Raise the character one level, growing inventory and attributes.

        Raises:
            LevelUpCooldownError: the level-up cooldown has not expired yet.
            MaxLevelError: the character is already at MAX_LEVEL.
        """
        # Validate before mutating anything: the original bumped self.level
        # (and get_or_create'd the attributes row) first and then raised,
        # leaving a stale level on the in-memory instance.
        cooldown = CharacterCooldown.objects.filter(
            character=self,
            type=CharacterCooldown.Type.LEVEL,
        ).first()
        if cooldown and cooldown.until > timezone.now():
            raise LevelUpCooldownError
        if self.level >= self.MAX_LEVEL:
            raise MaxLevelError

        self.level += 1
        attrs, _ = CharacterAttributes.objects.get_or_create(character=self)

        # All or nothing: cooldown, inventory growth, attribute roll and the
        # level itself commit together.
        with transaction.atomic():
            CharacterCooldown.objects.update_or_create(
                character=self,
                type=CharacterCooldown.Type.LEVEL,
                defaults={
                    # Cooldown doubles with every level reached.
                    "until": timezone.now() + timedelta(seconds=2 ** self.level)
                }
            )
            inv = Inventory.objects.get(character=self)
            inv.max_space += 1
            inv.save(update_fields=["max_space"])
            attrs.upgrade()
            self.save()

    def __str__(self):
        return f"{self.user}, {self.name}, level {self.level}"

    class Meta:
        unique_together = ["user", "name"]
class CharacterSelection(models.Model):
    """Remembers which of a user's characters is currently active (1:1)."""
    user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)
    character = models.OneToOneField(Character, on_delete=models.CASCADE)
class CharacterAttributes(models.Model):
    """Combat stats for a character (1:1 with Character)."""

    # Points split between HP and damage on every level-up.
    LEVEL_UP_POINTS = 25

    character = models.OneToOneField(
        Character,
        on_delete=models.CASCADE,
        primary_key=True,
    )
    max_hp = models.IntegerField(default=10)
    hp = models.IntegerField(default=10)
    dmg = models.IntegerField(default=1)
    luck = models.IntegerField(default=1)

    def upgrade(self):
        """Randomly distribute LEVEL_UP_POINTS between HP and damage and
        maybe (50/50) add a luck point, then persist."""
        gained_hp = randint(1, 24)
        gained_dmg = self.LEVEL_UP_POINTS - gained_hp
        self.max_hp += gained_hp
        self.hp += gained_hp
        self.dmg += gained_dmg
        self.luck += randint(0, 1)
        self.save()

    def __str__(self):
        return (
            f"Character name: {self.character.name},"
            f" HP {self.hp}/{self.max_hp}, DMG {self.dmg}, Luck {self.luck}"
        )

    class Meta:
        verbose_name_plural = "Character attributes"
class CharacterCooldown(models.Model):
    """A per-character timer gating an action category (level-up, skill,
    search) until the 'until' timestamp passes; one row per (type, character)."""

    class Type(models.IntegerChoices):
        LEVEL = 1, "Level"
        SKILL = 2, "Skill"
        SEARCH = 3, "Search"

    type = models.PositiveSmallIntegerField(
        choices=Type.choices,
        default=Type.LEVEL,
        blank=False
    )
    character = models.ForeignKey(Character, on_delete=models.CASCADE)
    # Moment the cooldown expires; compare against timezone.now().
    until = models.DateTimeField()

    class Meta:
        unique_together = ["type", "character"]
class LocationType(models.Model):
    """Lookup table of location categories, keyed by name."""
    name = models.CharField(max_length=32, primary_key=True)

    def __str__(self):
        return self.name
class Location(models.Model):
    """A named game area, gated by a minimum character level."""
    name = models.CharField(max_length=64, unique=True)
    # Optional background image shown for the location.
    image = models.ImageField(upload_to="bg/location", blank=True)
    # Characters below this level cannot enter.
    min_level = models.PositiveSmallIntegerField(
        validators=[MinValueValidator(1)],
        default=1
    )
    is_active = models.BooleanField(default=False)
    type = models.ForeignKey(LocationType, on_delete=models.CASCADE)

    def __str__(self):
        return self.name
class CharacterLocation(models.Model):
    """Where a character currently is (1:1 with Character)."""
    character = models.OneToOneField(
        Character,
        on_delete=models.CASCADE,
        primary_key=True
    )
    location = models.ForeignKey(Location, on_delete=models.CASCADE)

    def __str__(self):
        return str(self.location)
class ItemBlueprint(models.Model):
    """Template describing a kind of item; concrete Item rows reference it."""

    class ItemType(models.IntegerChoices):
        ARMOR = 1, "Armor"
        JUNK = 2, "Junk"
        GOLD = 3, "Gold"
        QUEST = 4, "Quest"
        WEAPON = 5, "Weapon"

    class SlotType(models.IntegerChoices):
        HEAD = 1, "Head"
        BREAST = 2, "Breast"
        LEGS = 3, "Legs"
        LEFT = 4, "Left arm"
        RIGHT = 5, "Right arm"
        BOOTS = 6, "Boots"
        INVENTORY = 7, "Inventory"

    item_type = models.PositiveSmallIntegerField(choices=ItemType.choices, blank=False)
    # Equipment slot the item occupies when equipped/carried.
    slot_type = models.PositiveSmallIntegerField(choices=SlotType.choices, blank=False)
    name = models.CharField(max_length=32, unique=True)
    description = models.CharField(max_length=512, blank=True)
    is_consumable = models.BooleanField(default=False)
    is_stackable = models.BooleanField(default=False)
    is_droppable = models.BooleanField(default=False)
    # Baseline price before per-item adjustments.
    base_cost = models.PositiveSmallIntegerField(default=1)

    def __str__(self):
        return f"{self.name} ({self.get_item_type_display()})"
class Inventory(models.Model):
    """A character's bag; max_space grows by one on every level-up."""
    character = models.ForeignKey(Character, on_delete=models.CASCADE)
    max_space = models.PositiveSmallIntegerField(default=20)

    def __str__(self):
        return f"{self.character}'s inventory"
class Item(models.Model):
    """A concrete item instance, optionally held in an inventory."""

    class Rarity(models.IntegerChoices):
        COMMON = 1, "Common"
        UNCOMMON = 2, "Uncommon"
        RARE = 3, "Rare"
        EPIC = 4, "Epic"
        LEGENDARY = 5, "Legendary"

    MAX_NAME_LENGTH = 64

    blueprint = models.ForeignKey(ItemBlueprint, on_delete=models.CASCADE)
    # Null inventory means the item exists but is not held by any character.
    inventory = models.ForeignKey(
        Inventory,
        blank=True,
        null=True,
        on_delete=models.CASCADE
    )
    name = models.CharField(max_length=MAX_NAME_LENGTH, default="")
    level = models.PositiveSmallIntegerField(default=1)
    amount = models.PositiveSmallIntegerField(default=1)
    rarity = models.PositiveSmallIntegerField(choices=Rarity.choices)
    min_damage = models.PositiveSmallIntegerField(default=0)
    max_damage = models.PositiveSmallIntegerField(default=0)
    defense = models.PositiveSmallIntegerField(default=0)
    health = models.PositiveSmallIntegerField(default=0)
    cost = models.PositiveSmallIntegerField(default=1)
    skin = models.ImageField(upload_to="skins/item", blank=True)

    def __str__(self):
        """Render as '<amount> <name> (<inventory>)', with the amount shown
        only for stackables and the inventory only when held."""
        prefix, inventory = "", ""
        if self.blueprint.is_stackable:
            prefix = f"{self.amount} "
        if self.inventory:
            inventory = f" ({self.inventory})"
        return f"{prefix}{self.name}{inventory}"

    def generate_name(self):
        """Compose a random rarity-flavoured name ('<prefix> <blueprint>
        of <postfix>'), persist it, and return it; falls back to the bare
        blueprint name when the result exceeds MAX_NAME_LENGTH."""
        prefixes = {
            self.Rarity.COMMON: ["broken", "old", "useless", "dirty", "ugly"],
            self.Rarity.UNCOMMON: ["simple", "ordinary", "uncommon"],
            self.Rarity.RARE: ["new", "nice", "quality", "rare", "shiny"],
            self.Rarity.EPIC: ["fancy", "epic", "brutal"],
            self.Rarity.LEGENDARY: ["legendary", "extraordinary", "incredible"],
        }
        # Empty postfix entries mean "no 'of ...' suffix".
        postfixes = {
            self.Rarity.COMMON: ["dumbness", "misery", "bad luck", "dirt", ""],
            self.Rarity.UNCOMMON: ["", "fear", ""],
            self.Rarity.RARE: ["", "queen", "king", "sad harold"],
            self.Rarity.EPIC: ["rare sand", "void from the ocean", "epicity"],
            self.Rarity.LEGENDARY: ["insane power", "", "immortality", "unstable power"],
        }
        postfix = random.choice(postfixes[self.Rarity(self.rarity)])
        prefix = random.choice(prefixes[self.Rarity(self.rarity)])
        self.name = f"{prefix} {self.blueprint.name}{' of ' + postfix if postfix else ''}"
        if len(self.name) > self.MAX_NAME_LENGTH:
            self.name = self.blueprint.name
        self.save(update_fields=["name"])
        return self.name
class UserVisit(models.Model):
    """Per-user hit counter for a (url, method) pair."""

    user = models.ForeignKey(User, on_delete=models.CASCADE)
    view = models.PositiveSmallIntegerField(default=0)
    url = models.CharField(null=False, max_length=200)
    method = models.CharField(max_length=8)

    def __str__(self):
        # Truncate long URLs so admin listings stay readable.
        shown = self.url[:10] + "..." if len(self.url) > 10 else self.url
        return f"{self.user.username}, {shown}, {self.method}, {self.view} views"

    class Meta:
        unique_together = ["user", "url", "method"]
| StarcoderdataPython |
8188104 | import logging
from collections import defaultdict
from typing import List, Set, Tuple
from django.contrib import admin
from django.db.models import QuerySet
from model_garden.models import Dataset, MediaAsset
from model_garden.services import S3Client
from model_garden.services.s3 import DeleteError
from .common import FilterCreatedFixture, format_date
logger = logging.getLogger(__name__)
@admin.register(Dataset)
class DatasetAdmin(admin.ModelAdmin, FilterCreatedFixture):
    """Admin for Dataset: read-only creation, date/search filtering, and
    cascading deletion of backing media-asset files from S3."""

    date_hierarchy = 'created_at'
    list_display = ('id', 'created', 'path', 'bucket', 'dataset_format')
    list_filter = ('created_at', 'bucket', 'dataset_format')
    search_fields = ('id', 'path')

    def has_add_permission(self, request, obj=None):
        # Datasets are created through the application, never via the admin.
        return False

    def created(self, obj):
        return format_date(obj.created_at)

    def get_search_results(self, request, queryset, search_term):
        # Extend the default search with created-date matching.
        queryset, use_distinct = super().get_search_results(request, queryset, search_term)
        queryset |= self.filter_created(request, queryset, search_term)
        return queryset, use_distinct

    def delete_model(self, request, obj):
        # Route single deletes through the bulk path so S3 cleanup runs.
        queryset = type(obj).objects.filter(pk=obj.pk)
        self.delete_queryset(request, queryset)

    def delete_queryset(self, request, queryset):
        """Delete datasets and their media assets, first removing the asset
        (and label) files from S3; assets whose remote delete failed — and
        the datasets owning them — are kept."""
        media_assets = list(get_media_assets(queryset))

        # Group assets by bucket so each bucket is handled in one batch.
        bucket_map = defaultdict(list)
        for asset in media_assets:
            bucket_map[asset.dataset.bucket.name].append(asset)

        # Fixed annotation: Tuple[str, str]. The original wrote
        # Tuple(str, str), which is a call expression, not a subscription.
        error_keys: Set[Tuple[str, str]] = set()
        for bucket, assets in bucket_map.items():
            file_path_to_remove = ([asset.full_path for asset in assets]
                                   + [asset.full_label_path for asset in assets])
            delete_errors = delete_files_in_s3(
                bucket, file_path_to_remove,
            )
            error_keys |= set((bucket, error.key) for error in delete_errors)

        # Remove DB rows only for assets whose S3 files were deleted.
        (
            MediaAsset.objects
            .filter(
                pk__in=[
                    asset.pk for asset in media_assets
                    if (asset.dataset.bucket.name, asset.full_path) not in error_keys
                ],
            ).delete()
        )
        # Keep any dataset that still has assets stuck in S3.
        (
            queryset
            .exclude(
                pk__in=set(
                    asset.dataset.pk for asset in media_assets
                    if (asset.dataset.bucket.name, asset.full_path) in error_keys
                ),
            )
            .delete()
        )
def get_media_assets(dataset: QuerySet) -> QuerySet:
    """Return MediaAsset rows belonging to the given datasets, with the
    dataset and bucket relations pre-fetched for cheap attribute access."""
    assets = MediaAsset.objects.filter(dataset__in=dataset)
    return assets.select_related('dataset').select_related('dataset__bucket')
def delete_files_in_s3(bucket: str, keys: List[str]) -> List[DeleteError]:
    """Delete the given keys from *bucket*; return the per-key failures
    (empty list when everything succeeded or there was nothing to do)."""
    if not keys:
        return []

    errors = S3Client(bucket_name=bucket).delete_files_concurrent(*keys)
    if errors:
        logger.error(
            'Unable to delete media_assets in bucket %s: %s', bucket, errors,
        )
    return errors
| StarcoderdataPython |
3286327 | import shutil
import traceback
from celery import group, task
from django.conf import settings
from raster.tiles.const import GLOBAL_MAX_ZOOM_LEVEL, MIN_ZOOMLEVEL_TASK_PARALLEL
from raster.tiles.parser import RasterLayerParser
@task
def create_tiles(rasterlayer_id, zoom, extract_metadata=False):
    """
    Create all tiles for a raster layer at the input zoom level.

    ``zoom`` may be None (use the layer's max zoom), a single number, or an
    iterable of levels; levels above the layer's max zoom are skipped.
    """
    # Construct outside the try: if construction itself fails, the original
    # except clause would have hit an unbound `parser` name.
    parser = RasterLayerParser(rasterlayer_id)
    try:
        # Open raster file and extract metadata if requested.
        if extract_metadata:
            parser.open_raster_file()
            parser.extract_metadata()
        # Check if zoom level should be built.
        if zoom is None:
            zoom = parser.max_zoom
        elif isinstance(zoom, (int, float)):
            if zoom > parser.max_zoom:
                return
        else:
            zoom = [zl for zl in zoom if zl <= parser.max_zoom]
            if not len(zoom):
                return
        # Open raster file if not open already.
        if not extract_metadata:
            parser.open_raster_file()
        parser.reproject_rasterfile()
        parser.create_tiles(zoom)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer recorded as parse failures; the error is still logged
        # and re-raised for celery.
        parser.log(
            traceback.format_exc(),
            status=parser.rasterlayer.parsestatus.FAILED
        )
        raise
    finally:
        # Drop the temporary working directory, releasing the GDAL dataset
        # handle first.
        if hasattr(parser, 'tmpdir'):
            tmpdir = parser.tmpdir
            parser.dataset = None
            parser = None
            shutil.rmtree(tmpdir)
@task
def clear_tiles(rasterlayer_id):
    """
    Drop all tiles of a rasterlayer.
    """
    RasterLayerParser(rasterlayer_id).drop_all_tiles()
@task
def send_success_signal(rasterlayer_id):
    """
    Drop empty tiles of a raster layer and send parse succes signal.
    """
    RasterLayerParser(rasterlayer_id).send_success_signal()
@task
def all_in_one(rasterlayer_id, zoom_range):
    """
    Parses raster in a single task.

    Runs the full pipeline sequentially: clear existing tiles, rebuild the
    requested zoom levels (with metadata extraction), then signal success.
    """
    clear_tiles(rasterlayer_id)
    create_tiles(rasterlayer_id, zoom_range, True)
    send_success_signal(rasterlayer_id)
def parse(rasterlayer_id):
    """
    Parse raster layer to extract metadata and create tiles.

    Depending on settings, the work runs synchronously, as one celery task,
    or as a celery chain that parallelizes the high zoom levels.
    """
    parser = RasterLayerParser(rasterlayer_id)
    parser.log('Started parsing raster.')

    # Create array of all allowed zoom levels
    if parser.rasterlayer.build_pyramid:
        zoom_range = list(range(GLOBAL_MAX_ZOOM_LEVEL + 1))
    else:
        if parser.rasterlayer.max_zoom is not None:
            zoom_range = (parser.rasterlayer.max_zoom, )
        else:
            # None defers the zoom choice to create_tiles (layer max zoom).
            zoom_range = None

    # Check if parsing should happen asynchronously
    parse_async = getattr(settings, 'RASTER_USE_CELERY', False)
    parse_single_task = getattr(settings, 'RASTER_PARSE_SINGLE_TASK', False)

    if parse_async and not parse_single_task:
        if zoom_range is not None:
            # Bundle the first five raster layers to one task. For low zoom
            # levels, downloading is more costly than parsing.
            create_tiles_chain = create_tiles.si(rasterlayer_id, zoom_range[:MIN_ZOOMLEVEL_TASK_PARALLEL], True)
            # Run the higher level zooms in parallel tasks through a task group.
            if len(zoom_range) > MIN_ZOOMLEVEL_TASK_PARALLEL:
                high_zoom_level_group = group(
                    create_tiles.si(rasterlayer_id, zoom) for zoom in zoom_range[MIN_ZOOMLEVEL_TASK_PARALLEL:]
                )
                # Combine bundle and middle levels to priority group.
                create_tiles_chain = (create_tiles_chain | high_zoom_level_group)
        else:
            create_tiles_chain = create_tiles.si(rasterlayer_id, None, True)
        # Setup the parser logic as parsing chain
        parsing_task_chain = (
            clear_tiles.si(rasterlayer_id)
            | create_tiles_chain
            | send_success_signal.si(rasterlayer_id)
        )
        # Apply the parsing chain
        parser.log('Parse task queued, waiting for worker availability.')
        parsing_task_chain.apply_async()
    elif parse_async and parse_single_task:
        parser.log('Parse task queued in all-in-one mode, waiting for worker availability.')
        all_in_one.delay(rasterlayer_id, zoom_range)
    else:
        # No celery configured: run everything inline, blocking the caller.
        all_in_one(rasterlayer_id, zoom_range)
| StarcoderdataPython |
11313691 | from fastapi import Depends, Header,File, Body,Query, UploadFile, FastAPI, HTTPException, APIRouter, Request, Response, Form, status, BackgroundTasks
from fastapi.security import OAuth2PasswordRequestForm
from fastapi.encoders import jsonable_encoder
from fastapi_mail import FastMail
from sqlalchemy.orm import Session
from starlette.requests import Request
from starlette.responses import Response, JSONResponse
import smtplib
from ..database import SessionLocal, engine
from .. import schemas, models
from ..auth import(
create_user,
authenticate_user,
get_user_by_email,
verify_token,
create_access_token,
save_access_token,
delete_access_token,
CookieVerificationError
)
# Dependency
def get_db():
    """FastAPI dependency yielding a DB session that is always closed.

    The session is created before the try block: in the original, a failure
    inside SessionLocal() reached the finally clause with `db` unbound.
    """
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()
# Router collecting all authentication endpoints (/api/v1/...).
auth_route = APIRouter()

# Plain test-mail body (not referenced by the routes below).
html = """
<html>
<body>
<p>Hi This test mail
<br>Thanks for using Fastapi-mail</p>
</body>
</html>
"""

# Registration-confirmation mail body sent to newly registered users.
template = """
<html>
<body>
<p>Hi Please confirm your registration.
<br> To log in please click the link below.
<br> <a href="http://localhost:3000/mainPage">Login!</a>
<br>Thank you.</p>
</body>
</html>
"""
@auth_route.post("/api/v1/register")
async def register_user(user: schemas.UserCreate, background_tasks: BackgroundTasks, db: Session = Depends(get_db)) -> models.User:
    """Register a new user, set the session cookie and queue a
    confirmation mail.

    Raises 400 when the passwords differ or the email is already taken.
    """
    if user.password != user.repassword:
        raise HTTPException(status_code=400, detail="Passwords don't match.")
    db_user = get_user_by_email(db, email=user.email_address)
    if db_user:
        raise HTTPException(status_code=400, detail="Email already exists.")
    # SECURITY(review): mail credentials are hard-coded placeholders; move
    # them into configuration/secret storage before deploying.
    g_mail = "<EMAIL>"
    g_pass = "<PASSWORD>"
    us = create_user(db, user)
    us.user_token = jsonable_encoder(create_access_token(data={"sub": us.email_address}))
    save_access_token(db, us)
    response = Response(status_code=200)
    response.set_cookie(
        key="username",
        value=us.email_address
    )
    # Fixed: pass the password variable; the original's bare
    # `password=<PASSWORD>` placeholder is a syntax error.
    mail = FastMail(email=g_mail, password=g_pass, tls=True, port="587", service="gmail")
    background_tasks.add_task(mail.send_message, recipient=us.email_address, subject="Confirm your registration.", body=template, text_format="html")
    return us
@auth_route.post("/api/v1/login")
async def login_user(form_data: OAuth2PasswordRequestForm = Depends(), db: Session = Depends(get_db)) -> Response:
    """Authenticate a user, store a fresh access token and set the
    session cookie; raises 401 on bad credentials."""
    user = authenticate_user(db, form_data.username, form_data.password)
    if not user:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Incorrect username or password",
            headers={"WWW-Authenticate": "Bearer"},
        )
    token = create_access_token(data={"sub": user.email_address})
    user.user_token = jsonable_encoder(token)
    save_access_token(db, user)
    response = Response(status_code=200)
    response.set_cookie(key="username", value=form_data.username)
    return response
# @auth_route.get("/api/v1/get-access-token")
# async def get_access_token(requests: Request, db: Session = Depends(get_db)) -> Response:
# try:
# username = requests.cookies["username"]
# verify_token(db, username)
# response = Response()
# response.set_cookie(
# key="username",
# value=username
# )
# return response
# except CookieVerificationError:
# raise HTTPException(status_code=HTTP_400_BAD_REQUEST)
@auth_route.get("/api/v1/logout")
async def logout_user(request: Request, db: Session = Depends(get_db)) -> Response:
    """Invalidate the stored access token and clear the session cookie."""
    delete_access_token(db, request.cookies["username"])
    response = Response()
    response.delete_cookie(key="username")
    return response
8153031 | # SPDX-FileCopyrightText: 2017 Fermi Research Alliance, LLC
# SPDX-License-Identifier: Apache-2.0
from itertools import chain
import pandas
from decisionengine.framework.logicengine.BooleanExpression import BooleanExpression
from decisionengine.framework.logicengine.RuleEngine import RuleEngine
from decisionengine.framework.modules.Module import Module
def passthrough_configuration(publisher_names):
    """Assembles logic-engine configuration to unconditionally execute all publishers.

    Args:
        publisher_names: iterable of publisher module names (generalized
            from sized containers only; any iterable now works).

    Returns:
        dict: empty when no publishers are given, otherwise a logic-engine
        configuration with one always-true rule triggering all publishers.
    """
    publishers = list(publisher_names)
    # Idiomatic emptiness test instead of len(...) == 0.
    if not publishers:
        return {}
    return {
        "logic_engine": {
            "module": "decisionengine.framework.logicengine.LogicEngine",
            "parameters": {
                "facts": {},
                "rules": {"r1": {"expression": "True", "actions": publishers}},
            },
        }
    }
class LogicEngine(Module):
    """Decision-engine module that evaluates configured boolean facts
    against a DataBlock and runs the rule engine over the results,
    producing actions plus a dataframe of newly derived facts."""

    def __init__(self, cfg):
        super().__init__(cfg)
        # Compile each configured fact expression once, up front.
        self.facts = {name: BooleanExpression(expr) for name, expr in cfg["facts"].items()}
        self.rule_engine = RuleEngine(cfg["facts"].keys(), cfg["rules"])

    def produces(self):
        # Products this module contributes to the DataBlock.
        self.logger.debug("in LE::produces()")
        return ["actions", "newfacts"]

    def consumes(self):
        """Return the names of all the items that must be in the DataBlock for
        the rules to be evaluated.
        """
        self.logger.debug("in LE::consumes()")
        # Union of the names required by every fact expression, deduplicated.
        list_of_lists = [f.required_names for f in self.facts.values()]
        return list(set(chain(*list_of_lists)))

    def evaluate_facts(self, db):
        """
        :type db: :obj:`DataBlock`
        :arg db: Products used to evaluate facts.

        :rtype: dict
        :returns: Evaluated fact values (e.g. True or False) for each fact name.
        """
        try:
            return {name: f.evaluate(db) for name, f in self.facts.items()}
        except NameError as e:
            # A fact referenced a product missing from the DataBlock; list
            # what is available to make the configuration error actionable.
            msg = f"The following error was encountered: {e}\n"
            if len(db) == 0:
                msg += "No fact names are available."
            else:
                msg += "Allowed fact names are:\n"
                for key in db:
                    msg += "  '" + key + "'\n"
            self.logger.error(msg)
            raise e
        except Exception as e:
            self.logger.exception("Unexpected exception while evaluating facts.")
            raise e

    def evaluate(self, db):
        """
        Evaluate our facts and rules, in the context of the given data.
        db can be any mappable, in particular a DataBlock or dictionary.

        :type db: :obj:`DataBlock`
        :arg db: Products used to evaluate facts.
        """
        self.logger.info("LE: calling evaluate_facts")
        evaluated_facts = self.evaluate_facts(db)
        for key, val in evaluated_facts.items():
            self.logger.info(f"Evaluated Fact: {key} -> Value: {val} -> TypeOf(Value): {type(val)}")

        # Process rules
        self.logger.info("LE: calling execute")
        actions, newfacts = self.rule_engine.execute(evaluated_facts)
        return (actions, self._create_facts_dataframe(newfacts))

    def _create_facts_dataframe(self, newfacts):
        """
        Convert newfacts dict in format below to dataframe with columns
        ['rule_name', 'fact_name', fact_value']

        facts dict format:
           'newfacts': {
                'publish_glidein_requests': {
                    'allow_hpc_new': True,
                    'allow_foo': True
                },
                'dummy_rule': {
                    'dummy_new_fact': True
                }
            }
        """
        self.logger.debug("in LE::_create_facts_dataframe")
        # Extract new facts from le_result
        # Dataframe column values for Facts: one row per (rule, fact) pair.
        rule_name = []
        fact_name = []
        fact_value = []
        for rule in newfacts:
            facts = newfacts[rule]
            rule_name += [rule] * len(facts)
            fact_name += facts.keys()
            fact_value += facts.values()
        facts = {"rule_name": rule_name, "fact_name": fact_name, "fact_value": fact_value}
        return pandas.DataFrame(facts)
| StarcoderdataPython |
4966511 | # -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2012-2020 iSolver Software Solutions (C) 2021 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
import math
from psychopy.iohub.constants import EventConstants, EyeTrackerConstants
from psychopy.iohub.devices import Computer, Device
from psychopy.iohub.devices.eyetracker import EyeTrackerDevice
from psychopy.iohub.devices.eyetracker.hw.tobii.tobiiCalibrationGraphics import TobiiPsychopyCalibrationGraphics
from psychopy.iohub.devices.eyetracker.eye_events import *
from psychopy.iohub.errors import print2err, printExceptionDetailsToStdErr
try:
from .tobiiwrapper import TobiiTracker
except Exception:
print2err('Error importing tobiiwrapper.TobiiTracker')
printExceptionDetailsToStdErr()
class EyeTracker(EyeTrackerDevice):
"""
To start iohub with a Tobii eye tracker device, add the Tobii
device to the dictionary passed to launchHubServer or the
experiment's iohub_config.yaml::
eyetracker.hw.tobii.EyeTracker
Examples:
A. Start ioHub with a Tobii device and run tracker calibration::
from psychopy.iohub import launchHubServer
from psychopy.core import getTime, wait
iohub_config = {'eyetracker.hw.tobii.EyeTracker':
{'name': 'tracker', 'runtime_settings': {'sampling_rate': 120}}}
io = launchHubServer(**iohub_config)
# Get the eye tracker device.
tracker = io.devices.tracker
# run eyetracker calibration
r = tracker.runSetupProcedure()
B. Print all eye tracker events received for 2 seconds::
# Check for and print any eye tracker events received...
tracker.setRecordingState(True)
stime = getTime()
while getTime()-stime < 2.0:
for e in tracker.getEvents():
print(e)
C. Print current eye position for 5 seconds::
# Check for and print current eye position every 100 msec.
stime = getTime()
while getTime()-stime < 5.0:
print(tracker.getPosition())
wait(0.1)
tracker.setRecordingState(False)
# Stop the ioHub Server
io.quit()
"""
_tobii = None
DEVICE_TIMEBASE_TO_SEC = 0.000001
EVENT_CLASS_NAMES = [
'MonocularEyeSampleEvent',
'BinocularEyeSampleEvent',
'FixationStartEvent',
'FixationEndEvent',
'SaccadeStartEvent',
'SaccadeEndEvent',
'BlinkStartEvent',
'BlinkEndEvent']
__slots__ = []
    def __init__(self, *args, **kwargs):
        """Connect to a Tobii tracker (optionally selected by model name /
        serial number from the device config), apply an optional license
        file, and set the requested sampling rate when supported."""
        EyeTrackerDevice.__init__(self, *args, **kwargs)
        # Normalize an empty/whitespace-only model name to None so it is
        # treated as "any model".
        if self.model_name:
            self.model_name = self.model_name.strip()
            if len(self.model_name) == 0:
                self.model_name = None
        model_name = self.model_name
        serial_num = self.getConfiguration().get('serial_number')

        EyeTracker._tobii = None
        try:
            # Class-level handle: one shared tracker connection per process.
            EyeTracker._tobii = TobiiTracker(serial_num, model_name)
        except Exception:
            print2err('Error creating Tobii Device class')
            printExceptionDetailsToStdErr()

        # Apply license file if needed
        try:
            license_file = self.getConfiguration().get('license_file', "")
            if license_file != "":
                with open(license_file, "rb") as f:
                    license = f.read()
                res = self._tobii._eyetracker.apply_licenses(license)
                # apply_licenses returns a list of failed validations;
                # empty means success.
                if len(res) == 0:
                    print2err("Successfully applied Tobii license from: {}".format(license_file))
                else:
                    print2err("Error: Failed to apply Tobii license from single key. "
                              "Validation result: %s." % (res[0].validation_result))
            else:
                print2err("No Tobii license_file in config. Skipping.")
        except Exception:
            print2err("Error calling Tobii.apply_licenses with file {}.".format(license_file))
            printExceptionDetailsToStdErr()

        # Only apply the configured sampling rate when the tracker reports
        # it as available.
        srate = self._runtime_settings['sampling_rate']
        if srate and srate in self._tobii.getAvailableSamplingRates():
            self._tobii.setSamplingRate(srate)

        self._latest_sample = None
        self._latest_gaze_position = None
def trackerTime(self):
"""Current eye tracker time in the eye tracker's native time base. The
Tobii system uses a usec timebase.
Args:
None
Returns:
float: current native eye tracker time. (in usec for the Tobii)
"""
if self._tobii:
return self._tobii.getCurrentEyeTrackerTime()
return EyeTrackerConstants.EYETRACKER_ERROR
def trackerSec(self):
"""Current eye tracker time, normalized to sec.msec format.
Args:
None
Returns:
float: current native eye tracker time in sec.msec-usec format.
"""
if self._tobii:
return self._tobii.getCurrentEyeTrackerTime() * self.DEVICE_TIMEBASE_TO_SEC
return EyeTrackerConstants.EYETRACKER_ERROR
def setConnectionState(self, enable):
"""
setConnectionState is a no-op when using the Tobii system, as the
connection is established when the Tobii EyeTracker classes are created,
and remains active until the program ends, or a error occurs resulting
in the loss of the tracker connection.
Args:
enable (bool): True = enable the connection, False = disable the connection.
Return:
bool: indicates the current connection state to the eye tracking hardware.
"""
if self._tobii:
return True
return False
def isConnected(self):
"""isConnected returns whether the Tobii is connected to the experiment
PC and if the tracker state is valid. Returns True if the tracker can
be put into Record mode, etc and False if there is an error with the
tracker or tracker connection with the experiment PC.
Args:
None
Return:
bool: True = the eye tracking hardware is connected. False otherwise.
"""
if self._tobii:
return True
return False
def sendMessage(self, message_contents, time_offset=None):
"""The sendMessage method is not supported by the Tobii implementation
of the Common Eye Tracker Interface, as the Tobii SDK does not support
saving eye data to a native data file during recording."""
return EyeTrackerConstants.EYETRACKER_INTERFACE_METHOD_NOT_SUPPORTED
def sendCommand(self, key, value=None):
"""The sendCommand method is not supported by the Tobii Common Eye
Tracker Interface."""
return EyeTrackerConstants.EYETRACKER_INTERFACE_METHOD_NOT_SUPPORTED
    def runSetupProcedure(self, calibration_args={}):
        """runSetupProcedure performs a calibration routine for the Tobii eye
        tracking system.

        Returns the calibration result, or EYETRACKER_ERROR on failure.
        """
        # NOTE(review): mutable default argument; safe only as long as
        # callers never mutate calibration_args.
        try:
            genv = TobiiPsychopyCalibrationGraphics(self, calibration_args)
            calibrationOK = genv.runCalibration()

            # On some graphics cards, we have to minimize before closing or the calibration window will stay visible
            # after close is called.
            genv.window.winHandle.set_visible(False)
            genv.window.winHandle.minimize()
            genv.window.close()

            genv._unregisterEventMonitors()
            genv.clearAllEventBuffers()

            return calibrationOK

        except Exception:
            print2err('Error during runSetupProcedure')
            printExceptionDetailsToStdErr()
        return EyeTrackerConstants.EYETRACKER_ERROR
def enableEventReporting(self, enabled=True):
"""enableEventReporting is functionally identical to the eye tracker
device specific enableEventReporting method."""
try:
enabled = EyeTrackerDevice.enableEventReporting(self, enabled)
self.setRecordingState(enabled)
return enabled
except Exception as e:
print2err('Error during enableEventReporting')
printExceptionDetailsToStdErr()
return EyeTrackerConstants.EYETRACKER_ERROR
    def setRecordingState(self, recording):
        """setRecordingState is used to start or stop the recording of data
        from the eye tracking device.

        args:
           recording (bool): if True, the eye tracker will start recordng available
              eye data and sending it to the experiment program if data streaming
              was enabled for the device. If recording == False, then the eye
              tracker stops recording eye data and streaming it to the experiment.

        If the eye tracker is already recording, and setRecordingState(True) is
        called, the eye tracker will simple continue recording and the method call
        is a no-op. Likewise if the system has already stopped recording and
        setRecordingState(False) is called again.

        Args:
            recording (bool): if True, the eye tracker will start recordng data.; false = stop recording data.

        Return:
            bool: the current recording state of the eye tracking device
        """
        if self._tobii and recording is True and not self.isRecordingEnabled():
            #ioHub.print2err("Starting Tracking... ")
            # Samples are delivered to _handleNativeEvent as they arrive.
            self._tobii.startTracking(self._handleNativeEvent)
            return EyeTrackerDevice.enableEventReporting(self, True)

        elif self._tobii and recording is False and self.isRecordingEnabled():
            self._tobii.stopTracking()
            #ioHub.print2err("Stopping Tracking... ")
            # Clear cached data so stale samples are not reported later.
            self._latest_sample = None
            self._latest_gaze_position = None
            return EyeTrackerDevice.enableEventReporting(self, False)

        return self.isRecordingEnabled()
def isRecordingEnabled(self):
"""isRecordingEnabled returns the recording state from the eye tracking
device.
Args:
None
Return:
bool: True == the device is recording data; False == Recording is not occurring
"""
if self._tobii:
return self._tobii._isRecording
return False
def getLastSample(self):
"""Returns the latest sample retrieved from the Tobii device. The Tobii
system always using the BinocularSample Event type.
Args:
None
Returns:
None: If the eye tracker is not currently recording data.
EyeSample: If the eye tracker is recording in a monocular tracking mode, the latest sample event of this event type is returned.
BinocularEyeSample: If the eye tracker is recording in a binocular tracking mode, the latest sample event of this event type is returned.
"""
return self._latest_sample
def getLastGazePosition(self):
"""Returns the latest 2D eye gaze position retrieved from the Tobii
device. This represents where the eye tracker is reporting each eye
gaze vector is intersecting the calibrated surface.
In general, the y or vertical component of each eyes gaze position should
be the same value, since in typical user populations the two eyes are
yoked vertically when they move. Therefore any difference between the
two eyes in the y dimension is likely due to eye tracker error.
Differences between the x, or horizontal component of the gaze position,
indicate that the participant is being reported as looking behind or
in front of the calibrated plane. When a user is looking at the
calibration surface , the x component of the two eyes gaze position should be the same.
Differences between the x value for each eye either indicates that the
user is not focussing at the calibrated depth, or that there is error in the eye data.
The above remarks are true for any eye tracker in general.
The getLastGazePosition method returns the most recent eye gaze position
retieved from the eye tracker device. This is the position on the
calibrated 2D surface that the eye tracker is reporting as the current
eye position. The units are in the units in use by the Display device.
If binocular recording is being performed, the average position of both
eyes is returned.
If no samples have been received from the eye tracker, or the
eye tracker is not currently recording data, None is returned.
Args:
None
Returns:
None: If the eye tracker is not currently recording data or no eye samples have been received.
tuple: Latest (gaze_x,gaze_y) position of the eye(s)
"""
return self._latest_gaze_position
    def _setSamplingRate(self, sampling_rate):
        # Delegate the rate change to the native Tobii wrapper object.
        return self._tobii.setSamplingRate(sampling_rate)
    def _poll(self):
        """The Tobii system uses a callback approach to providing new eye data
        as it becomes available (see _handleNativeEvent), so polling — and
        therefore this method — is intentionally a no-op."""
        pass
    def _handleNativeEvent(self, *args, **kwargs):
        """This method is called every time there is new eye data available
        from the Tobii system, which will be roughly equal to the sampling rate
        eye data is being recorded at.

        The callback needs to return as quickly as possible so there is
        no chance of overlapping calls being made to the callback.
        Therefore this method simply puts the event data received from
        the eye tracker device, and the local ioHub time the callback
        was called, into a buffer for processing by the ioHub event
        system.
        """
        if self.isReportingEvents():
            try:
                logged_time = Computer.getTime()
                # Convert the tracker clock to seconds using the device
                # timebase factor, then estimate how old this sample is.
                tobii_logged_time = self._tobii.getCurrentLocalTobiiTime() * self.DEVICE_TIMEBASE_TO_SEC
                eye_data_event = args[0]
                data_delay = tobii_logged_time - (eye_data_event['system_time_stamp'] * self.DEVICE_TIMEBASE_TO_SEC)
                device_event_time = eye_data_event['device_time_stamp']
                # Back-date the ioHub time by the estimated delivery delay.
                iohub_event_time = (logged_time - data_delay)
                self._addNativeEventToBuffer(
                    (logged_time,
                     device_event_time,
                     iohub_event_time,
                     data_delay,
                     eye_data_event))
                return True
            except Exception:
                print2err('ERROR IN _handleNativeEvent')
                printExceptionDetailsToStdErr()
        else:
            print2err(
                'self._handleNativeEvent called but isReportingEvents == false')
    def _getIOHubEventObject(self, native_event_data):
        """The _getIOHubEventObject method is called by the ioHub Server to
        convert new native device event objects that have been received to the
        appropriate ioHub Event type representation.

        The Tobii ioHub eye tracker implementation uses a callback method
        to register new native device events with the ioHub Server.
        Therefore this method converts the native Tobii event data into
        an appropriate ioHub Event representation.

        Args:
            native_event_data: object or tuple of (callback_time, native_event_object)

        Returns:
            tuple: The appropriate ioHub Event type in list form.
        """
        try:
            logged_time, device_event_time, iohub_event_time, data_delay, eye_data_event = native_event_data
            event_type = EventConstants.BINOCULAR_EYE_SAMPLE
            left_gaze_x, left_gaze_y = eye_data_event['left_gaze_point_on_display_area']
            right_gaze_x, right_gaze_y = eye_data_event['right_gaze_point_on_display_area']
            # status encodes per-eye validity: +20 when the left eye sample
            # is invalid, +2 when the right eye is invalid (0 = both valid).
            status = 0
            if eye_data_event['left_gaze_point_validity'] > 0:
                left_gaze_x, left_gaze_y = self._eyeTrackerToDisplayCoords(
                    (left_gaze_x, left_gaze_y))
            else:
                status += 20
            if eye_data_event['right_gaze_point_validity'] > 0:
                right_gaze_x, right_gaze_y = self._eyeTrackerToDisplayCoords(
                    (right_gaze_x, right_gaze_y))
            else:
                status += 2
            right_gx, right_gy, right_gz = eye_data_event['right_gaze_origin_in_trackbox_coordinate_system']
            left_gx, left_gy, left_gz = eye_data_event['left_gaze_origin_in_trackbox_coordinate_system']
            confidenceInterval = 0.0
            # Field order below must match the BinocularEyeSample schema —
            # do not reorder entries.
            binocSample = [
                0,
                0,
                0,  # device id (not currently used)
                Device._getNextEventID(),
                event_type,
                device_event_time,
                logged_time,
                iohub_event_time,
                confidenceInterval,
                data_delay,
                0,  # filtered id (always 0 right now)
                left_gaze_x,
                left_gaze_y,
                EyeTrackerConstants.UNDEFINED,
                left_gx,
                left_gy,
                left_gz,
                EyeTrackerConstants.UNDEFINED,  # Left Eye Angle x
                EyeTrackerConstants.UNDEFINED,  # Left Eye Angle y
                EyeTrackerConstants.UNDEFINED,  # Left Camera Sensor position x
                EyeTrackerConstants.UNDEFINED,  # Left Camera Sensor position y
                eye_data_event['left_pupil_diameter'],
                EyeTrackerConstants.PUPIL_DIAMETER_MM,
                EyeTrackerConstants.UNDEFINED,  # Left pupil size measure 2
                EyeTrackerConstants.UNDEFINED,  # Left pupil size measure 2 type
                EyeTrackerConstants.UNDEFINED,  # Left PPD x
                EyeTrackerConstants.UNDEFINED,  # Left PPD y
                EyeTrackerConstants.UNDEFINED,  # Left velocity x
                EyeTrackerConstants.UNDEFINED,  # Left velocity y
                EyeTrackerConstants.UNDEFINED,  # Left velocity xy
                right_gaze_x,
                right_gaze_y,
                EyeTrackerConstants.UNDEFINED,  # Right Eye Angle z
                right_gx,
                right_gy,
                right_gz,
                EyeTrackerConstants.UNDEFINED,  # Right Eye Angle x
                EyeTrackerConstants.UNDEFINED,  # Right Eye Angle y
                EyeTrackerConstants.UNDEFINED,  # Right Camera Sensor position x
                EyeTrackerConstants.UNDEFINED,  # Right Camera Sensor position y
                eye_data_event['right_pupil_diameter'],
                EyeTrackerConstants.PUPIL_DIAMETER_MM,
                EyeTrackerConstants.UNDEFINED,  # Right pupil size measure 2
                EyeTrackerConstants.UNDEFINED,  # Right pupil size measure 2 type
                EyeTrackerConstants.UNDEFINED,  # Right PPD x
                EyeTrackerConstants.UNDEFINED,  # Right PPD y
                EyeTrackerConstants.UNDEFINED,  # right velocity x
                EyeTrackerConstants.UNDEFINED,  # right velocity y
                EyeTrackerConstants.UNDEFINED,  # right velocity xy
                status
            ]
            self._latest_sample = binocSample
            # Cache a convenience gaze position: None if both eyes are
            # invalid, the average when both are valid, otherwise the one
            # valid eye's position.
            if eye_data_event['left_gaze_point_validity'] == eye_data_event['right_gaze_point_validity'] == 0:
                self._latest_gaze_position = None
            elif eye_data_event['left_gaze_point_validity'] == eye_data_event['right_gaze_point_validity'] == 1:
                self._latest_gaze_position = [(right_gaze_x + left_gaze_x) / 2.0,
                                              (right_gaze_y + left_gaze_y) / 2.0]
            elif eye_data_event['left_gaze_point_validity'] == 1:
                self._latest_gaze_position = [left_gaze_x, left_gaze_y]
            elif eye_data_event['right_gaze_point_validity'] == 1:
                self._latest_gaze_position = [right_gaze_x, right_gaze_y]
            self._last_callback_time = logged_time
            return binocSample
        except Exception:
            printExceptionDetailsToStdErr()
            return None
def _eyeTrackerToDisplayCoords(self, eyetracker_point):
"""Converts Tobii gaze positions to the Display device coordinate
space."""
gaze_x, gaze_y = eyetracker_point
left, top, right, bottom = self._display_device.getCoordBounds()
w, h = right - left, top - bottom
x, y = left + w * gaze_x, bottom + h * (1.0 - gaze_y)
return x, y
def _displayToEyeTrackerCoords(self, display_x, display_y):
"""Converts a Display device point to Tobii gaze position coordinate
space."""
left, top, right, bottom = self._display_device.getCoordBounds()
w, h = right - left, top - bottom
return (left - display_x) / w, (top - display_y) / h
    def _close(self):
        """Disconnect from the Tobii hardware, clear the class-level handle,
        then run the base device shutdown."""
        if EyeTracker._tobii:
            EyeTracker._tobii.disconnect()
            # Drop the shared handle so a later connect starts fresh.
            EyeTracker._tobii = None
        EyeTrackerDevice._close(self)
| StarcoderdataPython |
39317 | import requests
import json
import time
import os
import sys
# 24-bit ANSI escape sequences used for colored console output.
green = "\x1b[38;2;0;255;0m"
greenish = "\x1b[38;2;93;173;110m"
red = "\x1b[38;2;255;0;0m"
grey = "\x1b[38;2;193;184;192m"
reset = "\033[0m"  # reset all terminal attributes
clear_line = "\033[0K"  # clear from cursor to end of line
# Maximum repository size in megabytes
MAX_REPO_SIZE = 5


def load_cache():
    """Load cached GitHub search results from ./sources/*.json.

    Returns a list of ``{"url", "score"}`` dicts, de-duplicated by clone URL
    and filtered to repositories no larger than MAX_REPO_SIZE megabytes.
    The score weighs stars above watchers: stars + 0.5 * watchers + 1.
    """
    result = []
    seen = set()
    for file in os.listdir("sources"):
        if not file.endswith(".json"):
            continue
        # Fix: use a context manager so file handles are closed promptly
        # (the original leaked handles via bare open(...).read()).
        with open(os.path.join("sources", file)) as fh:
            payload = json.load(fh)
        for item in payload["items"]:
            # GitHub's `size` field is in kilobytes; convert to (decimal) MB.
            size_in_mb = item["size"] / 1000
            if size_in_mb > MAX_REPO_SIZE:
                continue
            url = item["clone_url"]
            if url not in seen:
                seen.add(url)
                result.append({
                    "url": url,
                    "score": item["stargazers_count"] + 0.5 * item["watchers_count"] + 1,
                })
    return result
def create_cache():
    """Query the GitHub search API for Java repositories and cache each
    result page as ./sources/query_<id>_page_<n>.json.

    Pages already present on disk are skipped, failed requests are retried
    after a pause, and requests are throttled to GitHub's unauthenticated
    search rate limit (10 requests/minute).
    """
    print(green + "Searching github..." + reset)
    if not os.path.exists("sources"):
        os.mkdir("sources")
    queries = ["utilities", "", "useful", "tools"]
    for query_id, query in enumerate(queries):
        page = 1
        print("\r" + clear_line + "\n" + greenish +
              "Searching using query '{}' (id: {})".format(query, query_id))
        while page < 100:
            path = "sources/query_{}_page_{}.json".format(query_id, page)
            if os.path.exists(path):
                # Page already cached from a previous run.
                print("\r" + clear_line + green +
                      "Skipping page {}, using data from cache...".format(page) + reset)
                page += 1
                continue
            else:
                print("\r" + clear_line + green +
                      "Searching page {}... ".format(page) + reset, end="")
                r = requests.get(
                    "https://api.github.com/search/repositories?q={}+language:java&sort=stars&order=desc&page={}".format(query, page))
                if not r.ok:
                    # GitHub caps search results at 1000; move to next query.
                    if "Only the first 1000 search results are available" in r.text:
                        print(
                            "limit reached: only the first 1000 search results are available")
                        break
                    # Any other failure: back off and retry the same page.
                    print(red + "Query failed\n" + r.text + reset)
                    print("Sleeping for some time before retrying")
                    time.sleep(10)
                    continue
                try:
                    data = json.loads(r.text)
                except Exception as e:
                    print("Json parsing failed")
                    print(e)
                    print("Sleeping for some time before retrying", end="")
                    time.sleep(10)
                    continue
                print(green + "done" + reset)
                with open(path, "w") as f:
                    f.write(json.dumps(data))
                page += 1
                # Github rate limit of 10 requests per minute
                print(grey + "Sleeping due to rate limit..." + reset, end="")
                sys.stdout.flush()
                time.sleep(60/10)
if __name__ == "__main__":
    # Populate the local ./sources cache from the GitHub search API, then
    # print the de-duplicated, size-filtered repository list.
    create_cache()
    print(load_cache())
| StarcoderdataPython |
8184302 | <gh_stars>0
from collections import OrderedDict
import einops
import torch
from torch import Tensor
from torch.nn import (
CrossEntropyLoss,
GRU,
Module,
Linear,
Sequential,
Tanh,
)
from torch.optim import Adam
from torch.optim.optimizer import Optimizer
from torchaudio.transforms import MelSpectrogram
class Encoder(Module):
    """Sequence encoder: a placeholder CNN front end followed by a GRU.

    The convolutional stage is not implemented yet (``self.cnn`` is None and
    its call is disabled), so inputs are fed straight into the GRU.
    """

    def __init__(
        self,
        input_size: int = 40,
        hidden_size: int = 128,
        num_layers: int = 1,
    ):
        super().__init__()
        # Reserved for a future convolutional front end.
        self.cnn = None
        self.rnn = GRU(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
        )

    def forward(self, x: Tensor) -> Tensor:
        """Run the GRU over ``x`` and return its per-step outputs."""
        #x_1 = self.cnn(x)
        features = x  # CNN stage disabled; pass input through unchanged
        rnn_output, _hidden = self.rnn(input=features)
        return rnn_output
class AverageAttention(Module):
    """Uniform attention: every one of the T time steps gets weight 1/T.

    forward() returns weights of shape (batch, T, 1), ready to be broadcast
    against a (batch, T, hidden) feature tensor.
    """

    def __init__(self, T: int):
        super().__init__()
        self.T = T

    def forward(self, x: Tensor) -> Tensor:
        """Return uniform attention weights for a batch sized like ``x``.

        Fixes vs. the original: the weight tensor is created on ``x``'s
        device and dtype (the original always allocated a CPU float tensor,
        breaking GPU/AMP use), and the einops rearrange — which only appended
        a trailing axis — is replaced by the equivalent ``unsqueeze(-1)``,
        dropping the third-party dependency.
        """
        alpha = torch.full(
            size=(x.shape[0], self.T),
            fill_value=1 / self.T,
            dtype=x.dtype,
            device=x.device,
        )
        return alpha.unsqueeze(-1)
class SoftAttention(Module):
    """Learned (Bahdanau-style) soft attention producing per-step weights.

    Scores each time step with ``v(tanh(Wb(h)))`` and normalizes the scores
    with a softmax over the time axis, so forward() maps a
    (batch, T, hidden_size) tensor to weights of shape (batch, T, 1) that
    sum to 1 over T.

    Note: the original implementation was a non-functional stub — it called
    ``Linear`` with nonexistent ``in_channels``/``out_channels`` keywords and
    ``None`` sizes, referenced an undefined ``Softmax`` name, and built
    ``Sequential`` from an attribute (``self.blocks``) that was never
    assigned. This version wires the same Wb -> tanh -> v -> softmax pipeline
    with a configurable hidden size (default matches the encoder's 128).
    """

    def __init__(self, hidden_size: int = 128):
        super().__init__()
        self.blocks_ordered_dict = OrderedDict(
            Wb=Linear(
                in_features=hidden_size,
                out_features=hidden_size,
            ),
            tanh=Tanh(),
            v=Linear(
                in_features=hidden_size,
                out_features=1,
                bias=False,
            ),
        )
        self.alpher = Sequential(self.blocks_ordered_dict)

    def forward(self, x: Tensor) -> Tensor:
        """Return attention weights (batch, T, 1) for features ``x``."""
        scores = self.alpher(x)            # (batch, T, 1) unnormalized
        alpha = torch.softmax(scores, dim=1)  # normalize over time steps
        return alpha
class AttentionSpotter(Module):
    """Keyword/command spotter: waveform -> log-mel features -> GRU encoder
    -> attention pooling -> linear classifier over 3 classes.

    The training hooks follow a PyTorch-Lightning-style naming convention
    (training_step, configure_optimizers, ...) but the class itself derives
    from plain ``torch.nn.Module``.
    """

    def __init__(
        self,
        T: int,
        in_channels: int=40,
        hidden_size: int=128,
        learning_rate: float=3e-4,
        device=torch.device('cpu'),
    ):
        super().__init__()
        self.T = T  # number of attention time steps
        self.device = device
        self.learning_rate = learning_rate
        self.criterion = CrossEntropyLoss()
        # 16 kHz audio, `in_channels` mel bands; other STFT settings use
        # torchaudio defaults (tuning knobs left commented out below).
        self.mel_spectrogramer = MelSpectrogram(
            #n_fft=1024,
            sample_rate=16000,
            #win_length=1024,
            #hop_length=256,
            #f_min=0,
            #f_max=800,
            n_mels=in_channels,
        ).to(self.device)
        self.encoder = Encoder(
            input_size=in_channels,
            hidden_size=hidden_size,
            num_layers=1,
        )
        self.attention = AverageAttention(
            T=self.T,
        )
        self.epilog_ordered_dict = OrderedDict(
            U=Linear(
                in_features=hidden_size,
                out_features=3,  # 3 output classes; CE loss applies softmax
                bias=False,
            ),
            #softmax=Softmax(), #TODO remove
        )
        self.epilog = Sequential(self.epilog_ordered_dict)

    def forward(
        self,
        x: Tensor,
    ) -> Tensor:
        """Encode ``x``, attention-pool over time, and return class logits."""
        # NOTE(review): the GRU inside Encoder is not batch_first while the
        # features built in training_step look batch-first — confirm the
        # (seq, batch, feat) vs (batch, seq, feat) layout is consistent.
        h = self.encoder(x)
        alpha = self.attention(h)
        c_0 = alpha * h  # NOTE(review): unused; duplicated by `c` below
        c = (alpha * h).sum(dim=1)
        p = self.epilog(c)
        return p

    def training_step(
        self,
        batch: Tensor,
        batch_idx: int,
    ) -> Tensor:
        """One supervised step: waveform -> log-mel -> logits -> CE loss."""
        waveforms, targets = batch
        waveforms = waveforms.to(self.device)
        targets = targets.to(self.device)
        mel_spec = self.mel_spectrogramer(waveforms)
        # Swap the last two axes so time is the middle dimension.
        transposed_mel_spec = einops.rearrange(mel_spec, 'bs w h -> bs h w')
        # Log-compress mel energies before encoding.
        predictions = self(torch.log(transposed_mel_spec))
        loss = self.criterion(
            input=predictions,
            target=targets,
        )
        return loss

    def training_step_end(self):
        # Hook placeholder (no per-step aggregation needed).
        pass

    def training_epoch_end(self):
        print("Training epoch is over!")

    def validation_step(self, batch, batch_idx):
        # Validation is not implemented yet.
        pass

    def validation_step_end(self):
        pass

    def validation_epoch_end(self):
        print("Validation epoch is over!")

    def configure_optimizers(self) -> Optimizer:
        """Return a plain Adam optimizer over all model parameters."""
        optimizer = Adam(
            params=self.parameters(),
            lr=self.learning_rate,
        )
        return optimizer
| StarcoderdataPython |
1636841 | from concurrent.futures import process
from transformers import pipeline
import re
import torch
class PunctuationModel():
    """Restore punctuation in raw text using a HuggingFace token-
    classification ("ner") pipeline whose labels are punctuation marks
    ("0" meaning none)."""

    def __init__(self, model = "<EMAIL_REDACTED>oliverguhr/fullstop-punctuation-multilang-large") -> None:
        # Run on GPU 0 when available, otherwise on CPU.
        if torch.cuda.is_available():
            self.pipe = pipeline("ner",model, grouped_entities=False, device=0)
        else:
            self.pipe = pipeline("ner",model, grouped_entities=False)

    def preprocess(self,text):
        """Strip existing punctuation (keeping marks that sit between
        digits, e.g. '3.14') and split the text into words."""
        #remove markers except for markers in numbers
        text = re.sub(r"(?<!\d)[.,;:!?](?!\d)","",text)
        #todo: match acronyms https://stackoverflow.com/questions/35076016/regex-to-match-acronyms
        text = text.split()
        return text

    def restore_punctuation(self,text):
        """Return ``text`` with punctuation re-inserted by the model."""
        result = self.predict(self.preprocess(text))
        return self.prediction_to_text(result)

    def overlap_chunks(self,lst, n, stride=0):
        """Yield successive n-sized chunks from lst with stride length of overlap."""
        for i in range(0, len(lst), n-stride):
            yield lst[i:i + n]

    def predict(self,words):
        """Tag each word with the punctuation label predicted to follow it;
        returns a list of [word, label, score] triples, one per input word.
        Long inputs are processed in overlapping 230-word chunks."""
        overlap = 5
        chunk_size = 230
        if len(words) <= chunk_size:
            overlap = 0
        batches = list(self.overlap_chunks(words,chunk_size,overlap))
        # if the last batch is smaller than the overlap,
        # we can just remove it
        if len(batches[-1]) <= overlap:
            batches.pop()
        tagged_words = []
        for batch in batches:
            # use last batch completely
            if batch == batches[-1]:
                overlap = 0
            text = " ".join(batch)
            result = self.pipe(text)
            assert len(text) == result[-1]["end"], "chunk size too large, text got clipped"
            char_index = 0
            result_index = 0
            # Walk the words, consuming pipeline subtokens whose end offset
            # falls inside each word; the last subtoken's label wins.
            for word in batch[:len(batch)-overlap]:
                char_index += len(word) + 1
                # if any subtoken of an word is labled as sentence end
                # we label the whole word as sentence end
                label = 0
                # NOTE(review): `score` is only assigned inside the while
                # loop; a word with no matching subtoken reuses the previous
                # word's score (or raises NameError on the very first word).
                while result_index < len(result) and char_index > result[result_index]["end"] :
                    label = result[result_index]['entity']
                    score = result[result_index]['score']
                    result_index += 1
                tagged_words.append([word,label, score])
        assert len(tagged_words) == len(words)
        return tagged_words

    def prediction_to_text(self,prediction):
        """Rebuild a punctuated string from [word, label, score] triples."""
        result = ""
        for word, label, _ in prediction:
            result += word
            # NOTE(review): labels are compared as strings; an int label 0
            # (word with no matched subtoken in predict()) would raise
            # TypeError in the `in ".,?-:"` test below — confirm unreachable.
            if label == "0":
                result += " "
            if label in ".,?-:":
                result += label+" "
        return result.strip()
if __name__ == "__main__":
    # Quick smoke test: restore punctuation on a short German sample.
    model = PunctuationModel()
    text = "das , ist fies "
    # restore add missing punctuation
    result = model.restore_punctuation(text)
    print(result)
    # Also show the raw per-word label predictions.
    clean_text = model.preprocess(text)
    labled_words = model.predict(clean_text)
    print(labled_words)
12850064 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
#
# Copyright © 2018 Dell Inc. or its subsidiaries. All rights reserved.
# Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
# Other trademarks may be trademarks of their respective owners.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: <NAME>
#
import os
import re
import time
import xml.etree.ElementTree as ET
from enum import Enum
from datetime import datetime
from omsdk.sdkprint import PrettyPrint
from omsdk.sdkcenum import EnumWrapper, TypeHelper
from omsdk.lifecycle.sdklicenseapi import iBaseLicenseApi
from omdrivers.lifecycle.iDRAC.iDRACConfig import LicenseApiOptionsEnum
import base64
import sys
import logging
logger = logging.getLogger(__name__)
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
try:
from pysnmp.hlapi import *
from pysnmp.smi import *
PySnmpPresent = True
except ImportError:
PySnmpPresent = False
from omdrivers.enums.iDRAC.iDRACEnums import *
class iDRACLicense(iBaseLicenseApi):
    """iDRAC implementation of the license API: enumerates licensable
    devices and imports / exports / replaces / deletes licenses, either
    inline (base64 payloads) or through a network share."""

    def __init__(self, entity):
        # PY2 and PY3 differ in super() syntax; support both.
        if PY2:
            super(iDRACLicense, self).__init__(entity)
        else:
            super().__init__(entity)
        self._job_mgr = entity.job_mgr
        self._config_mgr = entity.config_mgr
        # FQDDs of licensable devices; filled lazily by _get_license_json().
        self._license_fqdds = []

    def _get_license_json(self):
        # Lazily fetch and cache license information from the iDRAC entity.
        if not hasattr(self, 'license') or "License" not in self.license:
            self.license = {}
            self.entity._get_entries(self.license, iDRACLicenseEnum)
            if "LicensableDevice" in self.license:
                entries = self.license["LicensableDevice"]
                # A single device comes back as a dict; normalize to a list.
                if isinstance(entries, dict):
                    entries = [entries]
                for entry in entries:
                    self._license_fqdds.append(entry["FQDD"])
        return self.license

    def _get_license_text(self, entitlementId):
        # Export one license and decode its base64 payload into 'License'.
        retVal = self.entity._export_license(id=entitlementId)
        ltext = self.entity._get_field_from_action(retVal,
                                                   "Data", "ExportLicense_OUTPUT", "LicenseFile")
        if ltext:
            retVal['License'] = base64.b64decode(ltext).decode("utf-8")
        return retVal

    def _save_license_text(self, entitlementId, folder):
        # Write the exported license text to <folder>/<entitlementId> and
        # return that path.
        retVal = self._get_license_text(entitlementId)
        with open(os.path.join(folder, entitlementId), "wb") as output:
            output.write(retVal['License'].encode('UTF-8'))
            output.flush()
        return os.path.join(folder, entitlementId)

    def export_license(self, folder):
        """Export every installed license to files under ``folder``;
        returns the list of file paths written ([] on error)."""
        expLic = []
        if not os.path.exists(folder):
            os.makedirs(folder)
        elif not os.path.isdir(folder):
            # replace with exception
            return []
        self._get_license_json()
        if not "License" in self.license:
            # replace with exception
            return []
        llist = self.license["License"]
        # Normalize a single license entry to a list.
        if isinstance(self.license["License"], dict):
            llist = [llist]
        for i in llist:
            entitlementId = i["EntitlementID"]
            expLic.append(self._save_license_text(entitlementId, folder))
        return expLic

    def export_license_share(self, license_share_path):
        """Export every installed license to a network share; returns a
        summary dict with 'Status' and per-license export counters."""
        self._get_license_json()
        if not "License" in self.license:
            return {"l": False}
        llist = self.license["License"]
        if isinstance(self.license["License"], dict):
            llist = [llist]
        retval = {'Status': 'Success', 'Exported': 0, 'Failed to Export': 0}
        for i in llist:
            entitlementId = i["EntitlementID"]
            rjson = self.entity._export_license_share(share=license_share_path,
                                                      creds=license_share_path.creds, id=entitlementId)
            # Each export runs as an async job; block until it completes.
            rjson = self._job_mgr._job_wait(rjson['Message'], rjson)
            if rjson['Status'] == 'Success':
                retval['Exported'] += 1
            else:
                retval['Failed to Export'] += 1
        # Overall failure only when nothing exported and something failed.
        if retval['Exported'] == 0 and retval['Failed to Export'] > 0:
            retval['Status'] = 'Failed'
        return retval

    def _import_license_fqdd(self, license_file, fqdd="iDRAC.Embedded.1", options=LicenseApiOptionsEnum.NoOptions):
        # Read the license file, base64-encode it, and insert a newline every
        # 65 characters before handing the text to the iDRAC import action.
        if not os.path.exists(license_file) or not os.path.isfile(license_file):
            logger.debug(license_file + " is not a file!")
            return False
        content = ''
        with open(license_file, 'rb') as f:
            content = f.read()
        content = bytearray(base64.b64encode(content))
        for i in range(0, len(content) + 65, 65):
            content[i:i] = '\n'.encode()
        return self.entity._import_license(fqdd=fqdd,
                                           options=options, file=content.decode())

    def _import_license_share_fqdd(self, license_share_path, fqdd="iDRAC.Embedded.1",
                                   options=LicenseApiOptionsEnum.NoOptions):
        # Import licenses from a network share, one async job per installed
        # license entry; returns a summary dict (False if none installed).
        self._get_license_json()
        if not "License" in self.license:
            return False
        llist = self.license["License"]
        if isinstance(self.license["License"], dict):
            llist = [llist]
        retval = {'Status': 'Success', 'Imported': 0, 'Failed to Import': 0}
        for i in llist:
            entitlementId = i["EntitlementID"]
            rjson = self.entity._import_license_share(share=license_share_path,
                                                      creds=license_share_path.creds, name="Import",
                                                      fqdd=fqdd, options=options)
            rjson = self._job_mgr._job_wait(rjson['Message'], rjson)
            logger.debug(rjson)
            if rjson['Status'] == 'Success':
                retval['Imported'] += 1
            else:
                retval['Failed to Import'] += 1
        if retval['Imported'] == 0 and retval['Failed to Import'] > 0:
            retval['Status'] = 'Failed'
        return retval

    def _replace_license_fqdd(self, license_file, entitlementId, fqdd="iDRAC.Embedded.1",
                              options=LicenseApiOptionsEnum.NoOptions):
        # NOTE(review): unlike _import_license_fqdd, the file is sent as-is
        # (text mode, no base64 wrapping) — confirm this asymmetry is intended.
        if not os.path.exists(license_file) or not os.path.isfile(license_file):
            logger.debug(license_file + " is not a file!")
            return False
        content = ''
        with open(license_file) as f:
            content = f.read()
        return self.entity._replace_license(id=entitlementId,
                                            fqdd=fqdd, options=options, file=content)

    def _delete_license_fqdd(self, entitlementId, fqdd="iDRAC.Embedded.1", options=LicenseApiOptionsEnum.NoOptions):
        # Delete the license with this entitlement id from the given FQDD.
        return self.entity._delete_license(id=entitlementId,
                                           fqdd=fqdd, options=options)

    @property
    def LicensableDeviceFQDDs(self):
        # FQDD strings of all licensable devices (lazy-loaded).
        self._get_license_json()
        return self._license_fqdds

    @property
    def LicensableDevices(self):
        # Licensable devices resolved to component objects.
        self._get_license_json()
        return list(self._config_mgr._fqdd_to_comp(self._license_fqdds))

    @property
    def Licenses(self):
        # Raw license entries as returned by the iDRAC.
        self._get_license_json()
        return self.license["License"]

    def import_license(self, license_file, component="iDRAC", options=LicenseApiOptionsEnum.NoOptions):
        """Import a license file onto the first FQDD matching ``component``."""
        fqddlist = self._config_mgr._comp_to_fqdd(self.LicensableDeviceFQDDs, component, default=[component])
        return self._import_license_fqdd(license_file, fqdd=fqddlist[0], options=options)

    def import_license_share(self, license_share_path, component="iDRAC", options=LicenseApiOptionsEnum.NoOptions):
        """Import licenses from a network share onto ``component``."""
        fqddlist = self._config_mgr._comp_to_fqdd(self.LicensableDeviceFQDDs, component, default=[component])
        return self._import_license_share_fqdd(license_share_path, fqdd=fqddlist[0], options=options)

    def replace_license(self, license_file, entitlementId, component="iDRAC", options=LicenseApiOptionsEnum.NoOptions):
        """Replace the license identified by ``entitlementId`` on ``component``."""
        fqddlist = self._config_mgr._comp_to_fqdd(self.LicensableDeviceFQDDs, component, default=[component])
        return self._replace_license_fqdd(license_file, entitlementId, fqdd=fqddlist[0], options=options)

    def delete_license(self, entitlementId, component="iDRAC", options=LicenseApiOptionsEnum.NoOptions):
        """Delete the license identified by ``entitlementId`` on ``component``."""
        fqddlist = self._config_mgr._comp_to_fqdd(self.LicensableDeviceFQDDs, component, default=[component])
        return self._delete_license_fqdd(entitlementId, fqdd=fqddlist[0], options=options)
| StarcoderdataPython |
6631100 | <gh_stars>1-10
# Copyright (c) 2011-2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import traceback
import test.functional as tf
from test.functional.s3api.s3_test_client import (
Connection, get_boto3_conn, tear_down_s3)
def setUpModule():
    # Delegate once-per-module setup to the functional test package.
    tf.setup_package()
def tearDownModule():
    # Delegate once-per-module teardown to the functional test package.
    tf.teardown_package()
class S3ApiBase(unittest.TestCase):
    """Base class for s3api functional tests: skips when the middleware is
    not enabled, creates a fresh test connection per test, and provides the
    shared response-header assertions."""

    def __init__(self, method_name):
        super(S3ApiBase, self).__init__(method_name)
        self.method_name = method_name

    def setUp(self):
        if 's3api' not in tf.cluster_info:
            raise tf.SkipTest('s3api middleware is not enabled')
        try:
            self.conn = Connection()
            self.conn.reset()
        except Exception:
            message = '%s got an error during initialize process.\n\n%s' % \
                (self.method_name, traceback.format_exc())
            # TODO: Find a way to make this go to FAIL instead of Error
            self.fail(message)

    def assertCommonResponseHeaders(self, headers, etag=None):
        """
        asserting common response headers with args
        :param headers: a dict of response headers
        :param etag: a string of md5(content).hexdigest() if not given,
                     this won't assert anything about etag. (e.g. DELETE obj)
        """
        # Use the dedicated unittest assertion methods instead of
        # assertTrue(x is not None) / assertTrue(k in d): failures then
        # report the offending value or key rather than "False is not true".
        self.assertIsNotNone(headers['x-amz-id-2'])
        self.assertIsNotNone(headers['x-amz-request-id'])
        self.assertIsNotNone(headers['date'])
        # TODO; requires consideration
        # self.assertIsNotNone(headers['server'])
        if etag is not None:
            self.assertIn('etag', headers)  # sanity
            self.assertEqual(etag, headers['etag'].strip('"'))
class S3ApiBaseBoto3(S3ApiBase):
    """Variant of S3ApiBase that uses a boto3 client connection and cleans
    the S3 state before and after each test."""

    def setUp(self):
        if 's3api' not in tf.cluster_info:
            raise tf.SkipTest('s3api middleware is not enabled')
        try:
            self.conn = get_boto3_conn()
            # Pull the connection details off the boto3 client internals for
            # tests that need them directly.
            self.endpoint_url = self.conn._endpoint.host
            self.access_key = self.conn._request_signer._credentials.access_key
            self.region = self.conn._client_config.region_name
            # Start from a clean S3 state.
            tear_down_s3(self.conn)
        except Exception:
            message = '%s got an error during initialize process.\n\n%s' % \
                (self.method_name, traceback.format_exc())
            # TODO: Find a way to make this go to FAIL instead of Error
            self.fail(message)

    def tearDown(self):
        tear_down_s3(self.conn)
| StarcoderdataPython |
12859456 | <reponame>yingbiaoluo/ocr_pytorch
import os
import cv2
import glob
import logging
import numpy as np
from pathlib import Path
import torch
from torch.autograd import Variable
import torch.distributed as dist
class strLabelConverter(object):
    """Convert between text strings and integer label sequences for CTC.

    Index 0 is reserved for the CTC blank; alphabet characters map to 1..N.
    A sentinel character 'Ω' is appended to the alphabet so out-of-alphabet
    characters always have a valid fallback index.
    """

    def __init__(self, alphabet_):
        """
        字符串标签转换 (string <-> label conversion)
        """
        self.alphabet = alphabet_ + 'Ω'
        # char -> 1-based index (0 is the CTC blank)
        self.dict = {}
        for i, char in enumerate(self.alphabet):
            self.dict[char] = i + 1

    def encode(self, text):
        """Encode strings into a flat index tensor plus per-item lengths.

        Whitespace (spaces and tabs) is stripped first. Characters outside
        the alphabet are replaced by '-' when the alphabet contains it,
        otherwise by the 'Ω' sentinel. (The original unconditionally looked
        up '-', raising KeyError whenever '-' was not in the alphabet.)

        :param text: iterable of strings
        :return: (torch.IntTensor of concatenated indices,
                  torch.IntTensor of per-string lengths)
        """
        length = []
        result = []
        for item in text:
            item = item.replace(' ', '').replace('\t', '')
            length.append(len(item))
            for char in item:
                if char not in self.dict:
                    print('char {} not in alphabets!'.format(char))
                    # Fall back to '-' as before, but never crash when the
                    # alphabet has no '-': use the guaranteed 'Ω' sentinel.
                    char = '-' if '-' in self.dict else 'Ω'
                result.append(self.dict[char])
        return torch.IntTensor(result), torch.IntTensor(length)

    def decode(self, t, length, raw=False):
        """Decode encoded texts back into strs.

        Args:
            t (torch.IntTensor): encoded texts, flattened
                [length_0 + length_1 + ... + length_{n-1}].
            length (torch.IntTensor [n]): length of each text.
            raw (bool): when True, skip the CTC collapse (blanks/repeats kept).

        Raises:
            AssertionError: when the texts and its length does not match.

        Returns:
            text (str or list of str): texts to convert.
        """
        if length.numel() == 1:  # single string
            length = length[0]
            assert t.numel() == length, "text with length: {} does not match declared length: {}".format(t.numel(), length)
            if raw:
                return ''.join([self.alphabet[i - 1] for i in t])
            char_list = []
            for i in range(length):
                # CTC collapse: drop blanks (0) and repeated labels.
                if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])):
                    char_list.append(self.alphabet[t[i] - 1])
            return ''.join(char_list)
        # batch mode: recursively decode each item's slice
        assert t.numel() == length.sum(), "texts with length: {} does not match declared length: {}".format(t.numel(), length.sum())
        texts = []
        index = 0
        for i in range(length.numel()):
            l = length[i]
            texts.append(
                self.decode(
                    t[index:index + l], torch.IntTensor([l]), raw=raw))
            index += l
        return texts
class averager(object):
    """Incremental mean over values held in torch Variables / Tensors."""

    def __init__(self):
        self.reset()

    def add(self, v):
        """Fold the elements of ``v`` into the running sum and count."""
        if isinstance(v, Variable):
            n = v.data.numel()
            total = v.data.sum()
        elif isinstance(v, torch.Tensor):
            n = v.numel()
            total = v.sum()
        self.n_count += n
        self.sum += total

    def reset(self):
        """Clear the accumulated statistics."""
        self.n_count = 0
        self.sum = 0

    def val(self):
        """Return the current mean, or 0 when nothing has been added."""
        if self.n_count == 0:
            return 0
        return self.sum / float(self.n_count)
def generate_alphabets(alphabet_path):
    """Build the character table from a label file.

    Reads the whole file, takes repr() of the joined text (so escapes such
    as newlines become literal characters), keeps each distinct character
    once in sorted order, and drops the space character.

    :param alphabet_path: path to the text-label file.
    :return: the alphabet as a single string.
    """
    with open(alphabet_path, 'r', encoding='utf-8') as fh:
        raw = repr(''.join(fh.readlines()))
    chars = sorted(set(raw))
    if ' ' in chars:
        chars.remove(' ')
    return ''.join(chars)
class AverageMeter(object):
    """Tracks the latest value, running sum, count, and average of a metric."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Zero out every statistic."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record a new observation ``val`` seen ``n`` times."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count

    def __str__(self):
        template = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return template.format(**self.__dict__)
class ProgressMeter(object):
    """Formats and prints per-batch progress lines for a set of meters."""

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        """Print one tab-separated progress line for batch index ``batch``."""
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        parts.extend(str(meter) for meter in self.meters)
        print('\t'.join(parts))

    def _get_batch_fmtstr(self, num_batches):
        # e.g. num_batches=100 -> '[{:3d}/100]'
        width = len(str(num_batches // 1))
        fmt = '{:' + str(width) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def lev_ratio(str_a, str_b):
    """
    Case-insensitive Levenshtein similarity ratio between two strings.

    Uses the classic DP edit-distance matrix with substitution cost 2
    (insert/delete cost 1) and normalizes:
        ratio = (len(a) + len(b) - distance) / (len(a) + len(b))
    so identical strings score 1.0 and disjoint strings 0.0.

    :param str_a: first string
    :param str_b: second string
    :return: similarity ratio in [0, 1]
    """
    str_a = str_a.lower()
    str_b = str_b.lower()
    total = len(str_a) + len(str_b)
    # Fix: guard against division by zero when both strings are empty.
    if total == 0:
        return 1.0
    # Fix: np.int was removed in NumPy >= 1.24; use the builtin int dtype.
    matrix_ed = np.zeros((len(str_a) + 1, len(str_b) + 1), dtype=int)
    matrix_ed[0] = np.arange(len(str_b) + 1)
    matrix_ed[:, 0] = np.arange(len(str_a) + 1)
    for i in range(1, len(str_a) + 1):
        for j in range(1, len(str_b) + 1):
            # cost of deleting a_i
            dist_1 = matrix_ed[i - 1, j] + 1
            # cost of inserting b_j
            dist_2 = matrix_ed[i, j - 1] + 1
            # cost of substituting a_i -> b_j (free when the chars match)
            dist_3 = matrix_ed[i - 1, j - 1] + (2 if str_a[i - 1] != str_b[j - 1] else 0)
            matrix_ed[i, j] = np.min([dist_1, dist_2, dist_3])
    levenshtein_distance = matrix_ed[-1, -1]
    # (Renamed the accumulator: the original shadowed the builtin `sum`.)
    levenshtein_ratio = (total - levenshtein_distance) / total
    return levenshtein_ratio
def set_logging():
    """Configure root logging: timestamped messages at INFO level."""
    logging.basicConfig(level=logging.INFO,
                        format="%(asctime)s %(message)s")
def get_latest_run(search_dir='./runs'):
    """Return the most recently created 'last*.pt' under *search_dir*, or ''."""
    candidates = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
    if not candidates:
        return ''
    return max(candidates, key=os.path.getctime)
def check_file(file):
    """Return *file* unchanged if it exists (or is empty); otherwise search
    the current tree recursively and return the first match. Asserts when no
    match is found."""
    if file == '' or os.path.isfile(file):
        return file
    # '**' with recursive=True matches files at any depth.
    matches = glob.glob('./**/' + file, recursive=True)
    assert len(matches), 'File Not Found: %s' % file
    return matches[0]
def increment_dir(dir, comment=''):
    """Return the next free run directory name, e.g. runs/exp1 -> runs/exp2_comment."""
    dir = str(Path(dir))  # os-agnostic
    existing = sorted(glob.glob(dir + '*'))
    n = 0
    if existing:
        indices = []
        for path in existing:
            # Slice between the common prefix and the first '_' (searched on
            # the full path, matching the original behavior).
            end = path.find('_') if '_' in path else None
            indices.append(int(path[len(dir):end]))
        n = max(indices) + 1
    suffix = ('_' + comment) if comment else ''
    return dir + str(n) + suffix
def xyxy2xywh(x):
    """Convert nx4 corner boxes [x1, y1, x2, y2] to center format [x, y, w, h]."""
    y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
    x1, y1, x2, y2 = x[:, 0], x[:, 1], x[:, 2], x[:, 3]
    y[:, 0] = (x1 + x2) / 2  # center x
    y[:, 1] = (y1 + y2) / 2  # center y
    y[:, 2] = x2 - x1        # width
    y[:, 3] = y2 - y1        # height
    return y
def xywh2xyxy(x):
    """Convert nx4 center boxes [x, y, w, h] to corner format [x1, y1, x2, y2]."""
    y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
    half_w = x[:, 2] / 2
    half_h = x[:, 3] / 2
    y[:, 0] = x[:, 0] - half_w  # top-left x
    y[:, 1] = x[:, 1] - half_h  # top-left y
    y[:, 2] = x[:, 0] + half_w  # bottom-right x
    y[:, 3] = x[:, 1] + half_h  # bottom-right y
    return y
def denoise(image):
    """Denoise a BGR image: grayscale -> non-local means -> Otsu binarization.

    Note: ``cv2.fastNlMeansDenoising`` is slow, so this is unsuitable for
    hot paths (as the original author also remarked).
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    smoothed = cv2.fastNlMeansDenoising(gray, None, h=10, templateWindowSize=7, searchWindowSize=21)
    _, binary = cv2.threshold(smoothed, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return binary
def resize_padding(image, height, width):
    """Scale *image* (HWC) so its height equals *height*, then right-pad with
    white (255) up to *width* if the rescaled image is narrower than *width*.
    """
    # resize: uniform scale so the new height equals the target height
    h, w, c = image.shape
    image = cv2.resize(image, (0, 0), fx=height / h, fy=height / h, interpolation=cv2.INTER_LINEAR)
    # padding onto a white canvas of the requested size
    h, w, c = image.shape
    img = 255. * np.ones((height, width, c))
    if w < width:
        img[:, :w, :] = image
    else:
        # NOTE(review): at this point h == height, so r == 1 and this resize
        # is a no-op -- images wider than *width* are returned at full width
        # rather than squeezed to *width*. Confirm whether this is intended
        # (callers such as padding_image_batch pre-compute a pad width large
        # enough that this branch may never shrink anything).
        r = height / h
        img = cv2.resize(image, (0, 0), fx=r, fy=r, interpolation=cv2.INTER_LINEAR)
    return img
def padding_image_batch(image_batch, height=32, width=480):
    """Resize a batch of HWC images to a common *height*, right-pad them all
    to a shared width, and stack into a float NCHW tensor scaled to [0, 1].

    The shared width is the larger of *width* and the widest image after
    rescaling to *height*.

    :param image_batch: iterable of HWC uint8/float images
    :param height: target height for every image
    :param width: minimum padded width
    :return: float tensor of shape (N, C, height, pad_len)
    """
    aspect_ratios = []
    for image in image_batch:
        h, w, c = image.shape
        aspect_ratios.append(w / h)
    # Use the actual target *height* instead of the previously hard-coded 32,
    # so non-default heights compute the padded width consistently with the
    # resize performed below (behavior is unchanged for the default height=32).
    max_len = int(np.ceil(height * max(aspect_ratios)))
    pad_len = max_len if max_len > width else width
    imgs = []
    for image in image_batch:
        img = resize_padding(image, height, pad_len)
        img = np.transpose(img, (2, 0, 1))  # HWC -> CHW
        imgs.append(img)
    img_batch = torch.from_numpy(np.array(imgs)) / 255.
    return img_batch.float()
# Names of loggers already configured by get_logger (module-level registry).
logger_initialized = {}


def get_logger(name, log_file=None, log_level=logging.INFO):
    """Initialize and get a logger by name.

    If the logger has not been initialized, this method will initialize the
    logger by adding one or two handlers, otherwise the initialized logger
    will be directly returned. During initialization, a StreamHandler will
    always be added. If `log_file` is specified and the process rank is 0,
    a FileHandler will also be added.

    Args:
        name (str): Logger name.
        log_file (str | None): The log filename. If specified, a FileHandler
            will be added to the logger (rank 0 only).
        log_level (int): The logger level. Only rank 0 is affected; other
            ranks are set to ERROR and stay mostly silent.

    Returns:
        logging.Logger: The expected logger.
    """
    logger = logging.getLogger(name)
    # Already set up -- or a child of an initialized logger (e.g. "a.b" after
    # "a"), which inherits its parent's handlers -- so return as-is.
    if name in logger_initialized:
        return logger
    for initialized_name in logger_initialized:
        if name.startswith(initialized_name):
            return logger

    if dist.is_available() and dist.is_initialized():
        rank = dist.get_rank()
    else:
        rank = 0

    handlers = [logging.StreamHandler()]
    # Only rank 0 writes to the log file.
    if log_file is not None and rank == 0:
        handlers.append(logging.FileHandler(log_file, 'w'))

    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    for handler in handlers:
        handler.setFormatter(formatter)
        handler.setLevel(log_level)
        logger.addHandler(handler)

    logger.setLevel(log_level if rank == 0 else logging.ERROR)

    logger_initialized[name] = True
    return logger
| StarcoderdataPython |
8013004 | <reponame>MetricRule/metricrule-agent-python
from unittest import TestCase, main
from metricrule.agent import WSGIMetricsMiddleware
class TestWsgiMiddleware(TestCase):
    """Placeholder test case for WSGIMetricsMiddleware (no tests yet)."""
    pass


# Bug fix: the guard previously compared against 'main', which is never the
# value of __name__, so running this file directly executed no tests.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
from views import db
from _config import DATABASE_PATH
import sqlite3
# from datetime import datetime

# One-off schema migration script: rename the old table, recreate the schema
# via SQLAlchemy (db.create_all), copy the rows across with values for the
# newly added columns, then drop the renamed table.

# migration of tasks table (already performed; kept for reference)
# with sqlite3.connect(DATABASE_PATH) as conn:
#     c = conn.cursor()
#     c.execute('ALTER TABLE tasks RENAME TO old_tasks')
#     db.create_all()
#     c.execute("""SELECT name, due_date, priority, status
#               FROM old_tasks ORDER BY task_id ASC""")
#     data = [(row[0], row[1], row[2], row[3], datetime.now(), 1)
#             for row in c.fetchall()]
#     c.executemany("""INSERT INTO tasks (name, due_date, priority, status,
#                   posted_date, user_id) VALUES (?, ?, ?, ?, ?, ?)""", data)
#     c.execute('DROP TABLE old_tasks')

# migration of users table: add a 'role' column, defaulting every existing
# account to 'user'.
with sqlite3.connect(DATABASE_PATH) as conn:
    c = conn.cursor()
    # Keep the old data around under a temporary name while db.create_all()
    # recreates the 'users' table with the new schema.
    c.execute('ALTER TABLE users RENAME TO old_users')
    db.create_all()
    c.execute("""SELECT name, email, password
              FROM old_users ORDER BY id ASC""")
    data = [(row[0], row[1], row[2], 'user') for row in c.fetchall()]
    c.executemany("""INSERT INTO users (name, email, password, role) VALUES
                  (?, ?, ?, ?)""", data)
    c.execute('DROP TABLE old_users')
| StarcoderdataPython |
9707090 | <reponame>tmcclintock/PyDonJuan
from unittest import TestCase
from donjuan import Hallway, SquareCell
class HallwayTest(TestCase):
    """Unit tests for the Hallway container."""

    def setUp(self):
        super().setUp()
        self.cells = [SquareCell() for _ in range(3)]

    def test_smoke(self):
        hallway = Hallway()
        assert hallway is not None
        assert hallway.name == ""

    def test_cells_ordered(self):
        hallway = Hallway(self.cells)
        # Endpoints and both cell views must reflect the construction order.
        assert hallway.start_cell is self.cells[0]
        assert hallway.end_cell is self.cells[-1]
        assert len(hallway.ordered_cells) == 3
        assert len(hallway.cells) == 3
        for cell in hallway.ordered_cells:
            assert cell in hallway.cells

    def test_get_coordinate_path(self):
        path_cells = [SquareCell(coordinates=(i, 0)) for i in range(10)]
        hallway = Hallway(path_cells)
        for index, coords in enumerate(hallway.get_coordinate_path()):
            assert coords == (index, 0)
| StarcoderdataPython |
8129263 | # -*- coding: utf-8 -*-
"""
Created on Wed May 15 18:25:03 2019
@author: <NAME>
"""
import tweepy
from tweepy import OAuthHandler
# Twitter API credentials -- fill these in before running.
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
ACCESS_KEY = ''
ACCESS_SECRET = ''

# OAuth 1.0a user-context authentication.
auth = OAuthHandler(CONSUMER_KEY,CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
#auth = tweepy.AppAuthHandler(CONSUMER_KEY,CONSUMER_SECRET)
# wait_on_rate_limit makes tweepy sleep through rate-limit windows instead of raising.
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)

# Module-global accumulator: getTweet() fills this with one user's timeline.
alltweets = []
def getTweet(screen_name):
    """Fetch the full (API-limited) timeline of *screen_name* into the
    module-global ``alltweets`` list, paging backwards with ``max_id``.

    :param screen_name: Twitter handle whose timeline is fetched
    """
    alltweets.clear()
    new_tweets = api.user_timeline(screen_name=screen_name, count=200, tweet_mode="extended")
    alltweets.extend(new_tweets)
    # Bug fix: a user with zero tweets previously crashed with IndexError
    # on ``alltweets[-1]``.
    if not alltweets:
        return
    oldest = alltweets[-1].id - 1
    while len(new_tweets) > 0:
        # Page backwards: max_id is one below the oldest tweet seen so far.
        new_tweets = api.user_timeline(screen_name=screen_name, count=200,
                                       max_id=oldest, tweet_mode="extended")
        alltweets.extend(new_tweets)
        oldest = alltweets[-1].id - 1
replies_id=[]  # NOTE(review): never written or read below -- appears unused.
replied = []   # screen_names that replied to any tweet in id_parse
name=['BarcaWorldwide', 'Zuby2510', 'GhayasAdam']  # accounts whose timelines are scanned
id_parse = ['1195425989540663303']  # seed tweet id; found replies are appended
count=0        # total replies found
id_count = len(id_parse)
name_count = len(name)
name_ind = 0
id_ind = 0
# Breadth-first walk over the reply tree: the outer loop consumes the growing
# id_parse queue; the inner loop scans each account's timeline for replies to
# the current tweet id.
while id_count > 0:
    while name_count > 0:
        getTweet(name[name_ind])
        for tweet in alltweets:
            if hasattr(tweet, 'in_reply_to_status_id_str'):
                if (tweet.in_reply_to_status_id_str==id_parse[id_ind]):
                    print(tweet.full_text)
                    # Found replies become new roots so replies-to-replies
                    # are discovered in later outer iterations.
                    id_parse.append(tweet.id_str)
                    id_count += 1
                    count += 1
                    replied.append(name[name_ind])
        name_ind += 1
        name_count -= 1
    # Advance to the next queued tweet id and rescan all accounts.
    id_ind += 1
    id_count -= 1
    name_ind = 0
    name_count = len(name)
print("Replies Count: ")
print(count)
print("Repliers screen_names: ")
print(replied)
137402 | <filename>src/c3nav/editor/views/changes.py
from itertools import chain
from operator import itemgetter
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import User
from django.core.cache import cache
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import NoReverseMatch, reverse
from django.utils.text import format_lazy
from django.utils.translation import get_language_info
from django.utils.translation import ugettext_lazy as _
from c3nav.editor.forms import ChangeSetForm, RejectForm
from c3nav.editor.models import ChangeSet
from c3nav.editor.views.base import sidebar_view
from c3nav.editor.wrappers import is_created_pk
from c3nav.mapdata.models.locations import LocationRedirect, LocationSlug
@sidebar_view(select_related=('last_update', 'last_state_update', 'last_change', 'author'))
def changeset_detail(request, pk):
    """Show a change set and process its lifecycle actions.

    GET renders the change set together with a per-object breakdown of its
    changes (served from the cache when a snapshot exists). POST expects
    exactly one action flag ('restore', 'activate', 'propose', 'unpropose',
    'review', 'reject', 'unreject', 'apply' or 'delete'), performs it under
    the change set's edit lock, and redirects. Raises Http404 when the
    change set is not visible to the requesting user.
    """
    changeset = request.changeset
    active = True
    if str(pk) != str(request.changeset.pk):
        # Viewing a change set other than the session's active one.
        active = False
        qs = ChangeSet.qs_for_request(request).select_related('last_update', 'last_state_update',
                                                              'last_change', 'author')
        changeset = get_object_or_404(qs, pk=pk)

    if not changeset.can_see(request):
        raise Http404

    can_edit = changeset.can_edit(request)
    can_delete = changeset.can_delete(request)

    if request.method == 'POST':
        # --- restore a previously deleted object ----------------------------
        restore = request.POST.get('restore')
        if restore and restore.isdigit():
            with changeset.lock_to_edit(request) as changeset:
                if changeset.can_edit(request):
                    try:
                        changed_object = changeset.changed_objects_set.get(pk=restore)
                    except Exception:
                        # Unknown pk: silently ignore and just redirect back.
                        pass
                    else:
                        try:
                            changed_object.restore()
                            messages.success(request, _('Object has been successfully restored.'))
                        except PermissionError:
                            messages.error(request, _('You cannot restore this object, because it depends on '
                                                      'a deleted object or it would violate a unique contraint.'))
                else:
                    messages.error(request, _('You can not edit changes on this change set.'))

            return redirect(reverse('editor.changesets.detail', kwargs={'pk': changeset.pk}))

        # --- make this the session's active change set ----------------------
        elif request.POST.get('activate') == '1':
            with changeset.lock_to_edit(request) as changeset:
                if not changeset.closed and changeset.can_edit:
                    changeset.activate(request)
                    messages.success(request, _('You activated this change set.'))
                else:
                    messages.error(request, _('You can not activate this change set.'))
            return redirect(reverse('editor.changesets.detail', kwargs={'pk': changeset.pk}))

        # --- author workflow: propose / unpropose ---------------------------
        elif request.POST.get('propose') == '1':
            if not request.user.is_authenticated:
                messages.info(request, _('You need to log in to propose changes.'))
                return redirect(reverse('editor.login')+'?r='+request.path)

            with changeset.lock_to_edit(request) as changeset:
                if not changeset.title or not changeset.description:
                    messages.warning(request, _('You need to add a title an a description to propose this change set.'))
                    return redirect(reverse('editor.changesets.edit', kwargs={'pk': changeset.pk}))

                if changeset.can_propose(request):
                    changeset.propose(request.user)
                    messages.success(request, _('You proposed your changes.'))
                else:
                    messages.error(request, _('You cannot propose this change set.'))

            return redirect(reverse('editor.changesets.detail', kwargs={'pk': changeset.pk}))

        elif request.POST.get('unpropose') == '1':
            with changeset.lock_to_edit(request) as changeset:
                if changeset.can_unpropose(request):
                    changeset.unpropose(request.user)
                    messages.success(request, _('You unproposed your changes.'))
                else:
                    messages.error(request, _('You cannot unpropose this change set.'))
            return redirect(reverse('editor.changesets.detail', kwargs={'pk': changeset.pk}))

        # --- reviewer workflow: review / reject / unreject / apply ----------
        elif request.POST.get('review') == '1':
            with changeset.lock_to_edit(request) as changeset:
                if changeset.can_start_review(request):
                    changeset.start_review(request.user)
                    messages.success(request, _('You are now reviewing these changes.'))
                else:
                    messages.error(request, _('You cannot review these changes.'))
            return redirect(reverse('editor.changesets.detail', kwargs={'pk': changeset.pk}))

        elif request.POST.get('reject') == '1':
            with changeset.lock_to_edit(request) as changeset:
                if not changeset.can_end_review(request):
                    messages.error(request, _('You cannot reject these changes.'))
                    return redirect(reverse('editor.changesets.detail', kwargs={'pk': changeset.pk}))

                # Rejection needs an extra confirmation round-trip with a
                # comment form; the first POST renders the form, the second
                # (with reject_confirm=1) performs the rejection.
                if request.POST.get('reject_confirm') == '1':
                    form = RejectForm(data=request.POST)
                    if form.is_valid():
                        changeset.reject(request.user, form.cleaned_data['comment'], form.cleaned_data['final'])
                        messages.success(request, _('You rejected these changes.'))
                        return redirect(reverse('editor.changesets.detail', kwargs={'pk': changeset.pk}))
                else:
                    form = RejectForm()

                return render(request, 'editor/changeset_reject.html', {
                    'changeset': changeset,
                    'form': form,
                })

        elif request.POST.get('unreject') == '1':
            with changeset.lock_to_edit(request) as changeset:
                if not changeset.can_unreject(request):
                    messages.error(request, _('You cannot unreject these changes.'))
                    return redirect(reverse('editor.changesets.detail', kwargs={'pk': changeset.pk}))
                changeset.unreject(request.user)
                messages.success(request, _('You unrejected these changes.'))
            return redirect(reverse('editor.changesets.detail', kwargs={'pk': changeset.pk}))

        elif request.POST.get('apply') == '1':
            with changeset.lock_to_edit(request) as changeset:
                if not changeset.can_end_review(request):
                    messages.error(request, _('You cannot accept and apply these changes.'))
                    return redirect(reverse('editor.changesets.detail', kwargs={'pk': changeset.pk}))

                if request.POST.get('apply_confirm') == '1':
                    changeset.apply(request.user)
                    messages.success(request, _('You accepted and applied these changes.'))
                    return redirect(reverse('editor.changesets.detail', kwargs={'pk': changeset.pk}))

                return render(request, 'editor/changeset_apply.html', {})

        elif request.POST.get('delete') == '1':
            with changeset.lock_to_edit(request) as changeset:
                if not changeset.can_delete(request):
                    messages.error(request, _('You cannot delete this change set.'))
                    # NOTE(review): there is no redirect here -- the
                    # confirmation page below is still rendered after this
                    # error; confirm whether that is intended.

                if request.POST.get('delete_confirm') == '1':
                    changeset.delete()
                    messages.success(request, _('You deleted this change set.'))
                    if request.user.is_authenticated:
                        return redirect(reverse('editor.users.detail', kwargs={'pk': request.user.pk}))
                    else:
                        return redirect(reverse('editor.index'))

                return render(request, 'editor/delete.html', {
                    'model_title': ChangeSet._meta.verbose_name,
                    'obj_title': changeset.title,
                })

    changeset.fill_changes_cache()

    ctx = {
        'changeset': changeset,
        'can_edit': can_edit,
        'can_delete': can_delete,
        'can_propose': changeset.can_propose(request),
        'can_unpropose': changeset.can_unpropose(request),
        'can_start_review': changeset.can_start_review(request),
        'can_end_review': changeset.can_end_review(request),
        'can_unreject': changeset.can_unreject(request),
        'active': active,
    }

    # Serve the (expensive) per-object change list from the cache when a
    # snapshot exists for this change-set state and edit permission.
    cache_key = '%s:%s:%s:view_data' % (changeset.cache_key_by_changes,
                                        changeset.last_update_id,
                                        int(can_edit))
    changed_objects_data = cache.get(cache_key)
    if changed_objects_data:
        ctx['changed_objects'] = changed_objects_data
        return render(request, 'editor/changeset.html', ctx)

    objects = changeset.get_objects()

    changed_objects_data = []

    # Collect added/removed redirect slugs, grouped by their target's pk.
    added_redirects = {}
    removed_redirects = {}
    for changed_object in changeset.changed_objects.get(LocationRedirect, {}).values():
        if changed_object.is_created == changed_object.deleted:
            # Created and deleted within this change set (or neither
            # created nor deleted): no net redirect change to show.
            continue
        obj = objects[LocationRedirect][changed_object.obj_pk]
        redirect_list = (removed_redirects if changed_object.deleted else added_redirects)
        redirect_list.setdefault(obj.target_id, []).append(obj.slug)

    # Redirect targets without a change entry of their own still need one
    # so their redirect-slug changes can be displayed on them.
    redirect_changed_objects = []
    for pk in set(added_redirects.keys()) | set(removed_redirects.keys()):
        obj = objects[LocationSlug][pk]
        model = obj.__class__
        try:
            changeset.changed_objects[model][pk]
        except KeyError:
            redirect_changed_objects.append((model, {pk: changeset.get_changed_object(obj)}))

    for model, changed_objects in chain(changeset.changed_objects.items(), redirect_changed_objects):
        if model == LocationRedirect:
            # Redirects are rendered as slug changes on their targets,
            # not as standalone entries.
            continue
        # NOTE: this loop variable shadows the view's *pk* argument.
        for pk, changed_object in changed_objects.items():
            obj = objects[model][pk]
            obj_desc = format_lazy(_('{model} #{id}'), model=obj.__class__._meta.verbose_name, id=pk)

            if is_created_pk(pk):
                obj_still_exists = pk in changeset.created_objects.get(obj.__class__, ())
            else:
                obj_still_exists = pk not in changeset.deleted_existing.get(obj.__class__, ())

            edit_url = None
            if obj_still_exists and can_edit and not isinstance(obj, LocationRedirect):
                reverse_kwargs = {'pk': obj.pk}
                # Nested editor URLs also need the enclosing space or level.
                if hasattr(obj, 'space_id'):
                    reverse_kwargs['space'] = obj.space_id
                elif hasattr(obj, 'level_id'):
                    reverse_kwargs['level'] = obj.level_id
                try:
                    edit_url = reverse('editor.' + obj.__class__._meta.default_related_name + '.edit',
                                       kwargs=reverse_kwargs)
                except NoReverseMatch:
                    pass

            changes = []
            missing_dependencies = changed_object.get_missing_dependencies()
            unique_collisions = changed_object.get_unique_collisions()
            changed_object_data = {
                'model': obj.__class__,
                'model_title': obj.__class__._meta.verbose_name,
                'pk': changed_object.pk,
                'desc': obj_desc,
                'title': obj.title if getattr(obj, 'titles', None) else None,
                'changes': changes,
                'edit_url': edit_url,
                'deleted': changed_object.deleted,
                'missing_dependencies': missing_dependencies,
                'unique_collisions': unique_collisions,
                # Sort key: (created-and-deleted last, existing objects after created ones).
                'order': (changed_object.deleted and changed_object.is_created, not changed_object.is_created),
            }
            changed_objects_data.append(changed_object_data)

            form_fields = changeset.wrap_model(type(obj)).EditorForm._meta.fields

            if changed_object.is_created:
                changes.append({
                    'icon': 'plus',
                    'class': 'success',
                    'empty': True,
                    'title': _('created'),
                })

            update_changes = []

            for name, value in changed_object.updated_fields.items():
                change_data = {
                    'icon': 'option-vertical',
                    'class': 'muted',
                }
                if name == 'geometry':
                    change_data.update({
                        'icon': 'map-marker',
                        'class': 'info',
                        'empty': True,
                        'title': _('created geometry') if changed_object.is_created else _('edited geometry'),
                        'order': (8,),
                    })
                elif name == 'data':
                    change_data.update({
                        'icon': 'signal',
                        'class': 'info',
                        'empty': True,
                        'title': _('created scan data') if changed_object.is_created else _('edited scan data'),
                        'order': (9,),
                    })
                else:
                    if '__i18n__' in name:
                        # Translated field: the key is '<field>__i18n__<lang>'.
                        orig_name, i18n, lang = name.split('__')
                        lang_info = get_language_info(lang)
                        field = model._meta.get_field(orig_name)
                        field_title = format_lazy(_('{field_name} ({lang})'),
                                                  field_name=field.verbose_name,
                                                  lang=lang_info['name_translated'])
                        field_value = str(value)
                        if field_value:
                            getattr(obj, field.attname)[lang] = field_value
                        else:
                            getattr(obj, field.attname).pop(lang, None)
                        change_data.update({
                            'order': (4, tuple(code for code, title in settings.LANGUAGES).index(lang)),
                        })
                    else:
                        field = model._meta.get_field(name)
                        field_title = field.verbose_name
                        field_value = field.to_python(value)
                        if field.related_model is not None:
                            # Show the related object's display name instead of its pk.
                            if issubclass(field.related_model, User):
                                field_value = objects[field.related_model][field_value].username
                            else:
                                field_value = objects[field.related_model][field_value].title
                            change_data.update({
                                'missing_dependency': field.name in missing_dependencies,
                            })
                        if name in unique_collisions:
                            change_data.update({
                                'unique_collision': field.name in unique_collisions,
                            })
                        # Display order: fields not on the form first (0),
                        # slug next (1), the rest by form field order (5).
                        order = 5
                        if name == 'slug':
                            order = 1
                        if name not in form_fields:
                            order = 0
                        change_data.update({
                            'order': (order, form_fields.index(name) if order else 1),
                        })

                    if field_value == '' or field_value is None:
                        change_data.update({
                            'empty': True,
                            'title': format_lazy(_('remove {field_title}'), field_title=field_title),
                        })
                    else:
                        change_data.update({
                            'title': field_title,
                            'value': field_value,
                        })
                update_changes.append(change_data)

            changes.extend(sorted(update_changes, key=itemgetter('order')))

            # Many-to-many additions and removals.
            for m2m_mode in ('m2m_added', 'm2m_removed'):
                m2m_list = getattr(changed_object, m2m_mode).items()
                for name, values in sorted(m2m_list, key=lambda nv: form_fields.index(nv[0])):
                    field = model._meta.get_field(name)
                    for value in values:
                        changes.append({
                            'icon': 'chevron-right' if m2m_mode == 'm2m_added' else 'chevron-left',
                            'class': 'info',
                            'title': field.verbose_name,
                            'value': objects[field.related_model][value].title,
                        })

            # Redirect slugs pointing at this object (collected above).
            if isinstance(obj, LocationSlug):
                for slug in added_redirects.get(obj.pk, ()):
                    changes.append({
                        'icon': 'chevron-right',
                        'class': 'info',
                        'title': _('Redirect slugs'),
                        'value': slug,
                    })
                for slug in removed_redirects.get(obj.pk, ()):
                    changes.append({
                        'icon': 'chevron-left',
                        'class': 'info',
                        'title': _('Redirect slugs'),
                        'value': slug,
                    })

            if changed_object.deleted:
                changes.append({
                    'icon': 'minus',
                    'class': 'danger',
                    'empty': True,
                    'title': _('deleted'),
                    'order': (9,),
                })

    changed_objects_data = sorted(changed_objects_data, key=itemgetter('order'))
    # Cache the rendered change list for 5 minutes under the state-specific key.
    cache.set(cache_key, changed_objects_data, 300)
    ctx['changed_objects'] = changed_objects_data

    return render(request, 'editor/changeset.html', ctx)
@sidebar_view
def changeset_edit(request, pk):
    """Edit the title and description of a change set.

    Looks the change set up by *pk* when it is not the session's active one.
    On a valid POST, records a new update entry and redirects to the detail
    view; otherwise (GET or invalid POST) renders the edit form.
    """
    changeset = request.changeset
    if str(pk) != str(request.changeset.pk):
        changeset = get_object_or_404(ChangeSet.qs_for_request(request), pk=pk)

    # Hold the edit lock for the whole request so the permission check and
    # the save cannot interleave with concurrent edits.
    with changeset.lock_to_edit(request) as changeset:
        if not changeset.can_edit(request):
            messages.error(request, _('You cannot edit this change set.'))
            return redirect(reverse('editor.changesets.detail', kwargs={'pk': changeset.pk}))
        if request.method == 'POST':
            form = ChangeSetForm(instance=changeset, data=request.POST)
            if form.is_valid():
                changeset = form.instance
                # Each successful edit is recorded via changeset.updates and
                # becomes the new last_update.
                update = changeset.updates.create(user=request.user,
                                                  title=changeset.title, description=changeset.description)
                changeset.last_update = update
                changeset.save()
                return redirect(reverse('editor.changesets.detail', kwargs={'pk': changeset.pk}))
        else:
            form = ChangeSetForm(instance=changeset)
        # Invalid POSTs fall through to here and re-render with form errors.
        return render(request, 'editor/changeset_edit.html', {
            'changeset': changeset,
            'form': form,
        })
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.