| content (string, 0 to 1.05M chars) | origin (2 string classes) | type (2 string classes) |
|---|---|---|
import torch
import torch.nn as nn
class Ensemble(nn.Module):
"""
    Ensemble decoding.
    Decodes using multiple models simultaneously.
    Note:
        Do not use this class directly; use one of the subclasses.
"""
def __init__(self, models):
super(Ensemble, self).__init__()
self.models = models
self.num_models = len(models)
def forward(self, *args, **kwargs):
raise NotImplementedError
class BasicEnsemble(Ensemble):
"""
Basic ensemble decoding.
Decodes using multiple models simultaneously,
combining their prediction distributions by adding.
    All models in the ensemble must share the same set of target characters.
"""
def __init__(self, models):
super(BasicEnsemble, self).__init__(models)
def forward(self, inputs, input_lengths):
hypothesis = None
with torch.no_grad():
for model in self.models:
if hypothesis is None:
hypothesis = model(inputs, input_lengths, teacher_forcing_ratio=0.0)
else:
hypothesis += model(inputs, input_lengths, teacher_forcing_ratio=0.0)
return hypothesis
class WeightedEnsemble(Ensemble):
"""
Weighted ensemble decoding.
Decodes using multiple models simultaneously,
combining their prediction distributions by weighted sum.
    All models in the ensemble must share the same set of target characters.
"""
def __init__(self, models, dim=128):
super(WeightedEnsemble, self).__init__(models)
self.meta_classifier = nn.Sequential(
nn.Linear(self.num_models, dim),
nn.ELU(inplace=True),
nn.Linear(dim, self.num_models)
)
def forward(self, inputs, input_lengths):
hypothesis = None
outputs = list()
weights = torch.FloatTensor([1.] * self.num_models)
        # the models' parameters are fixed
with torch.no_grad():
for model in self.models:
outputs.append(model(inputs, input_lengths, teacher_forcing_ratio=0.0))
weights = self.meta_classifier(weights)
for (output, weight) in zip(outputs, weights):
if hypothesis is None:
hypothesis = output * weight
else:
hypothesis += output * weight
return hypothesis
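# Usage sketch (an illustration added here, not part of the original file;
# `model_a` and `model_b` stand for trained models sharing the forward
# signature model(inputs, input_lengths, teacher_forcing_ratio)):
#   ensemble = BasicEnsemble([model_a, model_b])
#   hypothesis = ensemble(inputs, input_lengths)   # summed distributions
#   weighted = WeightedEnsemble([model_a, model_b])
#   hypothesis = weighted(inputs, input_lengths)   # learned weighted sum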
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
from gui.shared.tooltips.module import ModuleTooltipBlockConstructor
ModuleTooltipBlockConstructor.MAX_INSTALLED_LIST_LEN = 1000
print '[LOAD_MOD]: [mod_tooltipsCountItemsLimitExtend 1.00 (11-05-2018), by spoter, gox]'
| nilq/baby-python | python |
from .utils import validator
@validator
def ipv4(value):
"""
    Return whether or not the given value is a valid IP version 4 address.
This validator is based on `WTForms IPAddress validator`_
.. _WTForms IPAddress validator:
https://github.com/wtforms/wtforms/blob/master/wtforms/validators.py
Examples::
>>> ipv4('123.0.0.7')
True
>>> ipv4('900.80.70.11')
ValidationFailure(func=ipv4, args={'value': '900.80.70.11'})
.. versionadded:: 0.2
:param value: IP address string to validate
"""
parts = value.split('.')
if len(parts) == 4 and all(x.isdigit() for x in parts):
numbers = list(int(x) for x in parts)
return all(num >= 0 and num < 256 for num in numbers)
return False
@validator
def ipv6(value):
"""
    Return whether or not the given value is a valid IP version 6 address.
This validator is based on `WTForms IPAddress validator`_.
.. _WTForms IPAddress validator:
https://github.com/wtforms/wtforms/blob/master/wtforms/validators.py
Examples::
>>> ipv6('abcd:ef::42:1')
True
>>> ipv6('abc.0.0.1')
ValidationFailure(func=ipv6, args={'value': 'abc.0.0.1'})
.. versionadded:: 0.2
:param value: IP address string to validate
"""
parts = value.split(':')
if len(parts) > 8:
return False
num_blank = 0
for part in parts:
if not part:
num_blank += 1
else:
try:
value = int(part, 16)
except ValueError:
return False
else:
if value < 0 or value >= 65536:
return False
if num_blank < 2:
return True
elif num_blank == 2 and not parts[0] and not parts[1]:
return True
return False
| nilq/baby-python | python |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""TC77: Serially accessible digital temperature sensor particularly suited for low cost and small form-factor applications."""
__author__ = "ChISL"
__copyright__ = "TBD"
__credits__ = ["Microchip"]
__license__ = "TBD"
__version__ = "0.1"
__maintainer__ = "https://chisl.io"
__email__ = "info@chisl.io"
__status__ = "Test"
from TC77_constants import *
# name: TC77
# description: Serially accessible digital temperature sensor particularly suited for low cost and small form-factor applications.
# manuf: Microchip
# version: 0.1
# url: http://ww1.microchip.com/downloads/en/DeviceDoc/20092B.pdf
# date: 2016-08-17
# Derive from this class and implement read and write
class TC77_Base:
"""Serially accessible digital temperature sensor particularly suited for low cost and small form-factor applications."""
# Register CONFIG
# Select either Shutdown, Continuous Conversion or Test modes:
def setCONFIG(self, val):
"""Set register CONFIG"""
self.write(REG.CONFIG, val, 16)
def getCONFIG(self):
"""Get register CONFIG"""
return self.read(REG.CONFIG, 16)
# Bits CONFIG
# Register TEMP
# holds the temperature conversion data.
def setTEMP(self, val):
"""Set register TEMP"""
self.write(REG.TEMP, val, 16)
def getTEMP(self):
"""Get register TEMP"""
return self.read(REG.TEMP, 16)
# Bits TEMP
    # the 13-bit two's complement data from the temperature conversion
# Bits FLAG_COMPLETE
    # Bit 2 is set to a logic '1' after
# completion of the first temperature conversion following a power-up or reset event.
# Bit 2 is set to a logic 0 during the time needed to complete the first
# temperature conversion. Therefore, the status of bit 2 can be monitored to indicate
# that the TC77 has completed the first temperature conversion.
# Bits unused_0
# Bits 0 and 1 are undefined and will be tri-state outputs during a read sequence.
# Register M_ID
    # Manufacturer's identification code
def setM_ID(self, val):
"""Set register M_ID"""
self.write(REG.M_ID, val, 16)
def getM_ID(self):
"""Get register M_ID"""
return self.read(REG.M_ID, 16)
    # Bits ID
    # Bits unused_0
    #     Bits 7:2 are set to 0
    # Bits unused_1
    #     Bits 1:0 are undefined and will be tri-state outputs during a read sequence
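# Minimal concrete-subclass sketch (illustrative only): an in-memory register
# store stands in for the real serial transport the datasheet describes.
class TC77_Fake(TC77_Base):
    def __init__(self):
        self.regs = {}
    def write(self, register, value, n_bits):
        self.regs[register] = value
    def read(self, register, n_bits):
        return self.regs.get(register, 0)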
| nilq/baby-python | python |
file = open('Day 10 input.txt','r')
#file = open('Advent-of-Code-2021\\Day 10 testin.txt','r')
illegal = [0,0,0,0]
completescores = []
for line in file:
line = line.strip()
illegalflag = False
stack = []
for char in line:
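        # ASCII codes: 40 '(', 91 '[', 123 '{', 60 '<' open a chunk;
        # 41 ')', 93 ']', 125 '}', 62 '>' are the matching closers.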
if ((ord(char) == 40) or (ord(char) == 91) or (ord(char) == 123) or (ord(char) == 60)):
stack.append(char)
continue
opener = stack.pop()
if ((ord(opener) == 40) and (ord(char) == 41)):
continue
if ((ord(opener) == 91) and (ord(char) == 93)):
continue
if ((ord(opener) == 123) and (ord(char) == 125)):
continue
if ((ord(opener) == 60) and (ord(char) == 62)):
continue
if (ord(char) == 41):
illegal[0] = illegal[0] + 1
illegalflag = True
break
if (ord(char) == 93):
illegal[1] = illegal[1] + 1
illegalflag = True
break
if (ord(char) == 125):
illegal[2] = illegal[2] + 1
illegalflag = True
break
if (ord(char) == 62):
illegal[3] = illegal[3] + 1
illegalflag = True
break
    if illegalflag:
continue
completescore = 0
    while stack:
item = stack.pop()
completescore = completescore * 5
if (ord(item) == 40):
completescore = completescore + 1
continue
if (ord(item) == 91):
completescore = completescore + 2
continue
if (ord(item) == 123):
completescore = completescore + 3
continue
if (ord(item) == 60):
completescore = completescore + 4
continue
completescores.append(completescore)
print(sorted(completescores)[len(completescores)//2])
| nilq/baby-python | python |
import numpy as np
import pyautogui
def screenshot(bounds=None):
image = pyautogui.screenshot()
open_cv_image = np.array(image)
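    # PIL returns RGB; reversing the channel axis converts to OpenCV's BGR order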
open_cv_image = open_cv_image[:, :, ::-1]
if bounds is not None:
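        # numpy images index as [row, col], so bounds[0] selects rows and bounds[1] columns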
x = bounds[0]
y = bounds[1]
open_cv_image = open_cv_image[x[0]:x[1], y[0]:y[1]]
return open_cv_image
| nilq/baby-python | python |
from pathlib import PurePath
from typing import Dict, List
from lab import util
from lab.logger import internal
from .indicators import Indicator, Scalar
from .writers import Writer
class Store:
indicators: Dict[str, Indicator]
def __init__(self, logger: 'internal.LoggerInternal'):
self.values = {}
# self.queues = {}
# self.histograms = {}
# self.pairs: Dict[str, List[Tuple[int, int]]] = {}
# self.scalars = {}
self.__logger = logger
self.indicators = {}
self.__indicators_file = None
def save_indicators(self, file: PurePath):
self.__indicators_file = file
indicators = {k: ind.to_dict() for k, ind in self.indicators.items()}
with open(str(file), "w") as file:
file.write(util.yaml_dump(indicators))
def add_indicator(self, indicator: Indicator):
"""
### Add an indicator
"""
assert indicator.name not in self.indicators
self.indicators[indicator.name] = indicator
indicator.clear()
if self.__indicators_file is not None:
self.save_indicators(self.__indicators_file)
def _store_list(self, items: List[Dict[str, float]]):
for item in items:
self.store(**item)
def _store_kv(self, k, v):
if k not in self.indicators:
self.__logger.add_indicator(Scalar(k, True))
self.indicators[k].collect_value(v)
def _store_kvs(self, **kwargs):
for k, v in kwargs.items():
self._store_kv(k, v)
def store(self, *args, **kwargs):
"""
### Stores a value in the logger.
This may be added to a queue, a list or stored as
a TensorBoard histogram depending on the
type of the indicator.
"""
assert len(args) <= 2
if len(args) == 0:
self._store_kvs(**kwargs)
elif len(args) == 1:
assert not kwargs
assert isinstance(args[0], list)
self._store_list(args[0])
elif len(args) == 2:
assert isinstance(args[0], str)
if isinstance(args[1], list):
for v in args[1]:
self._store_kv(args[0], v)
else:
self._store_kv(args[0], args[1])
def clear(self):
for k, v in self.indicators.items():
v.clear()
def write(self, writer: Writer, global_step):
return writer.write(global_step=global_step,
indicators=self.indicators)
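# Usage sketch (illustrative; `logger` is whatever LoggerInternal instance the
# lab framework provides). All three call shapes handled by store() above:
#   store = Store(logger)
#   store.store(loss=0.25, accuracy=0.9)          # keyword form
#   store.store('loss', 0.25)                     # key/value form
#   store.store([{'loss': 0.25}, {'loss': 0.2}])  # list-of-dicts form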
| nilq/baby-python | python |
# Write a function that reverses the elements of the given list.
# If the elements of the list are themselves lists, reverse their elements too.
# Example:
# input: [[1, 2], [3, 4], [5, 6, 7]]
# output: [[7, 6, 5], [4, 3], [2, 1]]
liste = [[1, 2], [3, 4], [5, 6, 7]]
liste.reverse()
for l in liste:
l.reverse()
print(liste)
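# The comment above asks for a function; here is a recursive sketch handling
# arbitrary nesting (an addition for illustration, not the original solution):
def deep_reverse(lst):
    """Reverse lst in place, recursing into nested lists, and return it."""
    lst.reverse()
    for item in lst:
        if isinstance(item, list):
            deep_reverse(item)
    return lst

print(deep_reverse([[1, 2], [3, 4], [5, 6, 7]]))  # [[7, 6, 5], [4, 3], [2, 1]]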
| nilq/baby-python | python |
import discord
from discord.ext import commands
from typing import Union
from CatLampPY import isGuild, hasPermissions, CommandErrorMsg # pylint: disable=import-error
class Moderation(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.bot.cmds.append(self.purge)
self.bot.cmds.append(self.kick)
self.bot.cmds.append(self.ban)
self.bot.cmds.append(self.unban)
async def gf_user(self, user_id: int):
user = self.bot.get_user(user_id)
if not user:
try:
user = await self.bot.fetch_user(user_id)
except discord.NotFound:
raise CommandErrorMsg(f'No user with the ID {user_id} was found!')
return user
@commands.command(aliases=["bulkDelete"])
@isGuild()
@hasPermissions("manage_messages")
async def purge(self, ctx, number_of_messages: int):
"""Purges a certain amount of messages up to 100. Only works in servers."""
if number_of_messages <= 0:
raise CommandErrorMsg("I need at least 1 message to purge!")
elif number_of_messages > 100:
raise CommandErrorMsg("I can't purge more than 100 messages at a time!")
await ctx.message.delete()
msgsDeleted = await ctx.channel.purge(limit=number_of_messages)
msg = await ctx.send(f"Deleted {len(msgsDeleted)} messages.")
try:
await msg.delete(delay=5)
except discord.NotFound:
pass
@commands.command(cooldown_after_parsing=True)
@commands.cooldown(1, 10, commands.BucketType.member)
@hasPermissions("kick_members")
async def kick(self, ctx, member: discord.Member, reason: str = "No reason specified."):
"""Kick a user with an optional reason. Requires the Kick Members permission."""
if member.id == self.bot.user.id:
await ctx.send(":(")
return
elif member.id == ctx.guild.owner.id:
raise CommandErrorMsg("I can't kick the server owner!")
try:
await ctx.guild.kick(member,
reason=f"Kicked by {str(ctx.author)} ({ctx.author.id}) with reason: '{reason}'")
except discord.Forbidden:
raise CommandErrorMsg("I'm not high enough in the role hierarchy to kick that person!")
await ctx.send(f"{member.mention} ({str(member)}) has been kicked from the server with reason: '{reason}'")
@commands.command(cooldown_after_parsing=True)
@commands.cooldown(1, 10, commands.BucketType.member)
@hasPermissions("ban_members")
async def ban(self, ctx, user: Union[discord.User, int], reason: str = "No reason specified.",
days_of_messages_to_delete: int = 0):
"""Ban a user (including someone not in the server) with an optional reason and days of messages to delete.
Requires the Ban Members permission."""
if isinstance(user, int):
user = await self.gf_user(user)
try:
await ctx.guild.fetch_ban(user)
# Since an exception wasn't raised, a ban for this user already exists.
await ctx.send("That user is already banned!")
return
except discord.NotFound:
if user.id == self.bot.user.id:
await ctx.send(":(")
return
try:
await ctx.guild.ban(user, reason=f"Banned by {str(ctx.author)} "
f"({ctx.author.id}) with reason: '{reason}'",
delete_message_days=days_of_messages_to_delete)
except discord.Forbidden:
raise CommandErrorMsg("I'm not high enough in the role hierarchy to ban that person!")
await ctx.send(f"{user.mention} ({str(user)}) has been banned from the server with reason: '{reason}'")
@commands.command(cooldown_after_parsing=True)
@commands.cooldown(1, 10, commands.BucketType.member)
@hasPermissions("ban_members")
async def unban(self, ctx, user: Union[discord.User, int]):
"""Unbans a user. Requires the Ban Members permission."""
if isinstance(user, int):
user = await self.gf_user(user)
try:
# This is to check if the user is actually banned.
# If the user is not banned, fetch_ban will raise NotFound.
await ctx.guild.fetch_ban(user)
await ctx.guild.unban(
user, reason=f'Unbanned by {ctx.author} ({ctx.author.id})'
)
await ctx.send(f'{user.mention} ({user}) has been unbanned from the server.')
except discord.NotFound:
raise CommandErrorMsg("That user is not banned!")
def setup(bot):
bot.add_cog(Moderation(bot))
| nilq/baby-python | python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Low-level feature detection including: Canny, corner Harris, Hough line,
Hough circle, good feature to track, etc.
"""
from __future__ import annotations
| nilq/baby-python | python |
from dataclasses import dataclass
from typing import Optional
from pyhcl.core._repr import CType
from pyhcl.ir import low_ir
@dataclass(eq=False, init=False)
class INT(CType):
v: int
def __init__(self, v: int):
self.v = int(v)
@property
def orR(self):
return Bool(not not self.v)
class UInit(type):
def __call__(cls, v: int):
return U.w(max(v.bit_length(), 1))(v)
class U(CType, metaclass=UInit):
def __init__(self, _: int):
pass
@staticmethod
def _lowWidth(width: Optional[int] = None):
return low_ir.IntWidth(width) if width is not None else None
@staticmethod
def w(width: Optional[int] = None):
"""
        Return a UInt type with the assigned width.
        If the width is not given, it will be inferred.
"""
@classmethod
def _flip(cls):
cls.field = low_ir.Flip()
return cls
def _mapToIR(_, __=None):
# If caller is UInt Type, it would call `mapToIR(ctx)`
# Or caller is UInt Literal, it would call `mapToIR(literal, ctx)`
if __ is not None:
return low_ir.UIntLiteral(_.v, U._lowWidth(width))
else:
return low_ir.UIntType(U._lowWidth(width))
def _idxType(_ = None):
return U.w(1)
uk = type(f"U?", (INT,), {"mapToIR": _mapToIR, "getIndexedType": _idxType})
uk.typ = uk
if width is not None:
t = type(f"U{width}", (INT,),
{"width": width, "mapToIR": _mapToIR, "getIndexedType": _idxType,
"field": low_ir.Default(), "flip": _flip})
t.typ = uk
return t
else:
return uk
Bool = U.w(1)
class SInit(type):
def __call__(cls, v: int):
return S.w(v.bit_length() + 1)(v)
class S(CType, metaclass=SInit):
def __init__(self, _: int):
pass
@staticmethod
def _lowWidth(width: Optional[int] = None):
return low_ir.IntWidth(width) if width is not None else None
@staticmethod
def w(width: Optional[int] = None):
"""
        Return an SInt type with the assigned width.
        If the width is not given, it will be inferred.
"""
def _mapToIR(_, __=None):
# If caller is SInt Type, it would call `mapToIR(ctx)`
# Or caller is SInt Literal, it would call `mapToIR(literal, ctx)`
if __ is not None:
return low_ir.SIntLiteral(_.v, S._lowWidth(width))
else:
return low_ir.SIntType(S._lowWidth(width))
        def _idxType(_ = None):  # accept the instance when called as a bound method, mirroring U's version
            return S.w(1)
uk = type(f"S?", (INT,), {"mapToIR": _mapToIR, "getIndexedType": _idxType})
uk.typ = uk
if width is not None:
t = type(f"S{width}", (INT,), {"width": width, "mapToIR": _mapToIR, "getIndexedType": _idxType})
t.typ = uk
return t
else:
return uk
class Clock(CType):
def mapToIR(self, ctx):
return low_ir.ClockType()
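# Behavior sketch inferred from the factories above (not from pyhcl docs):
#   U(5)     # unsigned literal; width inferred as max((5).bit_length(), 1) == 3
#   U.w(8)   # an 8-bit unsigned type, "U8"
#   S(3)     # signed literal; width = bit_length + 1 == 3
#   Bool     # alias for U.w(1)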
| nilq/baby-python | python |
"""
Main Methods are declared here
"""
from picocv._settings import Settings
from picocv.utils.train import Trainer
from picocv.utils.augment import DatasetAugmenter
def autoCorrect(model_func, dataset_func, settings : Settings):
"""
Performs Auto Correct Algorithm (Main Method)
:param model_func: Function that returns Custom Model Class (torch.nn.Module)
:param dataset_func: Function that returns Custom Dataset Class (torch.utils.data.Dataset)
:param settings: Picocv Settings (picocv.Settings)
:return: None
"""
# Validate Settings
assert settings.validate(), 'Update to Valid Settings Variables!!'
# Initialize Dataset Augmenter
dataset_augmenter = DatasetAugmenter(dataset_func=dataset_func, settings=settings)
input_string = input('\nContinue? (Y/n)')
if input_string == 'Y':
# Start Pico Algorithm
for iteration in range(settings.n_iter):
print('[{current_iteration}/{total_iteration}] Starting {current_iteration}-th Iteration...'.format(current_iteration=iteration + 1,
total_iteration=settings.n_iter))
for segment_id in range(dataset_augmenter.N_SEGMENT):
print('Start Training Checker-[{segment_id}]'.format(segment_id=segment_id))
segment_dataset = dataset_augmenter.get_dataset(iteration_id=iteration, segment_id=segment_id) # returned segmented dataset
trainer = Trainer(model_func=model_func, dataset=segment_dataset, settings=settings) # initialize trainer
print('finished')
| nilq/baby-python | python |
from .run import wait, load
__all__ = ['wait', 'load']
| nilq/baby-python | python |
# Admin panel: course editor section
# Endpoints of the course editor menu (DSh) for the current academic year
path_admin_schedules_grade_1 = '/schedules?grade=1&school=true&'
path_admin_schedules_grade_2 = '/schedules?grade=2&school=true&'
path_admin_schedules_grade_3 = '/schedules?grade=3&school=true&'
path_admin_schedules_grade_4 = '/schedules?grade=4&school=true&'
path_admin_schedules_grade_5 = '/schedules?grade=5&school=true&'
path_admin_schedules_grade_6 = '/schedules?grade=6&school=true&'
path_admin_schedules_grade_7 = '/schedules?grade=7&school=true&'
path_admin_schedules_grade_8 = '/schedules?grade=8&school=true&'
path_admin_schedules_grade_9 = '/schedules?grade=9&school=true&'
path_admin_schedules_grade_10 = '/schedules?grade=10&school=true&'
path_admin_schedules_grade_11 = '/schedules?grade=11&school=true&'
# Attaching/removing a subject
path_admin_add_subject = '/schedules?'
path_admin_delete_subject = '/schedules/5583026?'
# Subject editing section
path_admin_item_editor = '/schedule_items.json?schedule_id=3908531&'  # open the subject editor
path_admin_add_topic = '/topics?'  # add a topic
path_admin_add_lesson = 'lessons.json?'  # create a new lesson
path_admin_lesson_for_day = '/schedule_items.json?'  # bind a lesson to a date
path_admin_remove_lesson = '/lessons/37865.json?'  # delete a lesson
path_admin_remove_topic = '/topics/24273?addLessonHide=true&addLessonNameEvent=click&calendarActive=false&editTopicNameHide=true&lessonsHide=false&name=тест&schedule_id=3908531&subject_id=201&'
path_admin_save_date_ege = '/schedules/3908531?'  # save the EGE exam date
# Monthly homework (MDZ) editor
path_admin_monthly_homework_editor = '/monthly_homeworks?schedule_id=3908531&'  # open the MDZ editor
path_admin_create_monthly_homework = '/monthly_homeworks?'  # create an MDZ
path_admin_delete_monthly_homework = '/monthly_homeworks/7229?'  # delete an MDZ
# Endpoints of the EGE course editor
path_admin_editor_ege = '/schedules?grade=11&school=false&'  # open the EGE editor
path_admin_attach_subject_ege = '/schedules?'  # attach an EGE subject
path_admin_delete_subject_ege = '/schedules/5583707?'  # remove an EGE subject
path_admin_add_topic = '/topics?'  # add a topic (duplicate of the definition above)
# NOTE: the methods below reference `self`, so they were evidently written as
# methods of a class whose statement is missing from the source.
def __init__(self, token=None):
    self.token = token
def get_token(self):
    headers_user = {
        "Authorization": self.token,  # was `self.access_token`, which __init__ never sets
    }
    return headers_user
| nilq/baby-python | python |
import os
import re
import sys
sys.path.append(os.path.dirname(__file__))
import nb_file_util as fu
class SymbolLister(fu.CellProcessorBase):
def calls_sympy_symbol(self):
"""
        If a symbol-definition line is included, return the line numbers and the contents in a list.
        :return: list of dict {'line number': int, 'source': str}
"""
# TODO : What if import sympy
# TODO : Consider using ast module
result = []
if self.is_code():
if self.has_source():
for line_number, source_line in enumerate(self.cell['source'].splitlines()):
if ('sy.symbols' in source_line) or ('sy.Symbol' in source_line):
result.append({'line number': line_number, 'source': source_line})
return result
def process_cell(self):
return self.calls_sympy_symbol()
class SymbolConverter(SymbolLister):
"""
sy.symbols('L_AB_m', real=True, nonnegative=True) -> sy.symbols('L_{AB}[m]', real=True, nonnegative=True)
sy.symbols('w0_N_m', real=True) -> sy.symbols('w0[N/m]', real=True)
"L_AB_m, L_AC_m = sy.symbols('L_AB_m, L_AC_m', real=True, nonnegative=True)"
-> [find symbol location] -> 'L_AB_m, L_AC_m' ->
'L_AB_m' -> [wrap_symbol_name] -> 'L_{AB}_{m}' -> 'L_{AB}[m]'
"""
units_set = {'m', 'mm', 'mm3', 'm2', 'm3', 'm4', 'deg', 'rad', 'N', 'Nm', 'N_m', 'Pa', 'MPa', 'm_s2', 'kg'}
def __init__(self):
super().__init__()
self.conversion_table_dict = self.unit_underline_wrap_bracket()
self.secondary_table_dict = self.make_secondary_table()
self.re_split = self.prepare_split_rule()
@staticmethod
def make_secondary_table():
return {
'_{N}[m]': '[N/m]',
'_{N}[mm]': '[N/mm]',
'_{N}[m^{2}]': '[N/m^{2}]',
'_{N}[mm^{2}]': '[N/mm^{2}]',
}
@staticmethod
def prepare_split_rule():
return re.compile(r'[, ]')
@staticmethod
def wrap_symbol_name(symbol_name):
"""
Wrap '_' separated symbol name parts with '{}'
:param str symbol_name:
:return:
Example
>>> cp = SymbolConverter()
>>> cp.wrap_symbol_name('L_AB_m')
'L_{AB}_{m}'
"""
symbol_name_split_under_line = symbol_name.split('_')
if 1 < len(symbol_name_split_under_line):
symbol_name_underline_wrapped = [symbol_name_split_under_line[0]]
for part in symbol_name_split_under_line[1:]:
symbol_name_underline_wrapped.append('{%s}' % part)
symbol_name = '_'.join(symbol_name_underline_wrapped)
return symbol_name
def unit_underline_wrap_bracket(self):
"""
'_{m_s2}': '[m/s^{2}]'
'_{N_m}': '[N/m]'
:return: dictionary
:rtype dict
"""
conversion_table_dict = {}
for unit in self.units_set:
key = '_{%s}' % unit
value = '[%s]' % unit.replace('_', '/').replace('4', '^{4}').replace('3', '^{3}').replace('2', '^{2}')
conversion_table_dict[key] = value
return conversion_table_dict
def process_cell(self):
source_lines = self.cell['source'].splitlines()
symbol_list = self.calls_sympy_symbol()
# [{'line number': int, 'source': str}]
for symbol_line in symbol_list:
converted_line = self.process_line(symbol_line['source'])
# replace the source code with the new line
source_lines[symbol_line['line number']] = converted_line
converted_source_code = '\n'.join(source_lines)
if self.cell['source'] and '\n' == self.cell['source'][-1]:
converted_source_code += '\n'
# update cell
self.cell['source'] = converted_source_code
def process_line(self, source_line):
"""
SymbolConverter.process_line()
        Find the SymPy symbol names in the source line and convert each of them.
"""
symbol_names_location = self.find_symbol_name_location(source_line)
symbol_names_str = source_line[symbol_names_location[0]:symbol_names_location[1]]
symbol_names_list = filter(lambda x: bool(x),
[symbol_name.strip() for symbol_name in self.re_split.split(symbol_names_str)])
converted_symbol_names_list = [self.process_symbol_name(symbol_name) for symbol_name in symbol_names_list]
converted_symbol_names_str = ', '.join(converted_symbol_names_list)
converted_source_line = (source_line[:symbol_names_location[0]]
+ converted_symbol_names_str
+ source_line[symbol_names_location[1]:])
return converted_source_line
def process_symbol_name(self, symbol_name):
result = {symbol_name:symbol_name}
wrapped = self.wrap_symbol_name(symbol_name)
# first conversion layer : for majority of cases
result.update(self.apply_lookup_table(wrapped, symbol_name))
# second conversion layer : for N/m, N/m^{2} cases
result.update(self.apply_lookup_table(result[symbol_name], symbol_name, self.secondary_table_dict))
return result[symbol_name]
def find_symbol_name_location(self, source_line):
"""
:param str source_line:
:return: (int, int)
>>> cp = SymbolConverter()
>>> source_line = "L_AB_m = sy.symbols('L_AB_m', real=True, nonnegative=True)"
>>> result = cp.find_symbol_name_location(source_line)
>>> source_line[result[0]:result[1]]
'L_AB_m'
>>> source_line = "L_AB_m = sy.Symbol('L_AB_m', real=True, nonnegative=True)"
>>> result = cp.find_symbol_name_location(source_line)
>>> source_line[result[0]:result[1]]
'L_AB_m'
"'"
"""
        first_attempt = re.search(r'.*\.(Symbol|symbols)\s*\([\'\"]', source_line)
        if first_attempt:
            # guard before use: the original read first_attempt.regs before the None check
            quote = source_line[first_attempt.regs[0][1] - 1]
            quote_pattern = chr(92) + quote  # backslash + ['"]
            second_attempt = re.search(r'.*\.(Symbol|symbols)\s*\(' + quote_pattern + r'(.+?)' + quote_pattern,
                                       source_line)
            start = first_attempt.regs[0][1]
            end = second_attempt.regs[0][1] - 1
            result = (start, end)
        else:
            result = None
        return result
def apply_lookup_table(self, text_to_apply, original_symbol_name, lookup_table_dict=None):
if lookup_table_dict is None:
lookup_table_dict = self.conversion_table_dict
new_small_dict = {}
# lookup table loop
for to_be_replaced in lookup_table_dict:
if text_to_apply.endswith(to_be_replaced):
new_small_dict[original_symbol_name] = text_to_apply.replace(to_be_replaced,
lookup_table_dict[to_be_replaced])
# if lookup table original_symbol_name found, break lookup table loop
break
return new_small_dict
class IpynbUnitConverter(fu.FileProcessor):
def __init__(self, nb_filename):
super().__init__(nb_filename=nb_filename, cell_processor=SymbolConverter())
def symbol_lines_in_file(input_file_name):
sc = SymbolLister()
file_processor = fu.FileProcessor(input_file_name, sc)
result = file_processor.process_nb_file()
return result
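# Usage sketch (illustrative; 'notebook.ipynb' is a placeholder path):
#   converter = IpynbUnitConverter('notebook.ipynb')
#   converter.process_nb_file()              # rewrite unit-suffixed symbol names in place
#   symbol_lines_in_file('notebook.ipynb')   # or just list the symbol-defining lines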
| nilq/baby-python | python |
class ParserListener:
def update(self, phase, row):
""" Called when the parser has parsed a new record.
"""
pass
def handle(self, event, message, groups):
""" Called when the parser has parsed a registered event.
"""
pass
def registerKey(self, phase, key):
""" Called when a new key was found in the log data.
"""
pass
def parsingFinished(self):
""" Called when the parser has processed all available streams.
"""
pass
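# Example listener sketch (an illustration, not part of the original API):
# subclass and override only the callbacks you need.
class PrintingListener(ParserListener):
    def update(self, phase, row):
        print('record in phase {}: {}'.format(phase, row))
    def parsingFinished(self):
        print('parsing finished')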
| nilq/baby-python | python |
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Binomial(Distribution):
""" Binomial distribution class for calculating and
visualizing a Binomial distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats to be extracted from the data file
p (float) representing the probability of an event occurring
n (int) number of trials
"""
def __init__(self, file_name='name'):
Distribution.__init__(self, file_name)
self.n = len(self.data)
self.p = 1.0 * sum(self.data) / len(self.data)
def calculate_mean(self):
"""Function to calculate the mean from p and n
Args:
None
Returns:
float: mean of the data set
"""
self.mean = self.p * self.n
return self.mean
def calculate_stdev(self):
"""Function to calculate the standard deviation from p and n.
Args:
None
Returns:
float: standard deviation of the data set
"""
self.stdev = math.sqrt(self.n * self.p * (1 - self.p))
return self.stdev
def extract_stats_from_data(self):
"""Function to calculate p, n from the data
set
Args:
None
Returns:
None
"""
self.n = len(self.data)
self.p = 1.0 * sum(self.data) / len(self.data)
def plot_bar(self):
"""Function to output a bar chart of the number of successes and
failures using matplotlib pyplot library.
Args:
None
Returns:
None
"""
self.extract_stats_from_data()
plt.bar(x = ['0', '1'], height = [(1 - self.p) * self.n, self.p * self.n])
plt.title('Number of successes (1) and failures (0) ')
plt.xlabel('outcome')
plt.ylabel('count')
plt.show()
def pmf(self, k):
"""Probability mass function calculator for the binomial distribution.
Args:
k (natural number): number of successes
Returns:
float: probability mass function output
"""
        if not isinstance(k, int) or k < 0:
            print("k (the argument of pmf) needs to be a non-negative integer")
            exit()
self.extract_stats_from_data()
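        # binomial pmf: C(n, k) * p**k * (1 - p)**(n - k)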
a = math.factorial(self.n) / (math.factorial(k) * (math.factorial(self.n - k)))
b = (self.p ** k) * (1 - self.p) ** (self.n - k)
return a * b
def plot_bar_pmf(self):
"""Function to plot the pmf of the binomial distribution
Args:
None
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
x = []
y = []
self.extract_stats_from_data()
# calculate the x values to visualize
for i in range(self.n + 1):
x.append(i)
y.append(self.pmf(i))
# make the plots
plt.bar(x, y)
plt.title('Distribution of Outcomes')
plt.ylabel('Probability Mass Function')
plt.xlabel('Number of successes (k)')
plt.show()
return
def __repr__(self):
"""Function to output the parameters of the Binomial instance
Args:
None
Returns:
string: characteristics of the Binomial
"""
self.extract_stats_from_data()
return "Number of trials {}, success propability for each trial {} ".\
format(self.n, round(self.p, 2))
| nilq/baby-python | python |
def f(x):
y = x
return f(y)
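# Note: f calls itself unconditionally, so f(0) recurses until RecursionError.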
f(0)
| nilq/baby-python | python |
import os
import sys
root_path = os.path.abspath("../../../")
if root_path not in sys.path:
sys.path.append(root_path)
import numpy as np
import tensorflow as tf
from _Dist.NeuralNetworks.DistBase import Base, AutoBase, AutoMeta, DistMixin, DistMeta
class LinearSVM(Base):
def __init__(self, *args, **kwargs):
super(LinearSVM, self).__init__(*args, **kwargs)
self._name_appendix = "LinearSVM"
self.c = None
def init_from_data(self, x, y, x_test, y_test, sample_weights, names):
super(LinearSVM, self).init_from_data(x, y, x_test, y_test, sample_weights, names)
metric = self.model_param_settings.setdefault("metric", "binary_acc")
if metric == "acc":
self.model_param_settings["metric"] = "binary_acc"
self.n_class = 1
def init_model_param_settings(self):
self.model_param_settings.setdefault("lr", 0.01)
self.model_param_settings.setdefault("n_epoch", 10 ** 3)
self.model_param_settings.setdefault("max_epoch", 10 ** 6)
super(LinearSVM, self).init_model_param_settings()
self.c = self.model_param_settings.get("C", 1.)
def _build_model(self, net=None):
self._model_built = True
if net is None:
net = self._tfx
current_dimension = net.shape[1].value
self._output = self._fully_connected_linear(
net, [current_dimension, 1], "_final_projection"
)
def _define_loss_and_train_step(self):
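        # Soft-margin linear SVM objective:
        #   C * sum(max(0, 1 - y * f(x))) + 0.5 * ||w||^2
        # (tf.nn.l2_loss already includes the 0.5 factor)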
self._loss = self.c * tf.reduce_sum(
tf.maximum(0., 1 - self._tfy * self._output)
) + tf.nn.l2_loss(self._ws[0])
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
self._train_step = self._optimizer.minimize(self._loss)
def _get_feed_dict(self, x, y=None, weights=None, is_training=False):
if y is not None:
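            # the hinge loss expects labels in {-1, +1}; remap 0 -> -1 in place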
y[y == 0] = -1
return super(LinearSVM, self)._get_feed_dict(x, y, weights, is_training)
def predict_classes(self, x):
return (self._calculate(x, tensor=self._output, is_training=False) >= 0).astype(np.int32)
class SVM(LinearSVM):
def __init__(self, *args, **kwargs):
super(SVM, self).__init__(*args, **kwargs)
self._name_appendix = "SVM"
self._p = self._gamma = None
self._x = self._gram = self._kernel_name = None
@property
def kernel(self):
if self._kernel_name == "linear":
return self.linear
if self._kernel_name == "poly":
return lambda x, y: self.poly(x, y, self._p)
if self._kernel_name == "rbf":
return lambda x, y: self.rbf(x, y, self._gamma)
raise NotImplementedError("Kernel '{}' is not implemented".format(self._kernel_name))
@staticmethod
def linear(x, y):
return x.dot(y.T)
@staticmethod
def poly(x, y, p):
return (x.dot(y.T) + 1) ** p
@staticmethod
def rbf(x, y, gamma):
return np.exp(-gamma * np.sum((x[..., None, :] - y) ** 2, axis=2))
def init_from_data(self, x, y, x_test, y_test, sample_weights, names):
self._x, y = np.atleast_2d(x).astype(np.float32), np.asarray(y, np.float32)
self._p = self.model_param_settings.setdefault("p", 3)
self._gamma = self.model_param_settings.setdefault("gamma", 1 / self._x.shape[1])
self._kernel_name = self.model_param_settings.setdefault("kernel_name", "rbf")
self._gram, x_test = self.kernel(self._x, self._x), self.kernel(x_test, self._x)
super(SVM, self).init_from_data(self._gram, y, x_test, y_test, sample_weights, names)
def init_model_param_settings(self):
super(SVM, self).init_model_param_settings()
self._p = self.model_param_settings["p"]
self._gamma = self.model_param_settings["gamma"]
self._kernel_name = self.model_param_settings["kernel_name"]
def _define_py_collections(self):
super(SVM, self)._define_py_collections()
self.py_collections += ["_x", "_gram"]
def _define_loss_and_train_step(self):
self._loss = self.c * tf.reduce_sum(tf.maximum(0., 1 - self._tfy * self._output)) + 0.5 * tf.matmul(
self._ws[0], tf.matmul(self._gram, self._ws[0]), transpose_a=True
)[0]
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
self._train_step = self._optimizer.minimize(self._loss)
def _evaluate(self, x=None, y=None, x_cv=None, y_cv=None, x_test=None, y_test=None, metric=None):
n_sample = self._x.shape[0]
cv_feat_dim = None if x_cv is None else x_cv.shape[1]
test_feat_dim = None if x_test is None else x_test.shape[1]
x_cv = None if x_cv is None else self.kernel(x_cv, self._x) if cv_feat_dim != n_sample else x_cv
x_test = None if x_test is None else self.kernel(x_test, self._x) if test_feat_dim != n_sample else x_test
        return super(SVM, self)._evaluate(x, y, x_cv, y_cv, x_test, y_test, metric)
def predict(self, x):
# noinspection PyTypeChecker
return self._predict(self.kernel(x, self._x))
def predict_classes(self, x):
return (self.predict(x) >= 0).astype(np.int32)
def evaluate(self, x, y, x_cv=None, y_cv=None, x_test=None, y_test=None, metric=None):
return self._evaluate(self.kernel(x, self._x), y, x_cv, y_cv, x_test, y_test, metric)
class AutoLinearSVM(AutoBase, LinearSVM, metaclass=AutoMeta):
pass
class DistLinearSVM(AutoLinearSVM, DistMixin, metaclass=DistMeta):
pass
| nilq/baby-python | python |
#!/usr/bin/python
#
# Copyright 2019 Fortinet Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
import box
import os
import tempfile
import pickle
import uuid
from termcolor import colored
# Custom imports
from bph.core.logger import BphLogger as Logger
from bph.core.constants import *
from bph.core.session import BphSession as Session
from bph.core.sample import BphLabFile as LabFile
from bph.core.constants import *
class BphTemplate:
def __init__(self):
self.logger = Logger(level='INFO', module=self.__module__)
class BphToolTemplate(BphTemplate):
def __init__(self):
super().__init__()
class BphToolTemplateConfiguration(BphToolTemplate):
def __init__(self):
super().__init__()
def __locate_tool_config_file(self, *args):
""" Search for the Tool config file """
self.tool_name = args[0]
self.arch = args[1]
self.version = args[2]
self.tool_directory = None
self.md5 = Session.sample_md5
self.logger.log('TemplateConfig #1: {}'.format(self.__dict__), level='DEBUG')
# Detect the tool's base folder.
for root, dirs, files in os.walk(BPH_PLUGIN_DIR):
for directory in dirs:
if self.tool_name in directory:
self.logger.log('Tool Match: {}'.format(self.tool_name), level='DEBUG')
tool_dir = os.path.join(root, directory, self.arch)
self.logger.log(tool_dir, level='DEBUG')
if os.path.isdir(tool_dir):
self.logger.log(f"Tool dir: {tool_dir}", level='DEBUG')
self.tool_directory = tool_dir
# Generating remote tool's path
# Peid: E:\basic\static\peid\x86\0.95\peid.exe
self.remote_tool_path = "{}\\{}".format(
"\\".join(tool_dir.split('/')[5:]), self.version)
self.logger.log(f"Remote Tool Path: {self.remote_tool_path}", level='DEBUG')
def load_tool_config_file(self, tool_name, arch, version, target_file=None):
""" Loads the tool config file: (JSON data -> BOX object) conversion"""
try:
# print(f"Loading Template ({tool_name}) Arch: {arch} Version: ({version})")
self.__locate_tool_config_file(tool_name, arch, version)
cfg_file = f"{self.tool_directory}/{self.version}/{self.tool_name}.json"
self.logger.log('Config file path: {}'.format(cfg_file))
j = open(cfg_file, 'r')
except FileNotFoundError as e:
self.logger.log('Cannot open config JSON file: {}'.format(e), level='DEBUG')
else:
self.logger.log('Loading JSON config file', level='DEBUG')
try:
json_data = json.load(j)
# This will set the dictionary required to hold
# custom user variables used in json template/config files.
json_data['configuration']['execution']['download_sample'] = False
json_data['configuration']['execution']['custom_user_vars'] = {}
json_data['configuration']['execution']['delay'] = 0
json_data['actions']['action'] = ""
except json.JSONDecodeError:
self.logger.log('Error during JSON decoding', level='DEBUG')
return False
else:
j.close()
self.logger.log('The JSON config file was loaded correctly', level='DEBUG')
# The Config JSON data is loaded and then converted
# into an extended python dict by using the python-box
# module. Through this way, attributes can be accessed
# with dot notation:
#
# self.automation.normal_scan.execute = True
#
self.__dict__.update(box.Box(json_data))
#print("JSON_AND_DICT_DATA: {}".format(self.__dict__))
if target_file is None:
self.logger.log('>> Target file is not set', level='DEBUG')
self.configuration.execution.download_sample = False
elif target_file is not None:
self.logger.log('>> Target file is set', level='DEBUG')
self.configuration.execution.download_sample = True
self.download_url = target_file.download_url
else:
self.logger.log('>> Unknown target', level='DEBUG')
class BphToolTemplateExecutor(BphToolTemplateConfiguration):
server_status = None
template_delivered = False
template_file = None
def __init__(self):
super().__init__()
# Variables added into the general (not-boxed) JSON Template
self.module_name = self.__module__
self.sid = Session.get_session_id()
self.md5 = Session.sample_md5
self.project_name = Session.project_name
self.rid = str(uuid.uuid4())
self.tool_drive = BPH_REMOTE_TOOLS_DRIVE
def __dump_command_file(self, tmp_file):
""" Dump Template's JSON data into Temporary file """
try:
tmp = open(tmp_file, 'wb')
self.logger.log(f"Dumping Template Data into a Tmp file: {tmp.name}", level='DEBUG')
            # At this point self.__dict__ has already been boxed.
            # Make a copy of the current object's dictionary and remove the
            # logger from it, so the logger object is not included within the
            # template data and the regular 'logger' module remains.
template_data = {}
for k,v in self.__dict__.items():
if k != "logger":
self.logger.log('Key: {} Value: {}'.format(k, v), level='DEBUG')
if k not in template_data:
template_data.update({k: v})
if BPH_TEMPLATE_SERVER_OUTPUT:
self.logger.log(template_data)
pickle.dump(template_data, tmp, protocol=2)
del template_data
tmp.close()
self.logger.log(self.__dict__, level='DEBUG')
except IOError:
self.logger.log("Tmp file can't be written", level='DEBUG')
return False
else:
self.logger.log('Tmp file - OK', level='DEBUG')
return True
def __make_cmds_tmp_file(self):
""" Created Temporary File """
try:
self.logger.log('Creating Temporary File', level='DEBUG')
with tempfile.NamedTemporaryFile(mode='w+b', dir=BPH_TMP_DIR, delete=False, prefix='blackphenix_') as f:
tmp_file = f.name
except:
self.logger.log('Error when creating tmp file', level='DEBUG')
else:
self.logger.log('Tmp file created:{}'.format(tmp_file), level='DEBUG')
return tmp_file
def _scan_bph_tmp_file(self, clean=False):
""" Scans Windows Temporary Folder for bph_ files """
self.logger.log('Scanning...', level='DEBUG')
for root, dirs, files in os.walk(BPH_TMP_DIR):
for file in files:
# All files matching "blackphenix_" prefix
if "blackphenix_" in file:
                    bph_tmp_file = os.path.join(root, file)
if os.path.getsize(bph_tmp_file) != 0:
self.logger.log('Tmp file: {}'.format(bph_tmp_file), level='DEBUG')
#os.system("ls -lskh {}".format(bph_tmp_file))
else:
self.logger.log('Removing Empty file...')
os.remove(bph_tmp_file)
if clean is not False:
try:
self.logger.log('Cleaning: {}'.format(bph_tmp_file), level='DEBUG')
os.remove(bph_tmp_file)
except OSError:
self.logger.log("Tmp file can't be deleted", level='DEBUG')
return False
else:
self.logger.log('File was removed - cleaned.', level='DEBUG')
self.logger.log('Found BphFile: {}'.format(bph_tmp_file), level='DEBUG')
return bph_tmp_file
def execute(self, delay=0):
self.logger.log("Executing Template")
        # If the user chooses a delay for execute(), then this
# value is passed as parameter within the template
# request. This will allow the windows agent to pause
# the same amount of seconds chosen by the execute()
# function.
# <Box: {'admin_required': False,
# 'delay': 20}>
#
self.configuration.execution.delay = delay
        # A short pause between execute() requests allows enough time
        # to generate a template file and make it ready for the agent.
        time.sleep(2)
if not BphToolTemplateExecutor.server_status:
self.logger.log('Waiting for Agent Connection....')
while True:
if BphToolTemplateExecutor.server_status:
self.logger.log('Agent is Connected. Delivering Template now...')
# Creates a Temp file to dump the current Boxed content
# self.__dict__ was created by using box.Box()
tmp = self.__make_cmds_tmp_file()
# Dumps the self.__dict__ data into the Temporary file
# This file will be used by the Agent Server to send
# the file's content to the VM network Agent
self.__dump_command_file(tmp)
self.logger.log(self.__dict__, level='DEBUG')
break
self.logger.log('Template Delivered: {}'.format(BphToolTemplateExecutor.template_delivered), level='DEBUG')
        while not BphToolTemplateExecutor.template_delivered:
self.logger.log('Waiting to deliver template...')
time.sleep(5)
self.logger.log('Template has been delivered.')
BphToolTemplateExecutor.template_delivered = False
self.logger.log('Next instruction will be sent in ({}) seconds'.format(delay))
time.sleep(delay)
def output(self, show=False):
def output_conversor(tool_output_log):
self.logger.log('output conversor', level='DEBUG')
tool_output = []
with open(tool_output_log) as tool_log:
for line in tool_log:
if line not in tool_output:
if show: self.logger.log('Adding: {}'.format(line), level='DEBUG')
tool_output.append(line.strip())
return tool_output
        tool_output_log = os.path.join(Session.sid_folder, self.tool_name, self.rid, "{}.log".format(self.tool_name))
if show: self.logger.log(tool_output_log, level='DEBUG')
while True:
try:
# Don't give any response until the file has arrived
if os.path.isfile(tool_output_log):
self.logger.log('Log file was found', level='DEBUG')
result_data = output_conversor(tool_output_log)
for line in result_data:
self.logger.log('Content: {}'.format(colored(line, 'green')))
return result_data
except FileNotFoundError:
self.logger.log('File has not arrived yet. Retrying in 5 seconds')
time.sleep(5)
self.logger.log('Retrying now...')
                return self.output(show=show)  # return the retried result instead of discarding it
def files(self):
time.sleep(5)
tool_files_folder = os.path.join(Session.sid_folder, self.tool_name, self.rid)
self.logger.log('Searching for files now in: {}'.format(tool_files_folder))
files_found = []
while True:
if os.path.isdir(tool_files_folder):
self.logger.log('Directory OK', level='DEBUG')
for root, dirs, files in os.walk(tool_files_folder):
for file in files:
if file not in files_found:
file = os.path.join(root, file)
files_found.append(file)
for file in files_found:
self.logger.log(colored('File: {}'.format(os.path.basename(file)), 'green'))
return files_found
| nilq/baby-python | python |
import sys
print("Congratulations on installing Python!", '\n')
print("This system is running {}".format(sys.version), '\n')
if "conda" in sys.version:
print("Hello from Anaconda!")
else:
print("Hello from system-installed Python!")
| nilq/baby-python | python |
from collections import defaultdict
import re
from collections import Counter
print("Reactor Reboot")
with open("day22/day22_1_input.txt", "r") as f:
commands = [entry for entry in f.read().strip().split("\n")]
# print(commands)
cubeDict = defaultdict(bool)
for command in commands:
action, cubePositions = command.split(" ")
positionRange = [[int(startEnd) for startEnd in position.split("=")[1].split("..")] for position in
cubePositions.split(",")]
isOutOfPosition = False
for position in positionRange:
for value in position:
if value < -50 or value > 50:
isOutOfPosition = True
break
if isOutOfPosition:
break
if isOutOfPosition:
continue
for x in range(positionRange[0][0], positionRange[0][1] + 1, 1):
for y in range(positionRange[1][0], positionRange[1][1] + 1, 1):
for z in range(positionRange[2][0], positionRange[2][1] + 1, 1):
# print(x, y, z)
cubeDict[(x, y, z)] = True if action == "on" else False
nbOn = 0
for cube, value in cubeDict.items():
if value:
nbOn +=1
print("rs part1: ", nbOn)
## part2
with open('day22/day22_1_input.txt', 'r') as file:
raw_data = file.read()
def parse_input(raw_data):
res = []
for line in raw_data.split('\n'):
state = int(line.split()[0] == 'on')
        x0, x1, y0, y1, z0, z1 = map(int, re.findall(r'-?\d+', line))
res.append((state, x0, x1, y0, y1, z0, z1))
return res
DATA = parse_input(raw_data)
# print(DATA)
def intersect(cube_a, cube_b):
x0, x1, y0, y1, z0, z1 = cube_a
i0, i1, j0, j1, k0, k1 = cube_b
x_s, y_s, z_s = (
max(a, b) for a, b in
zip((x0, y0, z0), (i0, j0, k0))
)
x_e, y_e, z_e = (
min(a, b) for a, b in
zip((x1, y1, z1), (i1, j1, k1))
)
# print(x_s, y_s, z_s, x_e, y_e, z_e)
if x_s <= x_e and y_s <= y_e and z_s <= z_e:
return x_s, x_e, y_s, y_e, z_s, z_e
return False
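# toggle_cubes below implements inclusion-exclusion with a Counter of signed
# cuboids: each intersection with an already-tracked cuboid is added with the
# opposite sign so overlapping volume is counted exactly once, and an "on"
# step finally adds the new cuboid itself with weight +1.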
def toggle_cubes(step, cubes):
#print("step: ", step, "cubes: ", cubes)
state, cur = step[0], step[1:]
new = Counter()
for cube in cubes:
intsct = intersect(cur, cube)
if intsct:
print("intersect: ",intsct, "cube: ", cube, "cur: ", cur, cubes[cube])
            new[intsct] -= cubes[cube]  # subtract the overlap so intersecting volume is not double-counted
# print("new: ", new)
if state:
cubes[cur] = 1
# print(new)
cubes.update(new)
print(cubes)
print("--------------------------")
return cubes
def calc_toggled(cubes):
res = 0
print("Calculation: ", cubes.items())
for k, v in cubes.items():
x0, x1, y0, y1, z0, z1 = k
print(k)
size = (x1 + 1 - x0) * (y1 + 1 - y0) * (z1 + 1 - z0)
res += size * v
print(res, v)
return res
"""def part_one(steps):
cubes = Counter()
for step in steps:
state, cur = step[0], step[1:]
# print(cur)
cur = intersect(cur, (-50, 50, -50, 50, -50, 50))
if not cur:
continue
cubes = toggle_cubes((state, *cur), cubes)
return calc_toggled(cubes)"""
def part_two(steps):
cubes = Counter()
for step in steps:
cubes = toggle_cubes(step, cubes)
return calc_toggled(cubes)
print("part2.", part_two(DATA))
| nilq/baby-python | python |
from .fp16_optimizer import FP16_Optimizer
from .fused_adam import FusedAdam
| nilq/baby-python | python |
"""
An audio URL.
"""
def audio_url():
return 'https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav'
| nilq/baby-python | python |
###############################################################
# Autogenerated module. Please don't modify. #
# Edit according file in protocol_generator/templates instead #
###############################################################
from typing import Dict
from ...structs.api.offset_fetch_request import OffsetFetchRequestData, Partition, Topic
from ._main_serializers import ArraySerializer, ClassSerializer, Schema, int32Serializer, stringSerializer
partitionSchemas: Dict[int, Schema] = {
0: [("partition", int32Serializer)],
1: [("partition", int32Serializer)],
2: [("partition", int32Serializer)],
3: [("partition", int32Serializer)],
4: [("partition", int32Serializer)],
5: [("partition", int32Serializer)],
}
partitionSerializers: Dict[int, ClassSerializer[Partition]] = {
version: ClassSerializer(Partition, schema) for version, schema in partitionSchemas.items()
}
partitionSerializers[-1] = partitionSerializers[5]
topicSchemas: Dict[int, Schema] = {
0: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionSerializers[0]))],
1: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionSerializers[1]))],
2: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionSerializers[2]))],
3: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionSerializers[3]))],
4: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionSerializers[4]))],
5: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionSerializers[5]))],
}
topicSerializers: Dict[int, ClassSerializer[Topic]] = {
version: ClassSerializer(Topic, schema) for version, schema in topicSchemas.items()
}
topicSerializers[-1] = topicSerializers[5]
offsetFetchRequestDataSchemas: Dict[int, Schema] = {
0: [("group_id", stringSerializer), ("topics", ArraySerializer(topicSerializers[0]))],
1: [("group_id", stringSerializer), ("topics", ArraySerializer(topicSerializers[1]))],
2: [("group_id", stringSerializer), ("topics", ArraySerializer(topicSerializers[2]))],
3: [("group_id", stringSerializer), ("topics", ArraySerializer(topicSerializers[3]))],
4: [("group_id", stringSerializer), ("topics", ArraySerializer(topicSerializers[4]))],
5: [("group_id", stringSerializer), ("topics", ArraySerializer(topicSerializers[5]))],
}
offsetFetchRequestDataSerializers: Dict[int, ClassSerializer[OffsetFetchRequestData]] = {
version: ClassSerializer(OffsetFetchRequestData, schema)
for version, schema in offsetFetchRequestDataSchemas.items()
}
offsetFetchRequestDataSerializers[-1] = offsetFetchRequestDataSerializers[5]
| nilq/baby-python | python |
import gym
import pybullet as p
import pybullet_data
import os
import numpy as np
from gym import spaces
# Initial joint angles
RESET_VALUES = [
0.015339807878856412,
-1.2931458041875956,
1.0109710760673565,
-1.3537670644267164,
-0.07158577010132992,
.027]
# End-effector boundaries
BOUNDS_XMIN = -100
BOUNDS_XMAX = 100
BOUNDS_YMIN = -100
BOUNDS_YMAX = 100
BOUNDS_ZMIN = -100
BOUNDS_ZMAX = 100
# Joint boundaries
JOINT_MIN = np.array([
-3.1,
-1.571,
-1.571,
-1.745,
-2.617,
0.003
])
JOINT_MAX = np.array([
3.1,
1.571,
1.571,
1.745,
2.617,
0.03
])
class WidowxEnv(gym.Env):
def __init__(self):
"""
Initialise the environment
"""
self.goal_oriented = True
# Define action space
self.action_space = spaces.Box(
low=np.float32(np.array([-0.5, -0.25, -0.25, -0.25, -0.5, -0.005]) / 30),
high=np.float32(np.array([0.5, 0.25, 0.25, 0.25, 0.5, 0.005]) / 30),
dtype=np.float32)
# Define observation space
self.obs_space_low = np.float32(
np.array([-.16, -.15, 0.14, -3.1, -1.6, -1.6, -1.8, -3.1, 0]))
self.obs_space_high = np.float32(
np.array([.16, .15, .41, 3.1, 1.6, 1.6, 1.8, 3.1, 0.05]))
self.observation_space = spaces.Box(
low=np.float32(self.obs_space_low),
high=np.float32(self.obs_space_high),
dtype=np.float32)
if self.goal_oriented:
self.observation_space = spaces.Dict(dict(
desired_goal=spaces.Box(low=np.float32(np.array([-.16, -.15, 0.25])), high=np.float32(np.array([.16, .15, 0.41])), dtype=np.float32),
achieved_goal=spaces.Box(low=np.float32(self.obs_space_low[:3]), high=np.float32(self.obs_space_high[:3]), dtype=np.float32),
observation=self.observation_space
))
self.current_pos = None
# Initialise the goal position
self.goal = np.array([.14, .0, 0.26]) # Fixed goal
# self.set_goal(self.sample_goal_for_rollout()) # Random goal
# Connect to physics client. By default, do not render
self.physics_client = p.connect(p.DIRECT)
# Load URDFs
self.create_world()
def create_world(self):
# Initialise camera angle
p.resetDebugVisualizerCamera(
cameraDistance=0.6,
cameraYaw=0,
cameraPitch=-30,
cameraTargetPosition=[0.2, 0, 0.1],
physicsClientId=self.physics_client)
# Load robot, sphere and plane urdf
p.setAdditionalSearchPath(pybullet_data.getDataPath())
path = os.path.abspath(os.path.dirname(__file__))
self.arm = p.loadURDF(
os.path.join(
path,
"URDFs/widowx/widowx.urdf"),
useFixedBase=True)
self.sphere = p.loadURDF(
os.path.join(
path,
"URDFs/sphere.urdf"),
useFixedBase=True)
self.plane = p.loadURDF('plane.urdf')
# reset environment
self.reset()
def sample_goal_for_rollout(self):
""" Sample random goal coordinates """
return np.random.uniform(low=np.array(
[-.14, -.13, 0.26]), high=np.array([.14, .13, .39]))
def set_goal(self, goal):
self.goal = goal
def step(self, action):
"""
Execute the action.
Parameters
----------
action : array holding the angles changes from the previous time step [δ1, δ2, δ3, δ4, δ5, δ6]
Returns
-------
obs, reward, episode_over, info : tuple
obs (object) :
Either [xe, ye, ze, θ1, θ2, θ3, θ4, θ5, θ6] for a Gym env
or an observation dict for a goal env
reward (float) :
Negative, squared, l2 distance between current position and goal position
episode_over (bool) :
Whether or not we have reached the goal
info (dict) :
Additional information
"""
self.action = np.array(action, dtype=np.float32)
        # Retrieve current joint positions and velocities
        # (note that velocities are always 0 due to the forced joint reset)
self.joint_positions, self.joint_velocities = self._get_current_joint_positions()
# Update the new joint position with the action
self.new_joint_positions = self.joint_positions + self.action
# Clip the joint position to fit the joint's allowed boundaries
self.new_joint_positions = np.clip(
np.array(self.new_joint_positions),
JOINT_MIN,
JOINT_MAX)
# Instantaneously reset the joint position (no torque applied)
self._force_joint_positions(self.new_joint_positions)
# Retrieve the end effector position.
# If it's outside the boundaries defined, don't update the joint
# position
end_effector_pos = self._get_current_end_effector_position()
x, y, z = end_effector_pos[0], end_effector_pos[1], end_effector_pos[2]
conditions = [
x <= BOUNDS_XMAX,
x >= BOUNDS_XMIN,
y <= BOUNDS_YMAX,
y >= BOUNDS_YMIN,
z <= BOUNDS_ZMAX,
z >= BOUNDS_ZMIN
]
violated_boundary = False
for condition in conditions:
if not condition:
violated_boundary = True
break
if violated_boundary:
self._force_joint_positions(self.joint_positions)
# Backup old position and get current joint position and current end
# effector position
self.old_pos = self.current_pos
self.current_pos = self._get_current_state()
return self._generate_step_tuple()
def _generate_step_tuple(self):
""" return (obs, reward, episode_over, info) tuple """
# Reward
reward = self._get_reward(self.goal)
# Info
self.old_distance = np.linalg.norm(self.old_pos[:3] - self.goal)
self.new_distance = np.linalg.norm(self.current_pos[:3] - self.goal)
info = {}
info['new_distance'] = self.new_distance
info['old_distance'] = self.old_distance
info['goal_position'] = self.goal
info['tip_position'] = self.current_pos[:3]
info['old_joint_pos'] = self.joint_positions
info['new_joint_pos'] = self.new_joint_positions
info['joint_vel'] = self.joint_velocities
        # Never end the episode prematurely
episode_over = False
# if self.new_distance < 0.0005:
# episode_over = True
if self.goal_oriented:
obs = self._get_obs()
return obs, reward, episode_over, info
return self.current_pos, reward, episode_over, info
def reset(self):
"""
Reset robot and goal at the beginning of an episode
Return observation
"""
# Reset robot at the origin and move sphere to the goal position
p.resetBasePositionAndOrientation(
self.arm, [0, 0, 0], p.getQuaternionFromEuler([np.pi, np.pi, np.pi]))
p.resetBasePositionAndOrientation(
self.sphere, self.goal, p.getQuaternionFromEuler([np.pi, np.pi, np.pi]))
# Reset joint at initial angles and get current state
self._force_joint_positions(RESET_VALUES)
self.current_pos = self._get_current_state()
if self.goal_oriented:
return self._get_obs()
return self.current_pos
def _get_obs(self):
""" return goal_oriented observation """
obs = {}
obs['observation'] = self.current_pos
obs['desired_goal'] = self.goal
obs['achieved_goal'] = self.current_pos[:3]
return obs
def _get_reward(self, goal):
""" Calculate the reward as - distance **2 """
return - (np.linalg.norm(self.current_pos[:3] - goal) ** 2)
def render(self, mode='human'):
""" Render Pybullet simulation """
p.disconnect(self.physics_client)
self.physics_client = p.connect(p.GUI)
self.create_world()
def compute_reward(self, achieved_goal, goal, info):
""" Function necessary for goal Env"""
return - (np.linalg.norm(achieved_goal - goal)**2)
def _get_current_joint_positions(self):
""" Return current joint position and velocities """
joint_positions = []
joint_velocities = []
for i in range(6):
joint_positions.append(p.getJointState(self.arm, i)[0])
joint_velocities.append(p.getJointState(self.arm, i)[1])
return np.array(
joint_positions, dtype=np.float32), np.array(
joint_velocities, dtype=np.float32)
def _get_current_end_effector_position(self):
""" Get end effector coordinates """
return np.array(
list(
p.getLinkState(
self.arm,
5,
computeForwardKinematics=1)[4]))
def _set_joint_positions(self, joint_positions):
""" Position control (not reset) """
# In Pybullet, gripper halves are controlled separately
joint_positions = list(joint_positions) + [joint_positions[-1]]
p.setJointMotorControlArray(
self.arm,
[0, 1, 2, 3, 4, 7, 8],
controlMode=p.POSITION_CONTROL,
targetPositions=joint_positions
)
def _force_joint_positions(self, joint_positions):
""" Instantaneous reset of the joint angles (not position control) """
for i in range(5):
p.resetJointState(
self.arm,
i,
joint_positions[i]
)
# In Pybullet, gripper halves are controlled separately
for i in range(7, 9):
p.resetJointState(
self.arm,
i,
joint_positions[-1]
)
def _get_current_state(self):
""" Return observation: end effector position + current joint position """
return np.concatenate(
[self._get_current_end_effector_position(),
self._get_current_joint_positions()[0]],
axis=0)
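# --- Illustrative usage sketch (added note, not part of the original code) ---
# A minimal rollout loop for the environment defined above; the class name
# `ArmReacherEnv` below is a hypothetical stand-in, assuming the usual Gym
# construction:
#
#   env = ArmReacherEnv()
#   env.set_goal(np.array([0.10, 0.05, 0.30]))        # a goal inside the bounds
#   obs = env.reset()
#   for _ in range(100):
#       action = np.random.uniform(-0.05, 0.05, size=6)  # small joint deltas
#       obs, reward, done, info = env.step(action)
#       print(info['new_distance'], reward)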
|
nilq/baby-python
|
python
|
from discord import Embed
async def compose_embed(bot, msg, message):
names = {
"user_name": msg.author.display_name,
"user_icon": msg.author.avatar_url,
"channel_name": msg.channel.name,
"guild_name": msg.guild.name,
"guild_icon": msg.guild.icon_url
}
if msg.guild != message.guild:
names = await update_names(bot, msg, names)
embed_type = await get_embed_type(bot, message)
embed_color = await get_embed_color(bot, message)
# Only embed type 1 is currently implemented; any other type falls back to it
embed = await Compose.type_1(msg, message, names, embed_color)
return embed, embed_type
async def update_names(bot, msg, names):
guild_anonymity = await bot.check.anonymity(bot.guilds_data, msg.guild.id)
user_anonymity = await bot.check.anonymity(bot.users_data, msg.author.id)
if user_anonymity is None:
if guild_anonymity:
names["user_name"] = '匿名ユーザー'
names["user_icon"] = 'https://discord.com/assets/7c8f476123d28d103efe381543274c25.png'
else:
names["user_name"] = msg.author.display_name
names["user_icon"] = msg.author.avatar_url
if user_anonymity is True:
names["user_name"] = '匿名ユーザー'
names["user_icon"] = 'https://discord.com/assets/7c8f476123d28d103efe381543274c25.png'
if user_anonymity is False:
names["user_name"] = msg.author.display_name
names["user_icon"] = msg.author.avatar_url
return names
async def get_embed_type(bot, message):
user_data = bot.users_data.get(str(message.author.id))
if user_data:
return user_data.get('embed_type')
guild_data = bot.guilds_data.get(str(message.guild.id))
if guild_data:
return guild_data.get('embed_type')
return 1
async def get_embed_color(bot, message):
user_data = bot.users_data.get(str(message.author.id))
if user_data:
return user_data.get('embed_color')
guild_data = bot.guilds_data.get(str(message.guild.id))
if guild_data:
return guild_data.get('embed_color')
return '000000'
class Compose:
async def type_1(msg, message, names, embed_color):
embed = Embed(
description=msg.content,
timestamp=msg.created_at,
url=f'{message.jump_url}?{message.author.id}',
colour=int(f'0x{embed_color}', 16)
)
embed.set_author(
name=names["user_name"],
icon_url=names["user_icon"],
url=f'{msg.jump_url}?{msg.author.id}'
)
if names.get('category_name') is None:
channel_txt = f'#{names["channel_name"]}'
else:
channel_txt = f'#{names["category_name"]}/{names["channel_name"]}'
if msg.guild == message.guild:
footer_txt = f'{channel_txt} | Quoted by {str(message.author)}'
else:
footer_txt = f'@{names["guild_name"]} | {channel_txt} | Quoted by {str(message.author)}'
embed.set_footer(
text=footer_txt,
icon_url=names["guild_icon"],
)
if msg.attachments and msg.attachments[0].proxy_url:
embed.set_image(
url=msg.attachments[0].proxy_url
)
return embed
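# --- Illustrative example (added note, not part of the original code) ---
# get_embed_type/get_embed_color resolve settings per-user first, then
# per-guild, then fall back to a default. A standalone sketch of that lookup
# order, using stand-in objects:
#
#   import asyncio
#   from types import SimpleNamespace
#
#   bot = SimpleNamespace(
#       users_data={'42': {'embed_color': 'ff0000'}},
#       guilds_data={'7': {'embed_color': '00ff00'}},
#   )
#   message = SimpleNamespace(
#       author=SimpleNamespace(id=42),
#       guild=SimpleNamespace(id=7),
#   )
#   print(asyncio.run(get_embed_color(bot, message)))  # -> 'ff0000'
#   # Without a per-user entry the guild value '00ff00' would be used,
#   # and with neither, the default '000000'.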
|
nilq/baby-python
|
python
|
import time
import datetime
from haste_storage_client.core import HasteStorageClient, OS_SWIFT_STORAGE, TRASH
from haste_storage_client.interestingness_model import RestInterestingnessModel
haste_storage_client_config = {
'haste_metadata_server': {
# See: https://docs.mongodb.com/manual/reference/connection-string/
'connection_string': 'mongodb://130.xxx.yy.zz:27017'
},
'os_swift': {
# See: https://docs.openstack.org/keystoneauth/latest/
# api/keystoneauth1.identity.v3.html#module-keystoneauth1.identity.v3.password
'username': 'xxxxx',
'password': 'xxxx',
'project_name': 'xxxxx',
'user_domain_name': 'xxxx',
'auth_url': 'xxxxx',
'project_domain_name': 'xxxx'
}
}
# Identifies both the experiment and the session (i.e. unique each time the stream starts).
# The format below is a good example; this needs to be generated at the stream edge.
initials = 'anna_exampleson'
stream_id = datetime.datetime.today().strftime('%Y_%m_%d__%H_%M_%S') + '_exp1_' + initials
print('stream ID is: %s' % stream_id)
# Optionally, specify REST server with interesting model:
interestingness_model = RestInterestingnessModel('http://localhost:5000/model/api/v0.1/evaluate')
client = HasteStorageClient(stream_id,
config=haste_storage_client_config,
interestingness_model=interestingness_model,
storage_policy=[(0.5, 1.0, OS_SWIFT_STORAGE)], # map 0.5<=interestingness<=1.0 to OS swift.
default_storage=TRASH) # discard blobs which don't match the policy above.
blob_bytes = b'this is a binary blob eg. image data.'
timestamp_cloud_edge = time.time()
substream_id = 'B13' # Group by microscopy well ID.
client.save(timestamp_cloud_edge,
(12.34, 56.78),
substream_id,
blob_bytes,
{'image_height_pixels': 300, # bag of extracted features here
'image_width_pixels': 300,
'number_of_green_pixels': 1234})
client.close()
|
nilq/baby-python
|
python
|
"""Checkmarx CxSAST source up-to-dateness collector."""
from dateutil.parser import parse
from collector_utilities.functions import days_ago
from collector_utilities.type import Value
from source_model import SourceResponses
from .base import CxSASTBase
class CxSASTSourceUpToDateness(CxSASTBase):
"""Collector class to measure the up-to-dateness of a Checkmarx CxSAST scan."""
async def _parse_value(self, responses: SourceResponses) -> Value:
"""Override to parse the date and time of the most recent scan."""
scan = (await responses[0].json())[0]
return str(days_ago(parse(scan["dateAndTime"]["finishedOn"])))
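# Illustrative note (added, not part of the original code): given a scan payload
# such as [{"dateAndTime": {"finishedOn": "2020-06-01T12:00:00"}}], the collector
# returns str(days_ago(parse("2020-06-01T12:00:00"))), i.e. the number of days
# between the scan's finish time and now, as a string.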
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import argparse
import re
import sys
# Prevent creation of compiled bytecode files
sys.dont_write_bytecode = True
from core.framework import cli
from core.utils.printer import Colors
# ======================================================================================================================
# Setup command completion and run the UI
# ======================================================================================================================
def launch_ui(args):
# Setup tab completion
try:
import readline
except ImportError:
print('%s[!] Module \'readline\' not available. Tab complete disabled.%s' % (Colors.R, Colors.N))
else:
import rlcompleter
if 'libedit' in readline.__doc__:
readline.parse_and_bind('bind ^I rl_complete')
else:
readline.parse_and_bind('tab: complete')
readline.set_completer_delims(re.sub('[/-]', '', readline.get_completer_delims()))
# Instantiate the UI object
x = cli.CLI()
# Check for and run script session
if args.script_file:
x.do_resource(args.script_file)
# Run the UI
try:
x.cmdloop()
except KeyboardInterrupt:
print('')
# ======================================================================================================================
# MAIN
# ======================================================================================================================
def main():
description = '%%(prog)s - %s %s' % (cli.__author__, cli.__email__)
    parser = argparse.ArgumentParser(description=description)
    # argparse dropped the ArgumentParser(version=...) keyword in Python 3;
    # expose the version through an explicit argument instead
    parser.add_argument('--version', action='version', version=cli.__version__)
parser.add_argument('-r', help='load commands from a resource file', metavar='filename', dest='script_file', action='store')
args = parser.parse_args()
launch_ui(args)
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
from copy import copy
def _rec(arr, n, m):
    # Recursively yield every copy of arr in which the first n positions
    # range over the values 0..m-1, excluding the unmodified arr itself.
    if n < 1:
        return
    yield from _rec(arr, n-1, m)
    for i in range(1, m):
        arr_loop = copy(arr)
        arr_loop[n-1] = i
        yield arr_loop
        yield from _rec(arr_loop, n-1, m)
def main(n, m):
    # Enumerate all m**n arrays of length n with entries in range(m):
    # yield the all-zero array, then let _rec() produce the rest.
    arr = [0]*n
    yield arr
    yield from _rec(arr, n, m)
if __name__ == "__main__":
for arr in main(4, 3):
print(arr)
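# For example, main(2, 2) yields every length-2 array over {0, 1}:
# [0, 0], [1, 0], [0, 1], [1, 1]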
|
nilq/baby-python
|
python
|
___assertEqual(0**17, 0)
___assertEqual(17**0, 1)
___assertEqual(0**0, 1)
___assertEqual(17**1, 17)
___assertEqual(2**10, 1024)
___assertEqual(2**-2, 0.25)
|
nilq/baby-python
|
python
|
from libqtile.backend.x11 import core
def test_keys(display):
assert "a" in core.get_keys()
assert "shift" in core.get_modifiers()
def test_no_two_qtiles(manager):
try:
core.Core(manager.display).finalize()
except core.ExistingWMException:
pass
else:
raise Exception("expected an error on multiple qtiles connecting")
|
nilq/baby-python
|
python
|
# Copyright (C) 2017-2019 New York University,
# University at Buffalo,
# Illinois Institute of Technology.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import re
from vizier.core.util import dump_json, load_json
from vizier.datastore.annotation.base import DatasetAnnotation
from vizier.datastore.annotation.dataset import DatasetMetadata
from vizier.datastore.dataset import DatasetHandle, DatasetColumn, DatasetRow
from vizier.datastore.mimir.reader import MimirDatasetReader
import vizier.mimir as mimir
"""Mimir annotation keys."""
ANNO_UNCERTAIN = 'mimir:uncertain'
"""Value casts for SQL update statements."""
CAST_TRUE = 'CAST(1 AS BOOL)'
CAST_FALSE = 'CAST(0 AS BOOL)'
"""Compiled regular expressions to identify valid date and datetime values.
Note that this does not check if a date string actually specifies a valid
calendar date. But it appears that Mimir accepts any string that follows this
format."""
DATE_FORMAT = re.compile(r'^\d{4}-\d\d?-\d\d?$')
DATETIME_FORMAT = re.compile(r'^\d{4}-\d\d?-\d\d? \d\d?:\d\d?:\d\d?(\.\d+)?$')
class MimirDatasetColumn(DatasetColumn):
"""Column in a dataset that is stored as a Mimir table or view. Given that
column names are not necessarily unique in a dataset, there is a need to
maintain a mapping of dataset names to attribute names for tables/views in
the relational database.
Attributes
----------
identifier: int
Unique column identifier
name: string
Name of column in the dataset
name_in_rdb: string
Name of the corresponding attribute in a relational table or views
data_type: string, optional
String representation of the column type in the database. By now the
following data_type values are expected: date (format yyyy-MM-dd), int,
varchar, real, and datetime (format yyyy-MM-dd hh:mm:ss:zzzz).
"""
def __init__(self, identifier=None, name_in_dataset=None, name_in_rdb=None, data_type=None):
"""Initialize the dataset column.
Parameters
----------
identifier: int
Unique column identifier
name_in_dataset: string
Name of column in the dataset
name_in_rdb: string, optional
Name of the corresponding attribute in a relational table or views
data_type: string, optional
Identifier for data type of column values. Default is String
"""
# Ensure that a valid data type is given
super(MimirDatasetColumn, self).__init__(
identifier=identifier,
name=name_in_dataset,
data_type=data_type
)
if name_in_rdb is not None:
    self.name_in_rdb = name_in_rdb.upper()
else:
    self.name_in_rdb = name_in_dataset.upper()
@staticmethod
def from_dict(doc):
"""Create dataset column object from dictionary serialization.
Parameters
----------
doc: dict
Dictionary serialization for dataset column object
Returns
-------
vizier.datastore.mimir.dataset.MimirDatasetColumn
"""
return MimirDatasetColumn(
identifier=doc['id'],
name_in_dataset=doc['name'],
name_in_rdb=doc['rdbName'],
data_type=doc['dataType']
)
def is_numeric(self):
"""Flag indicating if the data type of this column is numeric, i.e.,
integer or real.
Returns
-------
bool
"""
return self.data_type.lower() in ['int', 'real']
def to_dict(self):
"""Get dictionary serialization for dataset column object.
Returns
-------
dict
"""
return {
'id': self.identifier,
'name': self.name,
'rdbName': self.name_in_rdb,
'dataType': self.data_type
}
def to_sql_value(self, value):
"""Return an SQL conform representation of the given value based on the
column's data type.
Raises ValueError if the column type is numeric but the given value
cannot be converted to a numeric value.
Parameters
----------
value: string
Dataset cell value
Returns
-------
string
"""
# If the given value is None simply return the keyword NULL
if value is None:
return 'NULL'
# If the data type of the columns is numeric (int or real) try to
# convert the given argument to check whether it actually is a numeric
# value. Note that we always return a string because the result is
# intended to be concatenated as part of a SQL query string.
if self.data_type.lower() in ['int', 'real']:
try:
int(value)
return str(value)
except ValueError:
return str(float(value))
elif self.data_type.lower() == 'date':
if DATE_FORMAT.match(value):
return 'CAST(\'' + str(value) + '\' AS DATE)'
raise ValueError('not a date \'' + str(value) + '\'')
elif self.data_type.lower() == 'datetime':
if DATETIME_FORMAT.match(value):
return 'CAST(\'' + str(value) + '\' AS DATETIME)'
raise ValueError('not a datetime \'' + str(value) + '\'')
elif self.data_type.lower() == 'bool':
if isinstance(value, bool):
if value:
return CAST_TRUE
else:
return CAST_FALSE
elif isinstance(value, int):
if value == 1:
return CAST_TRUE
elif value == 0:
return CAST_FALSE
else:
str_val = str(value).upper()
if str_val in ['TRUE', '1']:
return CAST_TRUE
elif str_val in ['FALSE', '0']:
return CAST_FALSE
# If none of the previous tests returned a bool representation we
# raise an exception to trigger value casting.
raise ValueError('not a boolean value \'' + str(value) + '\'')
#elif self.data_type.lower() in ['date', 'datetime']:
#return self.data_type.upper() + '(\'' + str(value) + '\')'
# return 'DATE(\'' + str(value) + '\')'
# By default and in case the given value could not be transformed into
# the target format return a representation for a string value
return '\'' + str(value) + '\''
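# Illustrative examples for to_sql_value (added note, not part of the
# original code):
#   col = MimirDatasetColumn(name_in_dataset='age', data_type='int')
#   col.to_sql_value('42')          # -> '42'
#   col = MimirDatasetColumn(name_in_dataset='dob', data_type='date')
#   col.to_sql_value('2019-01-31')  # -> "CAST('2019-01-31' AS DATE)"
#   col.to_sql_value(None)          # -> 'NULL'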
MIMIR_ROWID_COL = MimirDatasetColumn(name_in_dataset='', data_type='rowid')
class MimirDatasetHandle(DatasetHandle):
"""Internal descriptor for datasets managed by the Mimir data store.
Contains mapping for column names from a dataset to the corresponding object
in a relational and a reference to the table or view that contains the
dataset.
"""
def __init__(
self, identifier, columns, table_name,
row_counter, annotations=None, name=None
):
"""Initialize the descriptor.
Parameters
----------
identifier: string
Unique dataset identifier
columns: list(vizier.datastore.mimir.MimirDatasetColumn)
List of column names in the dataset schema and their corresponding
names in the relational database table or view.
table_name: string
Reference to relational database table containing the dataset.
row_counter: int
Counter for unique row ids
annotations: vizier.datastore.annotation.dataset.DatasetMetadata
Annotations for dataset components
"""
super(MimirDatasetHandle, self).__init__(
identifier=identifier,
columns=columns,
row_count=row_counter,
annotations=annotations,
name=name
)
self.table_name = table_name
self.row_counter = row_counter
@staticmethod
def from_file(filename, annotations=None):
"""Read dataset from file. Expects the file to be in Json format which
is the default serialization format used by to_file().
Parameters
----------
filename: string
Name of the file to read.
annotations: vizier.datastore.annotation.dataset.DatasetMetadata, optional
Annotations for dataset components
Returns
-------
vizier.datastore.mimir.dataset.MimirDatasetHandle
"""
with open(filename, 'r') as f:
doc = load_json(f.read())
return MimirDatasetHandle(
identifier=doc['id'],
columns=[MimirDatasetColumn.from_dict(obj) for obj in doc['columns']],
table_name=doc['tableName'],
row_counter=doc['rowCounter']
)
def get_annotations(self, column_id=None, row_id=None):
"""Get list of annotations for a dataset component. If both identifier
equal -1 all annotations for a dataset are returned.
Parameters
----------
column_id: int, optional
Unique column identifier
row_id: string, optional
Unique row identifier
Returns
-------
list(vizier.datastore.annotation.base.DatasetAnnotation)
"""
if column_id is None and row_id is None:
# TODO: If there is an option to get all annotations from Mimir for
# all dataset cells we should add those annotations here. By now
# this command will only return user-defined annotations for the
# dataset.
annotations = []
sql = 'SELECT * '
sql += 'FROM ' + self.table_name + ' '
annoList = mimir.explainEverythingJson(sql)
for anno in annoList:
annotations.append(
DatasetAnnotation(
key=ANNO_UNCERTAIN,
value=anno
)
)
#return [item for sublist in map(lambda (i,x): self.annotations.for_column(i).values(), enumerate(self.columns)) for item in sublist]
#return self.annotations.values
return annotations
elif row_id is None:
return self.annotations.for_column(column_id)
elif column_id is None:
return self.annotations.for_row(row_id)
else:
annotations = self.annotations.for_cell(
column_id=column_id,
row_id=row_id
)
column = self.column_by_id(column_id)
sql = 'SELECT * '
sql += 'FROM ' + self.table_name + ' '
buffer = mimir.explainCell(sql, column.name_in_rdb, str(row_id))
has_reasons = len(buffer) > 0
if has_reasons:
for value in buffer:
value = value['english']
if value != '':
annotations.append(
DatasetAnnotation(
key=ANNO_UNCERTAIN,
value=value,
column_id=column_id,
row_id=row_id
)
)
return annotations
def max_row_id(self):
"""Get maximum identifier for all rows in the dataset. If the dataset
is empty the result is -1.
Returns
-------
int
"""
return self.row_counter
def reader(self, offset=0, limit=-1, rowid=None):
"""Get reader for the dataset to access the dataset rows. The optional
offset and limit parameters are used to retrieve only a subset of
rows.
Parameters
----------
offset: int, optional
Number of rows at the beginning of the list that are skipped.
limit: int, optional
Limits the number of rows that are returned.
Returns
-------
vizier.datastore.mimir.MimirDatasetReader
"""
return MimirDatasetReader(
table_name=self.table_name,
columns=self.columns,
offset=offset,
limit=limit,
rowid=rowid
)
def to_file(self, filename):
"""Write dataset to file. The default serialization format is Json.
Parameters
----------
filename: string
Name of the file to write
"""
doc = {
'id': self.identifier,
'columns': [col.to_dict() for col in self.columns],
'tableName': str(self.table_name),
'rowCounter': self.row_counter
}
with open(filename, 'w') as f:
dump_json(doc, f)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# author: https://blog.furas.pl
# date: 2020.07.08
#
import requests
import pandas as pd
url = "https://www.pokemondb.net/pokedex/all"
html = requests.get(url)
dfs = pd.read_html(html.text)
print(dfs)
|
nilq/baby-python
|
python
|
#
# Autogenerated by Thrift Compiler (0.9.3)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
import logging
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Iface:
def exists(self, table, tget):
"""
Test for the existence of columns in the table, as specified in the TGet.
@return true if the specified TGet matches one or more keys, false if not
Parameters:
- table: the table to check on
- tget: the TGet to check for
"""
pass
def existsAll(self, table, tgets):
"""
Test for the existence of columns in the table, as specified by the TGets.
This will return an array of booleans. Each value will be true if the related Get matches
one or more keys, false if not.
Parameters:
- table: the table to check on
- tgets: a list of TGets to check for
"""
pass
def get(self, table, tget):
"""
Method for getting data from a row.
If the row cannot be found an empty Result is returned.
This can be checked by the empty field of the TResult
@return the result
Parameters:
- table: the table to get from
- tget: the TGet to fetch
"""
pass
def getMultiple(self, table, tgets):
"""
Method for getting multiple rows.
If a row cannot be found there will be a null
value in the result list for that TGet at the
same position.
So the Results are in the same order as the TGets.
Parameters:
- table: the table to get from
- tgets: a list of TGets to fetch, the Result list
will have the Results at corresponding positions
or null if there was an error
"""
pass
def put(self, table, tput):
"""
Commit a TPut to a table.
Parameters:
- table: the table to put data in
- tput: the TPut to put
"""
pass
def checkAndPut(self, table, row, family, qualifier, value, tput):
"""
Atomically checks if a row/family/qualifier value matches the expected
value. If it does, it adds the TPut.
@return true if the new put was executed, false otherwise
Parameters:
- table: to check in and put to
- row: row to check
- family: column family to check
- qualifier: column qualifier to check
- value: the expected value, if not provided the
check is for the non-existence of the
column in question
- tput: the TPut to put if the check succeeds
"""
pass
def putMultiple(self, table, tputs):
"""
Commit a List of Puts to the table.
Parameters:
- table: the table to put data in
- tputs: a list of TPuts to commit
"""
pass
def deleteSingle(self, table, tdelete):
"""
Deletes as specified by the TDelete.
Note: "delete" is a reserved keyword and cannot be used in Thrift
thus the inconsistent naming scheme from the other functions.
Parameters:
- table: the table to delete from
- tdelete: the TDelete to delete
"""
pass
def deleteMultiple(self, table, tdeletes):
"""
Bulk commit a List of TDeletes to the table.
Throws a TIOError if any of the deletes fail.
Always returns an empty list for backwards compatibility.
Parameters:
- table: the table to delete from
- tdeletes: list of TDeletes to delete
"""
pass
def checkAndDelete(self, table, row, family, qualifier, value, tdelete):
"""
Atomically checks if a row/family/qualifier value matches the expected
value. If it does, it adds the delete.
@return true if the new delete was executed, false otherwise
Parameters:
- table: to check in and delete from
- row: row to check
- family: column family to check
- qualifier: column qualifier to check
- value: the expected value, if not provided the
check is for the non-existence of the
column in question
- tdelete: the TDelete to execute if the check succeeds
"""
pass
def increment(self, table, tincrement):
"""
Parameters:
- table: the table to increment the value on
- tincrement: the TIncrement to increment
"""
pass
def append(self, table, tappend):
"""
Parameters:
- table: the table to append the value on
- tappend: the TAppend to append
"""
pass
def openScanner(self, table, tscan):
"""
Get a Scanner for the provided TScan object.
@return Scanner Id to be used with other scanner procedures
Parameters:
- table: the table to get the Scanner for
- tscan: the scan object to get a Scanner for
"""
pass
def getScannerRows(self, scannerId, numRows):
"""
Grabs multiple rows from a Scanner.
@return Between zero and numRows TResults
Parameters:
- scannerId: the Id of the Scanner to return rows from. This is an Id returned from the openScanner function.
- numRows: number of rows to return
"""
pass
def closeScanner(self, scannerId):
"""
Closes the scanner. Should be called to free server side resources timely.
Typically close once the scanner is not needed anymore, i.e. after looping
over it to get all the required rows.
Parameters:
- scannerId: the Id of the Scanner to close *
"""
pass
def mutateRow(self, table, trowMutations):
"""
mutateRow performs multiple mutations atomically on a single row.
Parameters:
- table: table to apply the mutations
- trowMutations: mutations to apply
"""
pass
def getScannerResults(self, table, tscan, numRows):
"""
Get results for the provided TScan object.
This helper function opens a scanner, get the results and close the scanner.
@return between zero and numRows TResults
Parameters:
- table: the table to get the Scanner for
- tscan: the scan object to get a Scanner for
- numRows: number of rows to return
"""
pass
def getRegionLocation(self, table, row, reload):
"""
Given a table and a row get the location of the region that
would contain the given row key.
reload = true means the cache will be cleared and the location
will be fetched from meta.
Parameters:
- table
- row
- reload
"""
pass
def getAllRegionLocations(self, table):
"""
Get all of the region locations for a given table.
Parameters:
- table
"""
pass
def checkAndMutate(self, table, row, family, qualifier, compareOp, value, rowMutations):
"""
Atomically checks if a row/family/qualifier value matches the expected
value. If it does, it mutates the row.
@return true if the row was mutated, false otherwise
Parameters:
- table: to check in and delete from
- row: row to check
- family: column family to check
- qualifier: column qualifier to check
- compareOp: comparison to make on the value
- value: the expected value to be compared against, if not provided the
check is for the non-existence of the column in question
- rowMutations: row mutations to execute if the value matches
"""
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def exists(self, table, tget):
"""
Test for the existence of columns in the table, as specified in the TGet.
@return true if the specified TGet matches one or more keys, false if not
Parameters:
- table: the table to check on
- tget: the TGet to check for
"""
self.send_exists(table, tget)
return self.recv_exists()
def send_exists(self, table, tget):
self._oprot.writeMessageBegin('exists', TMessageType.CALL, self._seqid)
args = exists_args()
args.table = table
args.tget = tget
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_exists(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = exists_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "exists failed: unknown result")
def existsAll(self, table, tgets):
"""
Test for the existence of columns in the table, as specified by the TGets.
This will return an array of booleans. Each value will be true if the related Get matches
one or more keys, false if not.
Parameters:
- table: the table to check on
- tgets: a list of TGets to check for
"""
self.send_existsAll(table, tgets)
return self.recv_existsAll()
def send_existsAll(self, table, tgets):
self._oprot.writeMessageBegin('existsAll', TMessageType.CALL, self._seqid)
args = existsAll_args()
args.table = table
args.tgets = tgets
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_existsAll(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = existsAll_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "existsAll failed: unknown result")
def get(self, table, tget):
"""
Method for getting data from a row.
If the row cannot be found an empty Result is returned.
This can be checked by the empty field of the TResult
@return the result
Parameters:
- table: the table to get from
- tget: the TGet to fetch
"""
self.send_get(table, tget)
return self.recv_get()
def send_get(self, table, tget):
self._oprot.writeMessageBegin('get', TMessageType.CALL, self._seqid)
args = get_args()
args.table = table
args.tget = tget
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = get_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "get failed: unknown result")
def getMultiple(self, table, tgets):
"""
Method for getting multiple rows.
If a row cannot be found there will be a null
value in the result list for that TGet at the
same position.
So the Results are in the same order as the TGets.
Parameters:
- table: the table to get from
- tgets: a list of TGets to fetch, the Result list
will have the Results at corresponding positions
or null if there was an error
"""
self.send_getMultiple(table, tgets)
return self.recv_getMultiple()
def send_getMultiple(self, table, tgets):
self._oprot.writeMessageBegin('getMultiple', TMessageType.CALL, self._seqid)
args = getMultiple_args()
args.table = table
args.tgets = tgets
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getMultiple(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getMultiple_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getMultiple failed: unknown result")
def put(self, table, tput):
"""
Commit a TPut to a table.
Parameters:
- table: the table to put data in
- tput: the TPut to put
"""
self.send_put(table, tput)
self.recv_put()
def send_put(self, table, tput):
self._oprot.writeMessageBegin('put', TMessageType.CALL, self._seqid)
args = put_args()
args.table = table
args.tput = tput
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_put(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = put_result()
result.read(iprot)
iprot.readMessageEnd()
if result.io is not None:
raise result.io
return
def checkAndPut(self, table, row, family, qualifier, value, tput):
"""
Atomically checks if a row/family/qualifier value matches the expected
value. If it does, it adds the TPut.
@return true if the new put was executed, false otherwise
Parameters:
- table: to check in and put to
- row: row to check
- family: column family to check
- qualifier: column qualifier to check
- value: the expected value, if not provided the
check is for the non-existence of the
column in question
- tput: the TPut to put if the check succeeds
"""
self.send_checkAndPut(table, row, family, qualifier, value, tput)
return self.recv_checkAndPut()
def send_checkAndPut(self, table, row, family, qualifier, value, tput):
self._oprot.writeMessageBegin('checkAndPut', TMessageType.CALL, self._seqid)
args = checkAndPut_args()
args.table = table
args.row = row
args.family = family
args.qualifier = qualifier
args.value = value
args.tput = tput
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_checkAndPut(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = checkAndPut_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "checkAndPut failed: unknown result")
def putMultiple(self, table, tputs):
"""
Commit a List of Puts to the table.
Parameters:
- table: the table to put data in
- tputs: a list of TPuts to commit
"""
self.send_putMultiple(table, tputs)
self.recv_putMultiple()
def send_putMultiple(self, table, tputs):
self._oprot.writeMessageBegin('putMultiple', TMessageType.CALL, self._seqid)
args = putMultiple_args()
args.table = table
args.tputs = tputs
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_putMultiple(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = putMultiple_result()
result.read(iprot)
iprot.readMessageEnd()
if result.io is not None:
raise result.io
return
def deleteSingle(self, table, tdelete):
"""
Deletes as specified by the TDelete.
Note: "delete" is a reserved keyword and cannot be used in Thrift
thus the inconsistent naming scheme from the other functions.
Parameters:
- table: the table to delete from
- tdelete: the TDelete to delete
"""
self.send_deleteSingle(table, tdelete)
self.recv_deleteSingle()
def send_deleteSingle(self, table, tdelete):
self._oprot.writeMessageBegin('deleteSingle', TMessageType.CALL, self._seqid)
args = deleteSingle_args()
args.table = table
args.tdelete = tdelete
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_deleteSingle(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = deleteSingle_result()
result.read(iprot)
iprot.readMessageEnd()
if result.io is not None:
raise result.io
return
def deleteMultiple(self, table, tdeletes):
"""
Bulk commit a List of TDeletes to the table.
Throws a TIOError if any of the deletes fail.
Always returns an empty list for backwards compatibility.
Parameters:
- table: the table to delete from
- tdeletes: list of TDeletes to delete
"""
self.send_deleteMultiple(table, tdeletes)
return self.recv_deleteMultiple()
def send_deleteMultiple(self, table, tdeletes):
self._oprot.writeMessageBegin('deleteMultiple', TMessageType.CALL, self._seqid)
args = deleteMultiple_args()
args.table = table
args.tdeletes = tdeletes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_deleteMultiple(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = deleteMultiple_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "deleteMultiple failed: unknown result")
def checkAndDelete(self, table, row, family, qualifier, value, tdelete):
"""
Atomically checks if a row/family/qualifier value matches the expected
value. If it does, it adds the delete.
@return true if the new delete was executed, false otherwise
Parameters:
- table: to check in and delete from
- row: row to check
- family: column family to check
- qualifier: column qualifier to check
- value: the expected value, if not provided the
check is for the non-existence of the
column in question
- tdelete: the TDelete to execute if the check succeeds
"""
self.send_checkAndDelete(table, row, family, qualifier, value, tdelete)
return self.recv_checkAndDelete()
def send_checkAndDelete(self, table, row, family, qualifier, value, tdelete):
self._oprot.writeMessageBegin('checkAndDelete', TMessageType.CALL, self._seqid)
args = checkAndDelete_args()
args.table = table
args.row = row
args.family = family
args.qualifier = qualifier
args.value = value
args.tdelete = tdelete
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_checkAndDelete(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = checkAndDelete_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "checkAndDelete failed: unknown result")
def increment(self, table, tincrement):
"""
Parameters:
- table: the table to increment the value on
- tincrement: the TIncrement to increment
"""
self.send_increment(table, tincrement)
return self.recv_increment()
def send_increment(self, table, tincrement):
self._oprot.writeMessageBegin('increment', TMessageType.CALL, self._seqid)
args = increment_args()
args.table = table
args.tincrement = tincrement
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_increment(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = increment_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "increment failed: unknown result")
def append(self, table, tappend):
"""
Parameters:
- table: the table to append the value on
- tappend: the TAppend to append
"""
self.send_append(table, tappend)
return self.recv_append()
def send_append(self, table, tappend):
self._oprot.writeMessageBegin('append', TMessageType.CALL, self._seqid)
args = append_args()
args.table = table
args.tappend = tappend
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_append(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = append_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "append failed: unknown result")
def openScanner(self, table, tscan):
"""
Get a Scanner for the provided TScan object.
@return Scanner Id to be used with other scanner procedures
Parameters:
- table: the table to get the Scanner for
- tscan: the scan object to get a Scanner for
"""
self.send_openScanner(table, tscan)
return self.recv_openScanner()
def send_openScanner(self, table, tscan):
self._oprot.writeMessageBegin('openScanner', TMessageType.CALL, self._seqid)
args = openScanner_args()
args.table = table
args.tscan = tscan
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_openScanner(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = openScanner_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "openScanner failed: unknown result")
def getScannerRows(self, scannerId, numRows):
"""
Grabs multiple rows from a Scanner.
@return Between zero and numRows TResults
Parameters:
- scannerId: the Id of the Scanner to return rows from. This is an Id returned from the openScanner function.
- numRows: number of rows to return
"""
self.send_getScannerRows(scannerId, numRows)
return self.recv_getScannerRows()
def send_getScannerRows(self, scannerId, numRows):
self._oprot.writeMessageBegin('getScannerRows', TMessageType.CALL, self._seqid)
args = getScannerRows_args()
args.scannerId = scannerId
args.numRows = numRows
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getScannerRows(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getScannerRows_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
if result.ia is not None:
raise result.ia
raise TApplicationException(TApplicationException.MISSING_RESULT, "getScannerRows failed: unknown result")
def closeScanner(self, scannerId):
"""
Closes the scanner. Should be called to free server side resources timely.
Typically close once the scanner is not needed anymore, i.e. after looping
over it to get all the required rows.
Parameters:
- scannerId: the Id of the Scanner to close *
"""
self.send_closeScanner(scannerId)
self.recv_closeScanner()
def send_closeScanner(self, scannerId):
self._oprot.writeMessageBegin('closeScanner', TMessageType.CALL, self._seqid)
args = closeScanner_args()
args.scannerId = scannerId
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_closeScanner(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = closeScanner_result()
result.read(iprot)
iprot.readMessageEnd()
if result.io is not None:
raise result.io
if result.ia is not None:
raise result.ia
return
def mutateRow(self, table, trowMutations):
"""
mutateRow performs multiple mutations atomically on a single row.
Parameters:
- table: table to apply the mutations
- trowMutations: mutations to apply
"""
self.send_mutateRow(table, trowMutations)
self.recv_mutateRow()
def send_mutateRow(self, table, trowMutations):
self._oprot.writeMessageBegin('mutateRow', TMessageType.CALL, self._seqid)
args = mutateRow_args()
args.table = table
args.trowMutations = trowMutations
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_mutateRow(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = mutateRow_result()
result.read(iprot)
iprot.readMessageEnd()
if result.io is not None:
raise result.io
return
def getScannerResults(self, table, tscan, numRows):
"""
Get results for the provided TScan object.
This helper function opens a scanner, get the results and close the scanner.
@return between zero and numRows TResults
Parameters:
- table: the table to get the Scanner for
- tscan: the scan object to get a Scanner for
- numRows: number of rows to return
"""
self.send_getScannerResults(table, tscan, numRows)
return self.recv_getScannerResults()
def send_getScannerResults(self, table, tscan, numRows):
self._oprot.writeMessageBegin('getScannerResults', TMessageType.CALL, self._seqid)
args = getScannerResults_args()
args.table = table
args.tscan = tscan
args.numRows = numRows
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getScannerResults(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getScannerResults_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getScannerResults failed: unknown result")
def getRegionLocation(self, table, row, reload):
"""
Given a table and a row get the location of the region that
would contain the given row key.
reload = true means the cache will be cleared and the location
will be fetched from meta.
Parameters:
- table
- row
- reload
"""
self.send_getRegionLocation(table, row, reload)
return self.recv_getRegionLocation()
def send_getRegionLocation(self, table, row, reload):
self._oprot.writeMessageBegin('getRegionLocation', TMessageType.CALL, self._seqid)
args = getRegionLocation_args()
args.table = table
args.row = row
args.reload = reload
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getRegionLocation(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getRegionLocation_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getRegionLocation failed: unknown result")
def getAllRegionLocations(self, table):
"""
Get all of the region locations for a given table.
Parameters:
- table
"""
self.send_getAllRegionLocations(table)
return self.recv_getAllRegionLocations()
def send_getAllRegionLocations(self, table):
self._oprot.writeMessageBegin('getAllRegionLocations', TMessageType.CALL, self._seqid)
args = getAllRegionLocations_args()
args.table = table
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getAllRegionLocations(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getAllRegionLocations_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getAllRegionLocations failed: unknown result")
def checkAndMutate(self, table, row, family, qualifier, compareOp, value, rowMutations):
"""
Atomically checks if a row/family/qualifier value matches the expected
value. If it does, it mutates the row.
@return true if the row was mutated, false otherwise
Parameters:
- table: to check in and delete from
- row: row to check
- family: column family to check
- qualifier: column qualifier to check
- compareOp: comparison to make on the value
- value: the expected value to be compared against, if not provided the
check is for the non-existence of the column in question
- rowMutations: row mutations to execute if the value matches
"""
self.send_checkAndMutate(table, row, family, qualifier, compareOp, value, rowMutations)
return self.recv_checkAndMutate()
def send_checkAndMutate(self, table, row, family, qualifier, compareOp, value, rowMutations):
self._oprot.writeMessageBegin('checkAndMutate', TMessageType.CALL, self._seqid)
args = checkAndMutate_args()
args.table = table
args.row = row
args.family = family
args.qualifier = qualifier
args.compareOp = compareOp
args.value = value
args.rowMutations = rowMutations
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_checkAndMutate(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = checkAndMutate_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "checkAndMutate failed: unknown result")
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["exists"] = Processor.process_exists
self._processMap["existsAll"] = Processor.process_existsAll
self._processMap["get"] = Processor.process_get
self._processMap["getMultiple"] = Processor.process_getMultiple
self._processMap["put"] = Processor.process_put
self._processMap["checkAndPut"] = Processor.process_checkAndPut
self._processMap["putMultiple"] = Processor.process_putMultiple
self._processMap["deleteSingle"] = Processor.process_deleteSingle
self._processMap["deleteMultiple"] = Processor.process_deleteMultiple
self._processMap["checkAndDelete"] = Processor.process_checkAndDelete
self._processMap["increment"] = Processor.process_increment
self._processMap["append"] = Processor.process_append
self._processMap["openScanner"] = Processor.process_openScanner
self._processMap["getScannerRows"] = Processor.process_getScannerRows
self._processMap["closeScanner"] = Processor.process_closeScanner
self._processMap["mutateRow"] = Processor.process_mutateRow
self._processMap["getScannerResults"] = Processor.process_getScannerResults
self._processMap["getRegionLocation"] = Processor.process_getRegionLocation
self._processMap["getAllRegionLocations"] = Processor.process_getAllRegionLocations
self._processMap["checkAndMutate"] = Processor.process_checkAndMutate
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_exists(self, seqid, iprot, oprot):
args = exists_args()
args.read(iprot)
iprot.readMessageEnd()
result = exists_result()
try:
result.success = self._handler.exists(args.table, args.tget)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("exists", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_existsAll(self, seqid, iprot, oprot):
args = existsAll_args()
args.read(iprot)
iprot.readMessageEnd()
result = existsAll_result()
try:
result.success = self._handler.existsAll(args.table, args.tgets)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("existsAll", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_get(self, seqid, iprot, oprot):
args = get_args()
args.read(iprot)
iprot.readMessageEnd()
result = get_result()
try:
result.success = self._handler.get(args.table, args.tget)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("get", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getMultiple(self, seqid, iprot, oprot):
args = getMultiple_args()
args.read(iprot)
iprot.readMessageEnd()
result = getMultiple_result()
try:
result.success = self._handler.getMultiple(args.table, args.tgets)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("getMultiple", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_put(self, seqid, iprot, oprot):
args = put_args()
args.read(iprot)
iprot.readMessageEnd()
result = put_result()
try:
self._handler.put(args.table, args.tput)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("put", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_checkAndPut(self, seqid, iprot, oprot):
args = checkAndPut_args()
args.read(iprot)
iprot.readMessageEnd()
result = checkAndPut_result()
try:
result.success = self._handler.checkAndPut(args.table, args.row, args.family, args.qualifier, args.value, args.tput)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("checkAndPut", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_putMultiple(self, seqid, iprot, oprot):
args = putMultiple_args()
args.read(iprot)
iprot.readMessageEnd()
result = putMultiple_result()
try:
self._handler.putMultiple(args.table, args.tputs)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("putMultiple", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_deleteSingle(self, seqid, iprot, oprot):
args = deleteSingle_args()
args.read(iprot)
iprot.readMessageEnd()
result = deleteSingle_result()
try:
self._handler.deleteSingle(args.table, args.tdelete)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("deleteSingle", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_deleteMultiple(self, seqid, iprot, oprot):
args = deleteMultiple_args()
args.read(iprot)
iprot.readMessageEnd()
result = deleteMultiple_result()
try:
result.success = self._handler.deleteMultiple(args.table, args.tdeletes)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("deleteMultiple", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_checkAndDelete(self, seqid, iprot, oprot):
args = checkAndDelete_args()
args.read(iprot)
iprot.readMessageEnd()
result = checkAndDelete_result()
try:
result.success = self._handler.checkAndDelete(args.table, args.row, args.family, args.qualifier, args.value, args.tdelete)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("checkAndDelete", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_increment(self, seqid, iprot, oprot):
args = increment_args()
args.read(iprot)
iprot.readMessageEnd()
result = increment_result()
try:
result.success = self._handler.increment(args.table, args.tincrement)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("increment", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_append(self, seqid, iprot, oprot):
args = append_args()
args.read(iprot)
iprot.readMessageEnd()
result = append_result()
try:
result.success = self._handler.append(args.table, args.tappend)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("append", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_openScanner(self, seqid, iprot, oprot):
args = openScanner_args()
args.read(iprot)
iprot.readMessageEnd()
result = openScanner_result()
try:
result.success = self._handler.openScanner(args.table, args.tscan)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("openScanner", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getScannerRows(self, seqid, iprot, oprot):
args = getScannerRows_args()
args.read(iprot)
iprot.readMessageEnd()
result = getScannerRows_result()
try:
result.success = self._handler.getScannerRows(args.scannerId, args.numRows)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except TIllegalArgument as ia:
msg_type = TMessageType.REPLY
result.ia = ia
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("getScannerRows", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_closeScanner(self, seqid, iprot, oprot):
args = closeScanner_args()
args.read(iprot)
iprot.readMessageEnd()
result = closeScanner_result()
try:
self._handler.closeScanner(args.scannerId)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except TIllegalArgument as ia:
msg_type = TMessageType.REPLY
result.ia = ia
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("closeScanner", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
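  # Dispatch convention for the process_* methods above and below: exceptions
  # declared in the IDL (TIOError everywhere, plus TIllegalArgument for the
  # scanner calls) are attached to the result struct and returned as a normal
  # REPLY, while any undeclared exception is logged and surfaces to the client
  # as a generic TApplicationException with msg_type EXCEPTION. Transport
  # failures and interpreter shutdown signals are re-raised so the server
  # loop can deal with them.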
def process_mutateRow(self, seqid, iprot, oprot):
args = mutateRow_args()
args.read(iprot)
iprot.readMessageEnd()
result = mutateRow_result()
try:
self._handler.mutateRow(args.table, args.trowMutations)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("mutateRow", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getScannerResults(self, seqid, iprot, oprot):
args = getScannerResults_args()
args.read(iprot)
iprot.readMessageEnd()
result = getScannerResults_result()
try:
result.success = self._handler.getScannerResults(args.table, args.tscan, args.numRows)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("getScannerResults", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getRegionLocation(self, seqid, iprot, oprot):
args = getRegionLocation_args()
args.read(iprot)
iprot.readMessageEnd()
result = getRegionLocation_result()
try:
result.success = self._handler.getRegionLocation(args.table, args.row, args.reload)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("getRegionLocation", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getAllRegionLocations(self, seqid, iprot, oprot):
args = getAllRegionLocations_args()
args.read(iprot)
iprot.readMessageEnd()
result = getAllRegionLocations_result()
try:
result.success = self._handler.getAllRegionLocations(args.table)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("getAllRegionLocations", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_checkAndMutate(self, seqid, iprot, oprot):
args = checkAndMutate_args()
args.read(iprot)
iprot.readMessageEnd()
result = checkAndMutate_result()
try:
result.success = self._handler.checkAndMutate(args.table, args.row, args.family, args.qualifier, args.compareOp, args.value, args.rowMutations)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except TIOError as io:
msg_type = TMessageType.REPLY
result.io = io
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("checkAndMutate", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
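# A minimal serving sketch (an illustration, not part of the generated API):
# the Processor above can be mounted on the stock Thrift Python runtime as
# shown below. The handler argument is an assumption -- any object that
# implements the Iface methods called by the process_* dispatchers will do.
def _example_serve(handler, port=9090):
  """Serve this module's Processor with TSimpleServer (illustrative only)."""
  from thrift.transport import TSocket
  from thrift.server import TServer
  processor = Processor(handler)
  # Plain blocking server socket with buffered transports and the binary
  # protocol, matching the protocol this generated code is written against.
  transport = TSocket.TServerSocket(port=port)
  tfactory = TTransport.TBufferedTransportFactory()
  pfactory = TBinaryProtocol.TBinaryProtocolFactory()
  server = TServer.TSimpleServer(processor, transport, tfactory, pfactory)
  server.serve()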
# HELPER FUNCTIONS AND STRUCTURES
class exists_args:
"""
Attributes:
- table: the table to check on
- tget: the TGet to check for
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
(2, TType.STRUCT, 'tget', (TGet, TGet.thrift_spec), None, ), # 2
)
def __init__(self, table=None, tget=None,):
self.table = table
self.tget = tget
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.tget = TGet()
self.tget.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('exists_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
if self.tget is not None:
oprot.writeFieldBegin('tget', TType.STRUCT, 2)
self.tget.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocol.TProtocolException(message='Required field table is unset!')
if self.tget is None:
raise TProtocol.TProtocolException(message='Required field tget is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.table)
value = (value * 31) ^ hash(self.tget)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class exists_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('exists_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
value = (value * 31) ^ hash(self.io)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
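# Round-trip sketch (an illustration, not generated code): the *_args and
# *_result classes in this section are plain serializable structs, so they
# can be exercised against an in-memory transport without a live server.
def _example_roundtrip_exists_args(table, tget):
  """Encode an exists_args through TBinaryProtocol and decode a copy."""
  original = exists_args(table=table, tget=tget)
  original.validate()
  write_buf = TTransport.TMemoryBuffer()
  original.write(TBinaryProtocol.TBinaryProtocol(write_buf))
  read_buf = TTransport.TMemoryBuffer(write_buf.getvalue())
  decoded = exists_args()
  decoded.read(TBinaryProtocol.TBinaryProtocol(read_buf))
  assert decoded == original  # generated __eq__ compares field dicts
  return decoded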
class existsAll_args:
"""
Attributes:
- table: the table to check on
- tgets: a list of TGets to check for
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
(2, TType.LIST, 'tgets', (TType.STRUCT,(TGet, TGet.thrift_spec)), None, ), # 2
)
def __init__(self, table=None, tgets=None,):
self.table = table
self.tgets = tgets
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.tgets = []
(_etype129, _size126) = iprot.readListBegin()
for _i130 in xrange(_size126):
_elem131 = TGet()
_elem131.read(iprot)
self.tgets.append(_elem131)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('existsAll_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
if self.tgets is not None:
oprot.writeFieldBegin('tgets', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.tgets))
for iter132 in self.tgets:
iter132.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocol.TProtocolException(message='Required field table is unset!')
if self.tgets is None:
raise TProtocol.TProtocolException(message='Required field tgets is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.table)
value = (value * 31) ^ hash(self.tgets)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class existsAll_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.BOOL,None), None, ), # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype136, _size133) = iprot.readListBegin()
for _i137 in xrange(_size133):
_elem138 = iprot.readBool()
self.success.append(_elem138)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('existsAll_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.BOOL, len(self.success))
for iter139 in self.success:
oprot.writeBool(iter139)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
value = (value * 31) ^ hash(self.io)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class get_args:
"""
Attributes:
- table: the table to get from
- tget: the TGet to fetch
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
(2, TType.STRUCT, 'tget', (TGet, TGet.thrift_spec), None, ), # 2
)
def __init__(self, table=None, tget=None,):
self.table = table
self.tget = tget
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.tget = TGet()
self.tget.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
if self.tget is not None:
oprot.writeFieldBegin('tget', TType.STRUCT, 2)
self.tget.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocol.TProtocolException(message='Required field table is unset!')
if self.tget is None:
raise TProtocol.TProtocolException(message='Required field tget is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.table)
value = (value * 31) ^ hash(self.tget)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class get_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (TResult, TResult.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = TResult()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
value = (value * 31) ^ hash(self.io)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getMultiple_args:
"""
Attributes:
- table: the table to get from
- tgets: a list of TGets to fetch; the Result list
will have the Results at corresponding positions,
or null if there was an error
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
(2, TType.LIST, 'tgets', (TType.STRUCT,(TGet, TGet.thrift_spec)), None, ), # 2
)
def __init__(self, table=None, tgets=None,):
self.table = table
self.tgets = tgets
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.tgets = []
(_etype143, _size140) = iprot.readListBegin()
for _i144 in xrange(_size140):
_elem145 = TGet()
_elem145.read(iprot)
self.tgets.append(_elem145)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getMultiple_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
if self.tgets is not None:
oprot.writeFieldBegin('tgets', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.tgets))
for iter146 in self.tgets:
iter146.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocol.TProtocolException(message='Required field table is unset!')
if self.tgets is None:
raise TProtocol.TProtocolException(message='Required field tgets is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.table)
value = (value * 31) ^ hash(self.tgets)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getMultiple_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TResult, TResult.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype150, _size147) = iprot.readListBegin()
for _i151 in xrange(_size147):
_elem152 = TResult()
_elem152.read(iprot)
self.success.append(_elem152)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getMultiple_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter153 in self.success:
iter153.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
value = (value * 31) ^ hash(self.io)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class put_args:
"""
Attributes:
- table: the table to put data in
- tput: the TPut to put
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
(2, TType.STRUCT, 'tput', (TPut, TPut.thrift_spec), None, ), # 2
)
def __init__(self, table=None, tput=None,):
self.table = table
self.tput = tput
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.tput = TPut()
self.tput.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('put_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
if self.tput is not None:
oprot.writeFieldBegin('tput', TType.STRUCT, 2)
self.tput.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocol.TProtocolException(message='Required field table is unset!')
if self.tput is None:
raise TProtocol.TProtocolException(message='Required field tput is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.table)
value = (value * 31) ^ hash(self.tput)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class put_result:
"""
Attributes:
- io
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
)
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('put_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.io)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
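# Note: put_result (like the other *_result structs generated for void IDL
# methods) carries only the declared TIOError field; an instance whose io
# is None is the wire form of a successful, empty reply.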
class checkAndPut_args:
"""
Attributes:
- table: the table to check in and put to
- row: row to check
- family: column family to check
- qualifier: column qualifier to check
- value: the expected value; if not provided, the
check is for the non-existence of the
column in question
- tput: the TPut to put if the check succeeds
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.STRING, 'family', None, None, ), # 3
(4, TType.STRING, 'qualifier', None, None, ), # 4
(5, TType.STRING, 'value', None, None, ), # 5
(6, TType.STRUCT, 'tput', (TPut, TPut.thrift_spec), None, ), # 6
)
def __init__(self, table=None, row=None, family=None, qualifier=None, value=None, tput=None,):
self.table = table
self.row = row
self.family = family
self.qualifier = qualifier
self.value = value
self.tput = tput
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.family = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.qualifier = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.value = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRUCT:
self.tput = TPut()
self.tput.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('checkAndPut_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.family is not None:
oprot.writeFieldBegin('family', TType.STRING, 3)
oprot.writeString(self.family)
oprot.writeFieldEnd()
if self.qualifier is not None:
oprot.writeFieldBegin('qualifier', TType.STRING, 4)
oprot.writeString(self.qualifier)
oprot.writeFieldEnd()
if self.value is not None:
oprot.writeFieldBegin('value', TType.STRING, 5)
oprot.writeString(self.value)
oprot.writeFieldEnd()
if self.tput is not None:
oprot.writeFieldBegin('tput', TType.STRUCT, 6)
self.tput.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocol.TProtocolException(message='Required field table is unset!')
if self.row is None:
raise TProtocol.TProtocolException(message='Required field row is unset!')
if self.family is None:
raise TProtocol.TProtocolException(message='Required field family is unset!')
if self.qualifier is None:
raise TProtocol.TProtocolException(message='Required field qualifier is unset!')
if self.tput is None:
raise TProtocol.TProtocolException(message='Required field tput is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.table)
value = (value * 31) ^ hash(self.row)
value = (value * 31) ^ hash(self.family)
value = (value * 31) ^ hash(self.qualifier)
value = (value * 31) ^ hash(self.value)
value = (value * 31) ^ hash(self.tput)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class checkAndPut_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('checkAndPut_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
value = (value * 31) ^ hash(self.io)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class putMultiple_args:
"""
Attributes:
- table: the table to put data in
- tputs: a list of TPuts to commit
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
(2, TType.LIST, 'tputs', (TType.STRUCT,(TPut, TPut.thrift_spec)), None, ), # 2
)
def __init__(self, table=None, tputs=None,):
self.table = table
self.tputs = tputs
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.tputs = []
(_etype157, _size154) = iprot.readListBegin()
for _i158 in xrange(_size154):
_elem159 = TPut()
_elem159.read(iprot)
self.tputs.append(_elem159)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('putMultiple_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
if self.tputs is not None:
oprot.writeFieldBegin('tputs', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.tputs))
for iter160 in self.tputs:
iter160.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocol.TProtocolException(message='Required field table is unset!')
if self.tputs is None:
raise TProtocol.TProtocolException(message='Required field tputs is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.table)
value = (value * 31) ^ hash(self.tputs)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class putMultiple_result:
"""
Attributes:
- io
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
)
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('putMultiple_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.io)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteSingle_args:
"""
Attributes:
- table: the table to delete from
- tdelete: the TDelete to delete
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
(2, TType.STRUCT, 'tdelete', (TDelete, TDelete.thrift_spec), None, ), # 2
)
def __init__(self, table=None, tdelete=None,):
self.table = table
self.tdelete = tdelete
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.tdelete = TDelete()
self.tdelete.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteSingle_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
if self.tdelete is not None:
oprot.writeFieldBegin('tdelete', TType.STRUCT, 2)
self.tdelete.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocol.TProtocolException(message='Required field table is unset!')
if self.tdelete is None:
raise TProtocol.TProtocolException(message='Required field tdelete is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.table)
value = (value * 31) ^ hash(self.tdelete)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteSingle_result:
"""
Attributes:
- io
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
)
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteSingle_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.io)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteMultiple_args:
"""
Attributes:
- table: the table to delete from
- tdeletes: list of TDeletes to delete
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
(2, TType.LIST, 'tdeletes', (TType.STRUCT,(TDelete, TDelete.thrift_spec)), None, ), # 2
)
def __init__(self, table=None, tdeletes=None,):
self.table = table
self.tdeletes = tdeletes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.tdeletes = []
(_etype164, _size161) = iprot.readListBegin()
for _i165 in xrange(_size161):
_elem166 = TDelete()
_elem166.read(iprot)
self.tdeletes.append(_elem166)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteMultiple_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
if self.tdeletes is not None:
oprot.writeFieldBegin('tdeletes', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.tdeletes))
for iter167 in self.tdeletes:
iter167.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocol.TProtocolException(message='Required field table is unset!')
if self.tdeletes is None:
raise TProtocol.TProtocolException(message='Required field tdeletes is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.table)
value = (value * 31) ^ hash(self.tdeletes)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteMultiple_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TDelete, TDelete.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype171, _size168) = iprot.readListBegin()
for _i172 in xrange(_size168):
_elem173 = TDelete()
_elem173.read(iprot)
self.success.append(_elem173)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteMultiple_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter174 in self.success:
iter174.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
value = (value * 31) ^ hash(self.io)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class checkAndDelete_args:
"""
Attributes:
- table: the table to check in and delete from
- row: row to check
- family: column family to check
- qualifier: column qualifier to check
- value: the expected value; if not provided, the
check is for the non-existence of the
column in question
- tdelete: the TDelete to execute if the check succeeds
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.STRING, 'family', None, None, ), # 3
(4, TType.STRING, 'qualifier', None, None, ), # 4
(5, TType.STRING, 'value', None, None, ), # 5
(6, TType.STRUCT, 'tdelete', (TDelete, TDelete.thrift_spec), None, ), # 6
)
def __init__(self, table=None, row=None, family=None, qualifier=None, value=None, tdelete=None,):
self.table = table
self.row = row
self.family = family
self.qualifier = qualifier
self.value = value
self.tdelete = tdelete
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.family = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.qualifier = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.value = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRUCT:
self.tdelete = TDelete()
self.tdelete.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('checkAndDelete_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.family is not None:
oprot.writeFieldBegin('family', TType.STRING, 3)
oprot.writeString(self.family)
oprot.writeFieldEnd()
if self.qualifier is not None:
oprot.writeFieldBegin('qualifier', TType.STRING, 4)
oprot.writeString(self.qualifier)
oprot.writeFieldEnd()
if self.value is not None:
oprot.writeFieldBegin('value', TType.STRING, 5)
oprot.writeString(self.value)
oprot.writeFieldEnd()
if self.tdelete is not None:
oprot.writeFieldBegin('tdelete', TType.STRUCT, 6)
self.tdelete.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocol.TProtocolException(message='Required field table is unset!')
if self.row is None:
raise TProtocol.TProtocolException(message='Required field row is unset!')
if self.family is None:
raise TProtocol.TProtocolException(message='Required field family is unset!')
if self.qualifier is None:
raise TProtocol.TProtocolException(message='Required field qualifier is unset!')
if self.tdelete is None:
raise TProtocol.TProtocolException(message='Required field tdelete is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.table)
value = (value * 31) ^ hash(self.row)
value = (value * 31) ^ hash(self.family)
value = (value * 31) ^ hash(self.qualifier)
value = (value * 31) ^ hash(self.value)
value = (value * 31) ^ hash(self.tdelete)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class checkAndDelete_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('checkAndDelete_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
value = (value * 31) ^ hash(self.io)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class increment_args:
"""
Attributes:
- table: the table to increment the value on
- tincrement: the TIncrement to increment
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
(2, TType.STRUCT, 'tincrement', (TIncrement, TIncrement.thrift_spec), None, ), # 2
)
def __init__(self, table=None, tincrement=None,):
self.table = table
self.tincrement = tincrement
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.tincrement = TIncrement()
self.tincrement.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('increment_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
if self.tincrement is not None:
oprot.writeFieldBegin('tincrement', TType.STRUCT, 2)
self.tincrement.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocol.TProtocolException(message='Required field table is unset!')
if self.tincrement is None:
raise TProtocol.TProtocolException(message='Required field tincrement is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.table)
value = (value * 31) ^ hash(self.tincrement)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class increment_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (TResult, TResult.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = TResult()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('increment_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
value = (value * 31) ^ hash(self.io)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class append_args:
"""
Attributes:
- table: the table to append the value on
- tappend: the TAppend to append
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
(2, TType.STRUCT, 'tappend', (TAppend, TAppend.thrift_spec), None, ), # 2
)
def __init__(self, table=None, tappend=None,):
self.table = table
self.tappend = tappend
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.tappend = TAppend()
self.tappend.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('append_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
if self.tappend is not None:
oprot.writeFieldBegin('tappend', TType.STRUCT, 2)
self.tappend.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocol.TProtocolException(message='Required field table is unset!')
if self.tappend is None:
raise TProtocol.TProtocolException(message='Required field tappend is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.table)
value = (value * 31) ^ hash(self.tappend)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class append_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (TResult, TResult.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = TResult()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('append_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
value = (value * 31) ^ hash(self.io)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class openScanner_args:
"""
Attributes:
- table: the table to get the Scanner for
- tscan: the scan object to get a Scanner for
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
(2, TType.STRUCT, 'tscan', (TScan, TScan.thrift_spec), None, ), # 2
)
def __init__(self, table=None, tscan=None,):
self.table = table
self.tscan = tscan
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.tscan = TScan()
self.tscan.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('openScanner_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
if self.tscan is not None:
oprot.writeFieldBegin('tscan', TType.STRUCT, 2)
self.tscan.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocol.TProtocolException(message='Required field table is unset!')
if self.tscan is None:
raise TProtocol.TProtocolException(message='Required field tscan is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.table)
value = (value * 31) ^ hash(self.tscan)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class openScanner_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('openScanner_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
value = (value * 31) ^ hash(self.io)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getScannerRows_args:
"""
Attributes:
- scannerId: the Id of the Scanner to return rows from. This is an Id returned from the openScanner function.
- numRows: number of rows to return
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'scannerId', None, None, ), # 1
(2, TType.I32, 'numRows', None, 1, ), # 2
)
def __init__(self, scannerId=None, numRows=thrift_spec[2][4],):
self.scannerId = scannerId
self.numRows = numRows
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.scannerId = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.numRows = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getScannerRows_args')
if self.scannerId is not None:
oprot.writeFieldBegin('scannerId', TType.I32, 1)
oprot.writeI32(self.scannerId)
oprot.writeFieldEnd()
if self.numRows is not None:
oprot.writeFieldBegin('numRows', TType.I32, 2)
oprot.writeI32(self.numRows)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.scannerId is None:
raise TProtocol.TProtocolException(message='Required field scannerId is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.scannerId)
value = (value * 31) ^ hash(self.numRows)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getScannerRows_result:
"""
Attributes:
- success
- io
- ia: if the scannerId is invalid
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TResult, TResult.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ia', (TIllegalArgument, TIllegalArgument.thrift_spec), None, ), # 2
)
def __init__(self, success=None, io=None, ia=None,):
self.success = success
self.io = io
self.ia = ia
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype178, _size175) = iprot.readListBegin()
for _i179 in xrange(_size175):
_elem180 = TResult()
_elem180.read(iprot)
self.success.append(_elem180)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ia = TIllegalArgument()
self.ia.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getScannerRows_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter181 in self.success:
iter181.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
if self.ia is not None:
oprot.writeFieldBegin('ia', TType.STRUCT, 2)
self.ia.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
value = (value * 31) ^ hash(self.io)
value = (value * 31) ^ hash(self.ia)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class closeScanner_args:
"""
Attributes:
- scannerId: the Id of the Scanner to close
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'scannerId', None, None, ), # 1
)
def __init__(self, scannerId=None,):
self.scannerId = scannerId
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.scannerId = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('closeScanner_args')
if self.scannerId is not None:
oprot.writeFieldBegin('scannerId', TType.I32, 1)
oprot.writeI32(self.scannerId)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.scannerId is None:
raise TProtocol.TProtocolException(message='Required field scannerId is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.scannerId)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class closeScanner_result:
"""
Attributes:
- io
- ia: if the scannerId is invalid
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ia', (TIllegalArgument, TIllegalArgument.thrift_spec), None, ), # 2
)
def __init__(self, io=None, ia=None,):
self.io = io
self.ia = ia
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ia = TIllegalArgument()
self.ia.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('closeScanner_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
if self.ia is not None:
oprot.writeFieldBegin('ia', TType.STRUCT, 2)
self.ia.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.io)
value = (value * 31) ^ hash(self.ia)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
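# --- Hedged usage sketch (editor's addition, not part of the generated file) ---
# The three struct pairs above describe the scanner lifecycle. Against a
# generated THBaseService client (the `client` parameter below is
# hypothetical and not defined in this module), the flow would look
# roughly like this; method names and signatures mirror the *_args structs.
def _scan_example(client, table, tscan):
    scanner_id = client.openScanner(table, tscan)  # openScanner_result.success is an I32 handle
    try:
        while True:
            rows = client.getScannerRows(scanner_id, 10)  # list of TResult
            if not rows:
                break
            for row in rows:
                pass  # consume each TResult here
    finally:
        client.closeScanner(scanner_id)  # may raise TIllegalArgument for a stale id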
class mutateRow_args:
"""
Attributes:
- table: the table to apply the mutations to
- trowMutations: mutations to apply
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
(2, TType.STRUCT, 'trowMutations', (TRowMutations, TRowMutations.thrift_spec), None, ), # 2
)
def __init__(self, table=None, trowMutations=None,):
self.table = table
self.trowMutations = trowMutations
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.trowMutations = TRowMutations()
self.trowMutations.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('mutateRow_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
if self.trowMutations is not None:
oprot.writeFieldBegin('trowMutations', TType.STRUCT, 2)
self.trowMutations.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocol.TProtocolException(message='Required field table is unset!')
if self.trowMutations is None:
raise TProtocol.TProtocolException(message='Required field trowMutations is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.table)
value = (value * 31) ^ hash(self.trowMutations)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class mutateRow_result:
"""
Attributes:
- io
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
)
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('mutateRow_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.io)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getScannerResults_args:
"""
Attributes:
- table: the table to get the Scanner for
- tscan: the scan object to get a Scanner for
- numRows: number of rows to return
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
(2, TType.STRUCT, 'tscan', (TScan, TScan.thrift_spec), None, ), # 2
(3, TType.I32, 'numRows', None, 1, ), # 3
)
def __init__(self, table=None, tscan=None, numRows=thrift_spec[3][4],):
self.table = table
self.tscan = tscan
self.numRows = numRows
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.tscan = TScan()
self.tscan.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.numRows = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getScannerResults_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
if self.tscan is not None:
oprot.writeFieldBegin('tscan', TType.STRUCT, 2)
self.tscan.write(oprot)
oprot.writeFieldEnd()
if self.numRows is not None:
oprot.writeFieldBegin('numRows', TType.I32, 3)
oprot.writeI32(self.numRows)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocol.TProtocolException(message='Required field table is unset!')
if self.tscan is None:
raise TProtocol.TProtocolException(message='Required field tscan is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.table)
value = (value * 31) ^ hash(self.tscan)
value = (value * 31) ^ hash(self.numRows)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getScannerResults_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TResult, TResult.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype185, _size182) = iprot.readListBegin()
for _i186 in xrange(_size182):
_elem187 = TResult()
_elem187.read(iprot)
self.success.append(_elem187)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getScannerResults_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter188 in self.success:
iter188.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
value = (value * 31) ^ hash(self.io)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRegionLocation_args:
"""
Attributes:
- table
- row
- reload
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.BOOL, 'reload', None, None, ), # 3
)
def __init__(self, table=None, row=None, reload=None,):
self.table = table
self.row = row
self.reload = reload
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.BOOL:
self.reload = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRegionLocation_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.reload is not None:
oprot.writeFieldBegin('reload', TType.BOOL, 3)
oprot.writeBool(self.reload)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocol.TProtocolException(message='Required field table is unset!')
if self.row is None:
raise TProtocol.TProtocolException(message='Required field row is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.table)
value = (value * 31) ^ hash(self.row)
value = (value * 31) ^ hash(self.reload)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRegionLocation_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (THRegionLocation, THRegionLocation.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = THRegionLocation()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRegionLocation_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
value = (value * 31) ^ hash(self.io)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getAllRegionLocations_args:
"""
Attributes:
- table
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
)
def __init__(self, table=None,):
self.table = table
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getAllRegionLocations_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocol.TProtocolException(message='Required field table is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.table)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getAllRegionLocations_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(THRegionLocation, THRegionLocation.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype192, _size189) = iprot.readListBegin()
for _i193 in xrange(_size189):
_elem194 = THRegionLocation()
_elem194.read(iprot)
self.success.append(_elem194)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getAllRegionLocations_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter195 in self.success:
iter195.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
value = (value * 31) ^ hash(self.io)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class checkAndMutate_args:
"""
Attributes:
- table: the table to check in and mutate
- row: row to check
- family: column family to check
- qualifier: column qualifier to check
- compareOp: comparison to make on the value
- value: the expected value to be compared against, if not provided the
check is for the non-existence of the column in question
- rowMutations: row mutations to execute if the value matches
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.STRING, 'family', None, None, ), # 3
(4, TType.STRING, 'qualifier', None, None, ), # 4
(5, TType.I32, 'compareOp', None, None, ), # 5
(6, TType.STRING, 'value', None, None, ), # 6
(7, TType.STRUCT, 'rowMutations', (TRowMutations, TRowMutations.thrift_spec), None, ), # 7
)
def __init__(self, table=None, row=None, family=None, qualifier=None, compareOp=None, value=None, rowMutations=None,):
self.table = table
self.row = row
self.family = family
self.qualifier = qualifier
self.compareOp = compareOp
self.value = value
self.rowMutations = rowMutations
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.family = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.qualifier = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.compareOp = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRING:
self.value = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.STRUCT:
self.rowMutations = TRowMutations()
self.rowMutations.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('checkAndMutate_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.family is not None:
oprot.writeFieldBegin('family', TType.STRING, 3)
oprot.writeString(self.family)
oprot.writeFieldEnd()
if self.qualifier is not None:
oprot.writeFieldBegin('qualifier', TType.STRING, 4)
oprot.writeString(self.qualifier)
oprot.writeFieldEnd()
if self.compareOp is not None:
oprot.writeFieldBegin('compareOp', TType.I32, 5)
oprot.writeI32(self.compareOp)
oprot.writeFieldEnd()
if self.value is not None:
oprot.writeFieldBegin('value', TType.STRING, 6)
oprot.writeString(self.value)
oprot.writeFieldEnd()
if self.rowMutations is not None:
oprot.writeFieldBegin('rowMutations', TType.STRUCT, 7)
self.rowMutations.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocol.TProtocolException(message='Required field table is unset!')
if self.row is None:
raise TProtocol.TProtocolException(message='Required field row is unset!')
if self.family is None:
raise TProtocol.TProtocolException(message='Required field family is unset!')
if self.qualifier is None:
raise TProtocol.TProtocolException(message='Required field qualifier is unset!')
if self.compareOp is None:
raise TProtocol.TProtocolException(message='Required field compareOp is unset!')
if self.rowMutations is None:
raise TProtocol.TProtocolException(message='Required field rowMutations is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.table)
value = (value * 31) ^ hash(self.row)
value = (value * 31) ^ hash(self.family)
value = (value * 31) ^ hash(self.qualifier)
value = (value * 31) ^ hash(self.compareOp)
value = (value * 31) ^ hash(self.value)
value = (value * 31) ^ hash(self.rowMutations)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class checkAndMutate_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('checkAndMutate_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
value = (value * 31) ^ hash(self.io)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
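# --- Hedged usage sketch (editor's addition, not part of the generated file) ---
# A minimal round-trip through TBinaryProtocol for one of the generated
# structs above, assuming the Thrift runtime already referenced in this
# module (TTransport.TMemoryBuffer, TBinaryProtocol.TBinaryProtocol) is
# available. Defined as helpers, never called at import time.
def _roundtrip(struct):
    # serialize into an in-memory transport ...
    write_buf = TTransport.TMemoryBuffer()
    struct.write(TBinaryProtocol.TBinaryProtocol(write_buf))
    # ... then decode the bytes back into a fresh instance of the same class
    read_buf = TTransport.TMemoryBuffer(write_buf.getvalue())
    clone = struct.__class__()
    clone.read(TBinaryProtocol.TBinaryProtocol(read_buf))
    return clone

def _roundtrip_example():
    args = closeScanner_args(scannerId=42)
    args.validate()                  # raises TProtocolException if scannerId were unset
    assert _roundtrip(args) == args  # __eq__ compares instance __dict__s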
|
nilq/baby-python
|
python
|
import random
from cocos.actions import Move, CallFunc, Delay
from cocos.layer import Layer, director
from cocos.sprite import Sprite
import cocos.collision_model as CollisionModel
from app import gVariables
from app.audioManager import SFX
class Enemy(Layer):
def __init__(self):
super(Enemy, self).__init__()
def set(self, gScene):
self.gScene = gScene
self.R = gScene.R # resource bundle
self.batch = gScene.batch # shared sprite batch
self.player = gScene.PLAYER # player layer (wraps the player sprite)
self.collisionManager = gScene.collisionManager
# Enemy Lists
self.enemy_lists = set()
# Schedule Timer
self.schedule_interval(self.generateEnemyLists, 1) # spawn a new enemy every second
self.schedule(self.checkForCollision)
def generateEnemyLists(self, dt):
if self.player.is_playing:
index = random.randint(0, 3)
EO = EnemyObject((self, index))
self.collisionManager.add(EO)
self.batch.add(EO)
self.enemy_lists.add(EO)
def checkForCollision(self, dt):
eOBJ = set()
for enemyObj in self.enemy_lists:
if not enemyObj.isDead:
enemyObj.cshape.center = enemyObj.position
collisions = self.collisionManager.objs_colliding(enemyObj)
if collisions:
if self.player.PLAYER in collisions:
enemyObj.die(True)
self.player.getHit()
if enemyObj.position[0] < 0 - enemyObj.width:
enemyObj.visible = False
if not enemyObj.visible:
eOBJ.add(enemyObj)
# remove culled enemies from the active set
for obj in eOBJ:
self.enemy_lists.remove(obj)
class EnemyObject(Sprite):
def __init__(self, e):
super(EnemyObject, self).__init__(e[0].R.ENEMY[e[1]])
self.e = e # (parent Enemy layer, sprite index)
self.isDead = False
self.scale = 0.7
# spawn just past the right edge, at a random on-screen height
self.position = (director._window_virtual_width,
random.randint(30, director._window_virtual_height - 34 - self.height / 2))
self.velocity = (-100, 0)
self.deadtemplate = Delay(0.5) + CallFunc(self.destroy)
self.do(Move())
#Collision Shape
self.cshape = CollisionModel.AARectShape(self.position, self.width/2, self.height/2)
def die(self, collidewithplayer=False):
try:
if gVariables.g_IS_FX:
SFX(self.e[0].R._SFX[1])
if collidewithplayer:
self.e[0].gScene.HUD.sLists[self.e[0].gScene.PLAYER.total_lives - 1].visible = False
self.e[0].gScene.collisionManager.remove_tricky(self)
self.e[0].player.total_kill += 1
self.image = self.e[0].R.EFFECT[0]
self.isDead = True
self.velocity = (0, 0)
self.do(self.deadtemplate)
except Exception:
print "ERR" # swallow per-enemy cleanup errors so the game loop keeps running
def destroy(self):
self.visible = False
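# --- Hedged aside (editor's addition, not part of the game code) ---
# checkForCollision above gathers dead/off-screen enemies into a scratch
# set (eOBJ) and removes them after the loop, because mutating a set while
# iterating over it raises RuntimeError. The same pattern in isolation:
def _cull_example():
    active = set([1, 2, 3, 4])
    to_drop = set(x for x in active if x % 2 == 0)  # decide first ...
    active -= to_drop                               # ... remove afterwards
    assert active == set([1, 3])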
|
nilq/baby-python
|
python
|
# SPDX-License-Identifier: Apache-2.0
"""
Tests pipelines nested within pipelines.
"""
from textwrap import dedent
import unittest
from io import StringIO
import numpy as np
import pandas
try:
from sklearn.compose import ColumnTransformer
except ImportError:
# not available in 0.19
ColumnTransformer = None
try:
from sklearn.impute import SimpleImputer
except ImportError:
from sklearn.preprocessing import Imputer as SimpleImputer
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import (
MinMaxScaler, RobustScaler, StandardScaler, OneHotEncoder)
from sklearn.feature_extraction.text import CountVectorizer
from skl2onnx import convert_sklearn, to_onnx
from skl2onnx.common.data_types import FloatTensorType, StringTensorType
from test_utils import dump_data_and_model, TARGET_OPSET
class TestSklearnPipelineWithinPipeline(unittest.TestCase):
def test_pipeline_pca_pipeline_minmax(self):
model = Pipeline(
memory=None,
steps=[
(
"PCA",
PCA(
copy=True,
iterated_power="auto",
n_components=0.15842105263157896,
random_state=None,
tol=0.0,
svd_solver="auto",
whiten=False,
),
),
(
"Pipeline",
Pipeline(
memory=None,
steps=[(
"MinMax scaler",
MinMaxScaler(
copy=True,
feature_range=(0, 3.7209871159509307),
),
)],
),
),
],
)
data = np.array([[0, 0], [0, 0], [1, 1], [1, 1]], dtype=np.float32)
y = [0, 0, 1, 1]
model.fit(data, y)
model_onnx = convert_sklearn(
model,
"pipelinewithinpipeline",
[("input", FloatTensorType(data.shape))],
target_opset=TARGET_OPSET
)
self.assertTrue(model_onnx is not None)
dump_data_and_model(
data,
model,
model_onnx,
basename="SklearnPipelinePcaPipelineMinMax",
allow_failure="StrictVersion(onnxruntime.__version__)"
" <= StrictVersion('0.2.1')",
)
def test_pipeline_pca_pipeline_none_lin(self):
model = Pipeline(
memory=None,
steps=[
(
"PCA",
PCA(
copy=True,
iterated_power="auto",
n_components=0.15842105263157896,
random_state=None,
tol=0.0,
svd_solver="auto",
whiten=False,
),
),
(
"Pipeline",
Pipeline(
memory=None,
steps=[
(
"MinMax scaler",
MinMaxScaler(
copy=True,
feature_range=(0, 3.7209871159509307),
),
),
("logreg", LogisticRegression(solver="liblinear")),
],
),
),
],
)
data = np.array([[0, 0], [0, 0], [1, 1], [1, 1]], dtype=np.float32)
y = [0, 0, 1, 1]
model.fit(data, y)
model_onnx = convert_sklearn(
model,
"pipelinewithinpipeline",
[("input", FloatTensorType(data.shape))],
target_opset=TARGET_OPSET
)
self.assertTrue(model_onnx is not None)
dump_data_and_model(
data,
model,
model_onnx,
basename="SklearnPipelinePcaPipelineMinMaxLogReg",
allow_failure="StrictVersion(onnxruntime.__version__)"
" <= StrictVersion('0.2.1')",
)
def test_pipeline_pca_pipeline_multinomial(self):
model = Pipeline(
memory=None,
steps=[
(
"PCA",
PCA(
copy=True,
iterated_power="auto",
n_components=2,
random_state=None,
svd_solver="auto",
tol=0.0,
whiten=False,
),
),
(
"Pipeline",
Pipeline(
memory=None,
steps=[
(
"MinMax scaler",
MinMaxScaler(
copy=True,
feature_range=(0, 3.7209871159509307),
),
),
(
"MultinomialNB",
MultinomialNB(
alpha=0.7368421052631579,
class_prior=None,
fit_prior=True,
),
),
],
),
),
],
)
data = np.array(
[[0, 0, 0], [0, 0, 0.1], [1, 1, 1.1], [1, 1.1, 1]],
dtype=np.float32,
)
y = [0, 0, 1, 1]
model.fit(data, y)
model_onnx = convert_sklearn(
model,
"pipelinewithinpipeline",
[("input", FloatTensorType(data.shape))],
target_opset=TARGET_OPSET
)
self.assertTrue(model_onnx is not None)
dump_data_and_model(
data,
model,
model_onnx,
basename="SklearnPipelinePcaPipelineMinMaxNB2",
allow_failure="StrictVersion(onnxruntime.__version__)"
" <= StrictVersion('0.2.1')",
)
def test_pipeline_pca_pipeline_multinomial_none(self):
model = Pipeline(
memory=None,
steps=[
(
"PCA",
PCA(
copy=True,
iterated_power="auto",
n_components=0.15842105263157896,
random_state=None,
tol=0.0,
svd_solver="auto",
whiten=False,
),
),
(
"Pipeline",
Pipeline(
memory=None,
steps=[
(
"MinMax scaler",
MinMaxScaler(
copy=True,
feature_range=(0, 3.7209871159509307),
),
),
(
"MultinomialNB",
MultinomialNB(
alpha=0.7368421052631579,
class_prior=None,
fit_prior=True,
),
),
],
),
),
],
)
data = np.array([[0, 0], [0, 0], [1, 1], [1, 1]], dtype=np.float32)
y = [0, 0, 1, 1]
model.fit(data, y)
model_onnx = convert_sklearn(
model,
"pipelinewithinpipeline",
[("input", FloatTensorType(data.shape))],
target_opset=TARGET_OPSET
)
self.assertTrue(model_onnx is not None)
dump_data_and_model(
data,
model,
model_onnx,
basename="SklearnPipelinePcaPipelineMinMaxNBNone",
allow_failure="StrictVersion(onnxruntime.__version__)"
" <= StrictVersion('0.2.1')",
)
@unittest.skipIf(
ColumnTransformer is None,
reason="ColumnTransformer not available in 0.19")
def test_pipeline_column_transformer_pipeline_imputer_scaler_lr(self):
X = np.array([[1, 2], [3, np.nan], [3, 0]], dtype=np.float32)
y = np.array([1, 0, 1])
model = Pipeline([
(
"ct",
ColumnTransformer([
(
"pipeline1",
Pipeline([
("imputer", SimpleImputer()),
("scaler", StandardScaler()),
]),
[0],
),
(
"pipeline2",
Pipeline([
("imputer", SimpleImputer()),
("scaler", RobustScaler()),
]),
[1],
),
]),
),
("lr", LogisticRegression(solver="liblinear")),
])
model.fit(X, y)
model_onnx = convert_sklearn(
model,
"pipelinewithinpipeline",
[("input", FloatTensorType([None, X.shape[1]]))],
target_opset=TARGET_OPSET
)
self.assertTrue(model_onnx is not None)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnPipelineCTPipelineImputerScalerLR",
allow_failure="StrictVersion(onnxruntime.__version__)"
" <= StrictVersion('0.2.1')",
)
@unittest.skipIf(
ColumnTransformer is None,
reason="ColumnTransformer not available in 0.19")
def test_complex_pipeline(self):
df = pandas.read_csv(StringIO(dedent("""
CAT1,CAT2,TEXT
A,M,clean
B,N,text
A,M,cleaning
B,N,normalizing""")))
X_train = df
y_train = np.array([[1, 0, 1, 0], [1, 0, 1, 0]]).T
categorical_features = ['CAT1', 'CAT2']
textual_feature = 'TEXT'
preprocessor = ColumnTransformer(
transformers=[
('cat_transform', OneHotEncoder(handle_unknown='ignore'),
categorical_features),
('count_vector', Pipeline(steps=[
('count_vect', CountVectorizer(
max_df=0.8, min_df=0.05, max_features=1000))]),
textual_feature)])
preprocessor.fit(X_train, y_train)
initial_type = [('CAT1', StringTensorType([None, 1])),
('CAT2', StringTensorType([None, 1])),
('TEXTs', StringTensorType([None, 1]))]
with self.assertRaises(RuntimeError):
to_onnx(preprocessor, initial_types=initial_type,
target_opset=TARGET_OPSET)
initial_type = [('CAT1', StringTensorType([None, 1])),
('CAT2', StringTensorType([None, 1])),
('TEXT', StringTensorType([None, 1]))]
onx = to_onnx(preprocessor, initial_types=initial_type,
target_opset=TARGET_OPSET)
dump_data_and_model(
X_train, preprocessor, onx,
basename="SklearnPipelineComplex")
if __name__ == "__main__":
unittest.main()
|
nilq/baby-python
|
python
|
'''
Created on 09.10.2017
@author: Henrik Pilz
'''
from xml.sax import make_parser
from datamodel import Feature, FeatureSet, Mime, OrderDetails, Price, PriceDetails, Product, ProductDetails, Reference, TreatmentClass
from exporter.xml.bmecatExporter import BMEcatExporter
from importer.xml.bmecatImportHandler import BMEcatImportHandler
from resolver import DTDResolver
from test.handler.basicHandlerTest import BasicHandlerTest
class XmlTransformationNonStrictValidationTest(BasicHandlerTest):
def testCreateBMEcatFullData(self):
article = Product()
article.productId = '12345'
article.details = ProductDetails()
article.details.deliveryTime = 10
article.details.description = 'Test Description\nTest Description Line 2 '
article.details.ean = '12345678901234'
article.details.keywords = [ 'Keyword 1', 'Keyword 2']
article.details.manufacturerArticleId = '09876'
article.details.manufacturerName = 'Manufacturer'
article.details.articleStatus = "Bla"
tc = TreatmentClass()
tc.classType = 'TestClass'
tc.value = '12345'
article.details.specialTreatmentClasses = [ tc ]
article.details.title = ' Test Article '
article.details.supplierAltId = '23456'
reference = Reference()
reference.referenceType = 'accessory'
reference.supplierArticleId = '09876'
article.addReference(reference)
# Images
mime = Mime()
mime.mimeType = 'image/jpg'
mime.order = 1
mime.purpose = 'detail'
mime.source = 'manufacturer/Test.jpg'
article.addMime(mime)
mime = Mime()
mime.mimeType = 'image/jpg'
mime.order = 2
mime.purpose = 'detail'
mime.source = 'manufacturer/Test2.jpg'
article.addMime(mime)
# Delivery details
article.orderDetails = OrderDetails()
article.orderDetails.contentUnit = 'C62'
article.orderDetails.orderUnit = 'C62'
article.orderDetails.packingQuantity = 25
article.orderDetails.priceQuantity = 100
article.orderDetails.quantityMin = 4
article.orderDetails.quantityInterval = 1
# Prices
priceDetails = PriceDetails()
price1 = Price()
price1.amount = 10.50
price1.priceType = 'net_customer'
price1.lowerBound = 1
price1.tax = 0.19
priceDetails.addPrice(price1)
price2 = Price()
price2.amount = 17.50
price2.priceType = 'net_list'
price2.lowerBound = 1
price2.tax = 0.19
priceDetails.addPrice(price2)
article.addPriceDetails(priceDetails)
# Attributes
featureSet = FeatureSet()
feature = Feature()
feature.name = "Test1"
feature.addValue(10)
featureSet.addFeature(feature)
feature = Feature()
feature.name = "Test2"
feature.addValue("Blabla")
featureSet.addFeature(feature)
feature = Feature()
feature.name = "Test3"
feature.addValue("Blub")
featureSet.addFeature(feature)
feature = Feature()
feature.name = "Test4"
feature.addValue("Zack")
featureSet.addFeature(feature)
article.addFeatureSet(featureSet)
self.runAndCheck(article, 'testCreateBMEcatFullData.xml', 'nonstrict')
def testCreateBMEcatMinimumDataPlusKeywords(self):
article = Product()
article.productId = '12345'
article.details = ProductDetails()
article.details.title = 'Test Article'
article.orderDetails = OrderDetails()
article.orderDetails.contentUnit = 'C62'
article.orderDetails.orderUnit = 'C62'
article.orderDetails.packingQuantity = 25
article.orderDetails.priceQuantity = 100
article.orderDetails.quantityMin = 4
article.orderDetails.quantityInterval = 1
priceDetails = PriceDetails()
price = Price()
price.amount = 10.50
price.priceType = 'net_customer'
price.lowerBound = 1
price.tax = 0.19
priceDetails.addPrice(price)
article.addPriceDetails(priceDetails)
article.addKeyword("Testkeyword")
self.runAndCheck(article, 'testCreateBMEcatMinimumDataPlusKeywords.xml', 'nonstrict')
def testCreateBMEcatMinimumDataFloatDescription(self):
article = Product()
article.productId = '12345'
article.details = ProductDetails()
article.details.title = 'Test Article'
article.details.description = 123.567
article.orderDetails = OrderDetails()
article.orderDetails.contentUnit = 'C62'
article.orderDetails.orderUnit = 'C62'
article.orderDetails.packingQuantity = 25
article.orderDetails.priceQuantity = 100
article.orderDetails.quantityMin = 4
article.orderDetails.quantityInterval = 1
priceDetails = PriceDetails()
price = Price()
price.amount = 10.50
price.priceType = 'net_customer'
price.lowerBound = 1
price.tax = 0.19
priceDetails.addPrice(price)
article.addPriceDetails(priceDetails)
self.runAndCheck(article, 'testCreateBMEcatMinimumDataFloatDescription.xml', 'nonstrict')
def testCreateBMEcatMinimumData(self):
article = Product()
article.productId = '12345'
article.details = ProductDetails()
article.details.title = 'Test Article'
article.orderDetails = OrderDetails()
article.orderDetails.contentUnit = 'C62'
article.orderDetails.orderUnit = 'C62'
article.orderDetails.packingQuantity = 25
article.orderDetails.priceQuantity = 100
article.orderDetails.quantityMin = 4
article.orderDetails.quantityInterval = 1
priceDetails = PriceDetails()
price = Price()
price.amount = 10.50
price.priceType = 'net_customer'
price.lowerBound = 1
price.tax = 0.19
priceDetails.addPrice(price)
article.addPriceDetails(priceDetails)
self.runAndCheck(article, 'testCreateBMEcatMinimumData.xml', 'nonstrict')
def runTestMethod(self, article, filename, validation='nonstrict'):
articles = { 'new' : [ article ]}
# export
bmecatExporter = BMEcatExporter(articles, filename, validation)
bmecatExporter.writeBMEcatAsXML()
# import again
parser = make_parser()
importHandler = BMEcatImportHandler("%Y-%m-%d")
parser.setContentHandler(importHandler)
parser.setEntityResolver(DTDResolver())
parser.parse("file:" + filename)
return importHandler.articles['new']
# if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
# unittest.main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# Written by Daniel Oaks <daniel@danieloaks.net>
# Released under the ISC license
import unittest
from girc import formatting
class FormattingTestCase(unittest.TestCase):
"""Tests our formatting."""
def setUp(self):
errmsg = 'formatting.{} does not exist!'
self.assertTrue(formatting.escape, msg=errmsg.format('escape'))
self.assertTrue(formatting.unescape, msg=errmsg.format('unescape'))
def test_removing_formatting(self):
self.assertEqual(formatting.remove_formatting_codes('Lol \x03cool \x032tests\x0f!', irc=True),
'Lol cool tests!')
self.assertEqual(formatting.remove_formatting_codes('Lol $c[]cool $c[blue]tests$r!'),
'Lol cool tests!')
self.assertEqual(formatting.remove_formatting_codes('Lol $ccoo$c3,15l $c12,15tests$r!$$y'),
'Lol cool tests!$y')
self.assertEqual(formatting.remove_formatting_codes('Lol co${yolo}ol ${$}tests!$'),
'Lol cool $tests!')
def test_colour_codes(self):
self.assertEqual(formatting._ctos(5), 'brown')
self.assertEqual(formatting._ctos(452), 'unknown: 452')
def test_escaping(self):
self.assertEqual(formatting.escape('Strawberries are \x02cool\x0f'),
'Strawberries are $bcool$r')
self.assertEqual(formatting.escape('Such \x1dcool\x1d things\x02!\x0f'),
'Such $icool$i things$b!$r')
self.assertEqual(formatting.escape('Lol \x03cool \x032tests\x0f!'),
'Lol $c[]cool $c[blue]tests$r!')
self.assertEqual(formatting.escape('Lol cool\x03'),
'Lol cool$c[]')
self.assertEqual(formatting.escape('Lol \x034cool \x032,tests\x0f!'),
'Lol $c[red]cool $c[blue],tests$r!')
self.assertEqual(formatting.escape('\x02Lol \x034,2cool \x033,8tests\x0f!'),
'$bLol $c[red,blue]cool $c[green,yellow]tests$r!')
def test_unescaping(self):
self.assertEqual(formatting.unescape('Strawberries are $$cool$r'),
'Strawberries are $cool\x0f')
self.assertEqual(formatting.unescape('Strawberries are $bcool$r'),
'Strawberries are \x02cool\x0f')
self.assertEqual(formatting.unescape('Such $icool$i things$b!$r'),
'Such \x1dcool\x1d things\x02!\x0f')
self.assertEqual(formatting.unescape('How cool$c'),
'How cool\x03')
self.assertEqual(formatting.unescape('Lol $c[red]cool $c[blue]tests$r!'),
'Lol \x034cool \x032tests\x0f!')
self.assertEqual(formatting.unescape('$bLol $c[red,blue]cool $c[green,yellow]tests$r!'),
'\x02Lol \x034,2cool \x033,8tests\x0f!')
# testing custom unescaping function
def custom_unescape(*args, **kwargs):
return '{}-{}'.format(','.join(args),
','.join('{}:{}'.format(k, v) for k, v in kwargs.items()))
extra_dict = {
'custom': [custom_unescape, ['r', 't'], {'34': 'dfg'}],
}
self.assertEqual(formatting.unescape('lolo[${custom}]', extra_format_dict=extra_dict),
'lolo[r,t-34:dfg]')
extra_dict = {
'custom': [custom_unescape, ['wer', 'hgd']],
}
self.assertEqual(formatting.unescape('fff--${custom}]', extra_format_dict=extra_dict),
'fff--wer,hgd-]')
extra_dict = {
'custom': [custom_unescape],
}
self.assertEqual(formatting.unescape('abcd=${custom}=', extra_format_dict=extra_dict),
'abcd=-=')
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# 2017 vby
############################ vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
#---------------------------------------------------------------------------------------------------
import sys
import os, errno
import math
import collections
import subprocess
import json
import random
import pdb
from main.network.psn import PSN
from main.na_utils import *
from .tmodel import tmodel
from .nacommon import *
def loadjson(filename):
import json
with open(filename, 'r') as fh:
return json.load(fh)
#---------------------------------------------------------------------------------------------------
from collections import namedtuple
Edge = namedtuple("Edge", ["src", "dst", "fpp", "nop"])
Annotation = namedtuple("annotation", ["name", "lineno", "level"])
Stmt = namedtuple("Stmt", ["taskname", "annotation"])
class EdgeM(object):
def __init__(self, src, dst, fpp, nop):
self.src = src
self.dst = dst
self.fpp = fpp
self.nop = nop
def __repr__(self):
return '{} {} {} {}'.format(self.src, self.dst, self.nop, self.fpp)
class amodel(object):
def __init__(self, nafile, nadefsfile, toolroot, types, hwkdecls, tasks, taskgroups, tinstances_unexpanded, tdefs_original, sysargs):
self.args = sysargs
self.toolroot = toolroot
self.nafile_path = None
self.nafile_postcpp = nafile
self.namacros_file = nadefsfile
self.types = types
self.hwkdecls = hwkdecls
self.tasks = tasks
self.taskgroups = taskgroups
self.tinstances_unexpanded = tinstances_unexpanded
self.tdefs_original = tdefs_original
self.tmodels = []
self.type_table = collections.OrderedDict()
self.typetags = collections.OrderedDict()
self.interfpga_links = []
self.psn = PSN(sysargs)
self.global_task_map = collections.OrderedDict()
self.task_partition_map = collections.OrderedDict()
self.original_taskmap_json = collections.OrderedDict()
self.hls_bviwrappers_outdir = None
"""
some default internal options
"""
# use explicit fifo buffering for flit-i/o between host and the network
self.use_buffering_tofrom_host = False
if self.args.buffered_sr_ports:
self.use_buffering_tofrom_host = True
self.buffer_sizing_specs = collections.OrderedDict()
"""
Generate a task graph for use with the taskgraph
version0: basic
- nodes are tasks
- for edges
foreach task, collect tuples ('send', destination, flits_per_packet, number_of_packets)
version1:
- nodes are tasks
- for edges, consider
"""
def get_task_communication_graph_skeleton(self):
gl = []
for tm in self.tmodels:
dl = tm.get_unique_message_destinations()
for d in dl:
gl.append(EdgeM(src=tm.taskname, dst=d, fpp=0, nop=0))
return gl
def taskgraph_gen(self):
taskgraph_outdir = os.path.join(self.outdir, "taskgraph")
"""
------------ Generate graph.txt ---------------------------------------
"""
G = []
allarcs = self.get_all_communication_arcs()
# for tm in self.tmodels:
# if tm.is_marked_off_chip:
# # TODO handle later in a meaningful way
# continue
# info1 = tm.get_send_class_statement_info1()
# for send_class, _, syminfo, destinations_,nodeobj in info1:
# destinations = list(map(tm.resolve_address, destinations_))
# """
# TODO: after TLV send
# """
# if send_class == 'send':
# for dst in destinations:
# # each struct is a packet, and entire array is sent by default
# # flits per packet
# fpp = self.get_flits_in_type(syminfo.typename)
# # number of packets
# nop = syminfo.arraysize
# if not nodeobj.fullrange():
# nop = nodeobj.length - nodeobj.offset;
# e = Edge(src=tm.taskname, dst=dst, fpp=fpp, nop=nop)
# G.append(e)
# elif send_class == 'scatter':
# for dst in destinations:
# # each struct is a packet
# # flits per packet
# fpp = self.get_flits_in_type(syminfo.typename)
# # array is sliced into len(destinations) and sent
# # number of packets
# nop = syminfo.arraysize/len(destinations)
# if not nodeobj.fullrange():
# nop = (nodeobj.length - nodeobj.offset)/len(destinations);
# e = Edge(src=tm.taskname, dst=dst, fpp=fpp, nop=nop)
# G.append(e)
# elif send_class == 'broadcast':
# pass
# else:
# raise CompilationError("Not implemented yet")
# def to_graph_txt(G):
# lines = []
# lines.append(len(self.tmodels))
# lines.append(len(G))
# lines.append(' '.join([x.taskname for x in self.tmodels]))
# for e in G:
# comm_vol_in_flits = e.fpp * e.nop
# lines.append('{} {} {} {} {}'.format(e.src, e.dst, comm_vol_in_flits, e.lineno, e.level))
# return lines
def merge_allarcs_into_tasklevel_arcs(all_arcs, skel_arcs):
for skarc in skel_arcs:
for a in all_arcs:
if (a.src.taskname, a.dst.taskname) == (skarc.src, skarc.dst):
skarc.fpp = a.fpp
skarc.nop += a.nop
return skel_arcs
def to_graph_txt(G, merged=False):
lines = []
lines.append(len(self.tmodels))
lines.append(len(G))
lines.append(' '.join([x.taskname for x in self.tmodels]))
if not merged:
for e in G:
comm_vol_in_flits = e.fpp * e.nop
lines.append('{} {} {}\t{} {} {}\t{} {} {}'.format(e.src.taskname, e.dst.taskname,
comm_vol_in_flits,
e.src.annotation.lineno, e.src.annotation.level, e.src.annotation.name,
e.dst.annotation.lineno, e.dst.annotation.level, e.dst.annotation.name
))
with open (os.path.join(taskgraph_outdir, 'graph_all.txt'), 'w') as fh:
fh.write('\n'.join([str(x) for x in lines]))
else:
for e in G:
comm_vol_in_flits = e.fpp * e.nop
lines.append('{} {} {}'.format(e.src, e.dst, comm_vol_in_flits))
with open (os.path.join(taskgraph_outdir, 'graph.txt'), 'w') as fh:
fh.write('\n'.join([str(x) for x in lines]))
return lines
G = merge_allarcs_into_tasklevel_arcs(allarcs, self.get_task_communication_graph_skeleton())
trymkdir(taskgraph_outdir)
ll = to_graph_txt(G, merged=True)
llnew = to_graph_txt(allarcs, merged=False)
"""
------------ Generate config.json ---------------------------------------
"""
cfg = {}
cfg['nocpath'] = self.psn.dir
cfg['flitwidth_override'] = self.flit_width
cfg['drop_precedence_constraints'] = False
cfg['num_tasks_per_router_bound'] = 1
cfg['objective'] = 'both'
cfg['gurobi_timelimit'] = 60*10
if self.psn.is_connect():
cfg['noctype'] = 'connect'
elif self.psn.is_fnoc():
cfg['noctype'] = 'fnoc'
else:
pass
with open(os.path.join(taskgraph_outdir, "config.json"), "w") as oh:
json.dump(cfg, oh, indent=4)
"""
------------ Generate specs.json ---------------------------------------
"""
from collections import namedtuple
tasknames = [x.taskname for x in self.tmodels]
KernelInfo = namedtuple("KernelInfo", ["name","energy", "duration"])
kspecs = {}
if self.args.kernel_specs_file:
kspecs = loadjson(self.args.kernel_specs_file)
def get_task_kernel_list(task):
if kspecs:
f1 = KernelInfo(name="f1", energy=2, duration=kspecs[task])
else:
f1 = KernelInfo(name="f1", energy=2, duration=2)
return [f1._asdict()]
        specs = {}
        specs["energy_cost_per_bit"] = 0.05
        specs["initial_map"] = {}
        specs["hop_latency"] = 1
        specs["cycles_per_pkt"] = 3.0/2
        if self.psn.is_fnoc():
            specs['hop_latency'] = 3
            specs['cycles_per_pkt'] = 8.0/2
        specs['task_kernels'] = {task:get_task_kernel_list(task) for task in tasknames}
        with open(os.path.join(taskgraph_outdir, "specs.json"), "w") as oh:
            json.dump(specs, oh, indent=4)
@property
def enabled_lateral_data_io(self):
return self.args.enable_lateral_bulk_io
def has_scs_type(self, SCSNAME):
for tm in self.tmodels:
for k, v in tm.symbol_table.items():
if v.storage_class == SCSNAME:
return True
return False
def get_vhls_portname(self, typename, instancename):
# self.type_table[typename].xxx
if len(self.type_table[typename].member_info_tuples)==1 and (self.type_table[typename].basictypes[0]):
if (self.type_table[typename].basictypes[0][:3] != 'ap_'):
return instancename + '_' + self.type_table[typename].member_info_tuples[0][0]
if len(self.type_table[typename].member_info_tuples) == 1:
mname = self.type_table[typename].member_info_tuples[0][0]
# _V when member_info_tuples[0][1] >= 32, but let's see
if mname[-1] == '_':
return instancename + '_' + self.type_table[typename].member_info_tuples[0][0] + 'V'
else:
return instancename + '_' + self.type_table[typename].member_info_tuples[0][0] + '_V'
return instancename
@property
def taskmap_json_file(self):
return self.args.taskmap_json_file
def all_instances_of_type(self, tmodel):
return [tm1 for tm1 in self.tmodels if tm1.taskdefname == tmodel.taskdefname]
def taskmap(self, taskname):
#return self.global_task_map[taskname]
if taskname in self.global_task_map:
return self.global_task_map[taskname]
else:
# TODO neater
if taskname == '@return':
return 'saved_source_address'
else:
return taskname
def get_lone_scemi_port_id(self): # tmpfix
l = self.get_tasks_marked_for_exposing_flit_SR_ports()
if len(l) == 1:
return l[0][0]
else:
return 2
def has_nonhls_kernels(self):
for d in self.hwkdecls:
if not (d.tq == '__vivadohls__'):
return True
return False
def trace_state_entry_exit(self):
if self.args.simverbosity == 'state-entry-exit':
return True
return False
"""
assuming ./mainout/{src,sim,...}
gen ./mainout/bviwrappers/ if na has hlspes, or well, regardless
gen ./${bsvkernels}
"""
def make_wrapper_dirs(self):
mainout = self.outdir
hlsbvidir = os.path.join(mainout, "bviwrappers")
self.hls_bviwrappers_outdir = hlsbvidir
trymkdir(hlsbvidir)
mainout_par = os.path.join(mainout, os.pardir)
bsvkernels="bsvwrappers"
bsvkernels = os.path.join(mainout_par, bsvkernels)
if self.args.kernelwrapper_outdir:
bsvkernels = self.args.kernelwrapper_outdir
self.pelib_dir = bsvkernels
if self.has_nonhls_kernels():
trymkdir(bsvkernels)
if self.args.vhlswrap_outdir:
#self.vhlswrappergen_dir = os.path.join(os.path.dirname(self.nafile_path), self.args.vhlswrap_outdir)
self.vhlswrappergen_dir = self.args.vhlswrap_outdir
# if not os.path.exists(self.pelib_dir):
# raise ValueError(self.pelib_dir,
# """does not exist, please create explicitly or specify a
# directory with a switch
# """)
# VHLS directory
if self.args.vhlswrap_outdir:
trymkdir(self.args.vhlswrap_outdir)
@property
def hls_source_directory_abspath(self):
pass
@property
def out_scriptdir(self):
return os.path.join(self.outdir, 'tcl')
@property
def out_simdir(self):
return os.path.join(self.outdir, 'sim')
@property
def out_swmodeldir(self):
return os.path.join(self.outdir, 'mpimodel')
def prepare_outdir_layout(self):
# SETUP OUTDIR LAYOUT
trymkdir(os.path.join(self.outdir, 'ispecs'))
trymkdir(os.path.join(self.outdir, 'src'))
trymkdir(os.path.join(self.outdir, 'tb'))
trymkdir(self.out_simdir)
trymkdir(os.path.join(self.outdir, 'data'))
trymkdir(os.path.join(self.outdir, 'libs'))
trymkdir(os.path.join(self.outdir, 'fpga'))
trymkdir(os.path.join(self.outdir, 'libna'))
trymkdir(os.path.join(self.outdir, 'scemi'))
trymkdir(self.out_swmodeldir)
if self.args.scemi:
trymkdir(os.path.join(self.outdir, 'tbscemi'))
trymkdir(self.out_scriptdir)
if self.psn.is_connect():
force_symlink(self.psn.dir, os.path.join(self.outdir, 'connect'))
if self.psn.is_fnoc():
force_symlink(self.psn.dir, os.path.join(self.outdir, 'forthnoc'))
#force_symlink(os.path.join(self.toolroot, 'libs'), os.path.join(self.outdir, 'libs'))
force_symlink(os.path.join(self.toolroot, 'libs/bsv'), os.path.join(self.outdir, 'libs/bsv'))
if self.has_scs_type('__ram__') or self.has_scs_type('__mbus__'):
force_symlink(os.path.join(self.toolroot, 'libs/bsv_reserve'), os.path.join(self.outdir, 'libs/bsv_reserve'))
force_symlink(os.path.join(self.toolroot, 'libs/verilog'), os.path.join(self.outdir, 'libs/verilog'))
force_symlink(os.path.join(self.toolroot, 'libs/xdc'), os.path.join(self.outdir, 'libs/xdc'))
#force_symlink(os.path.join(self.toolroot, 'libs/libna'), os.path.join(self.outdir, 'libs/libna'))
force_symlink(os.path.join(self.toolroot, 'libs/vhls_include'), os.path.join(self.outdir, 'libs/vhls_include'))
self.make_wrapper_dirs()
# Write taskmap json file
with open(os.path.join(self.out_simdir, 'taskmap.json'), 'w') as fo:
json.dump(self.global_task_map, fp=fo, indent=4)
# Dump the mfpga_taskmap.json too
if self.task_partition_map:
with open(os.path.join(self.out_simdir, 'original_taskmap.json'), 'w') as fo:
json.dump(self.original_taskmap_json, fp=fo, indent=4)
with open(os.path.join(self.out_simdir, 'mfpga_taskmap.json'), 'w') as fo:
json.dump(self.task_partition_map, fp=fo, indent=4)
#readback = json.load(open('OUT_CGEN/src/taskmap.json'))
with open(os.path.join(self.out_simdir, 'typetags.json'), 'w') as fo:
json.dump(self.typetags, fp=fo, indent=4)
def setup(self):
self.nafile_path = self.args.nafile
trymkdir(self.outdir)
# Types
#
self.type_table = collections.OrderedDict()
for t in self.types:
self.type_table[t.struct_name] = t
# Typetags
self.typetags = collections.OrderedDict()
for i, t in enumerate(self.type_table.keys()):
self.typetags[t] = i
# Hwkernels
#
# Tasks
#
self.tmodels = [tmodel(t) for t in self.tasks]
for tm in self.tmodels:
tm.setup()
tm._gam = self
if self.taskmap_json_file and os.path.exists(self.taskmap_json_file):
self.global_task_map, self.task_partition_map = self.parse_taskmap_json(self.taskmap_json_file)
# Add the interfpga link tasks to tmodels
if self.has_tasks_marked_for_xfpga:
link_tasks = self.get_interfpga_link_tasks()
link_tmodels = [tmodel((None, t)) for t in link_tasks]
for tm in link_tmodels:
tm.setup()
tm._gam = self
self.tmodels.extend(link_tmodels)
        # task groups may name a task-instance array as a proxy for all of its instances; expand those here
def find_name_in_tmodels(name):
if name in [x.taskname for x in self.tmodels]:
return True
def find_if_a_taskinstance_array_name(name):
tms_with_array_decl = [t for t in self.tmodels if t.instanceparams and t.instanceparams.num_task_instances]
# we have instance tasks that have been defined as arrays
# we check if name matches any of these tasknames MINUS the _%d suffix
for t in tms_with_array_decl:
abc = t.taskname
if abc[:abc.rfind('_')] == name:
# found, so all the array instances should be accounted for, and sent
account_for = t.instanceparams.num_task_instances
                    for t2 in tms_with_array_decl:
                        abc2 = t2.taskname
                        abc2 = abc2[:abc2.rfind('_')]
                        if abc2 == name:
                            account_for = account_for - 1
                    if account_for == 0:
                        return True, t.instanceparams.num_task_instances
            return False, 0
for k, v in self.taskgroups.items():
            for name in list(v.tasknamelist):  # iterate over a copy; the list is mutated below
if not find_name_in_tmodels(name):
found, count = find_if_a_taskinstance_array_name(name)
if found:
v.tasknamelist.remove(name)
v.tasknamelist.extend(["{}_{}".format(name, idx) for idx in range(count)])
self.set_a_task_map()
# TODO temporary arrangement
# 1. broadcast: assign address_list; to be done after task map
# 2. recv from @any or @customgroup_name
for tm in self.tmodels:
tm.setup_broadcast_stmts()
tm.setup_recv_taskgroup_stmts()
tm.setup_send_taskgroup_stmts()
tm.setup_scatter_taskgroup_stmts()
tm.setup_gather_taskgroup_stmts()
tm.setup_barrier_group_resolution()
tm.setup_pragma_recvs_sends_declarations()
def get_interfpga_link_tasks(self):
ifpga_tdl = []
from main.nac import task_definition
for link in self.original_taskmap_json['interfpga_links']:
(fromfpga, fromnode), (tofpga, tonode) = link.items()
qualifiers = ['xfpga']
fromlink_tname = '{}_{}'.format(fromfpga, fromnode)
tolink_tname = '{}_{}'.format(tofpga, tonode)
td = task_definition( (None, fromlink_tname, qualifiers) )
ifpga_tdl.append(td)
td = task_definition( (None, tolink_tname, qualifiers) )
ifpga_tdl.append(td)
return ifpga_tdl
@property
def number_user_send_ports(self):
return int(self.psn.params['NUM_USER_SEND_PORTS'])
@property
def flit_width(self):
return int(self.psn.params['FLIT_DATA_WIDTH'])
@property
def unused_flit_header_bitcount(self):
if self.psn.is_fnoc():
# For FNOC we reserve self.number_user_send_ports for use with broadcast/multicast feature
return self.flit_width - self.number_user_send_ports - self.get_network_address_width() - self.get_typetags_count_width() - 2 # 2 bits for bcast or multicast indicator
elif self.psn.is_connect():
return self.flit_width - self.get_network_address_width() - self.get_typetags_count_width()
def sanitychecks(self):
        # CHECK: whether the flit width is enough to accommodate the 'header flit'
        assert self.unused_flit_header_bitcount >= 0, "FLIT_WIDTH insufficient to hold the header flit, should be at least {}".format(-self.unused_flit_header_bitcount+self.flit_width)
pass
def hwkernelname2modname(self, k):
return k[0].upper()+k[1:]
def hwmodname2kernelname(self, k):
return k[0].lower()+k[1:]
def get_network_address_width(self):
nnodes = self.number_user_send_ports
addr_width = int(math.ceil(math.log(nnodes, 2)))
if 'FORCE_ADDRWIDTH' in self.psn.params:
#print("Using FORCE_ADDRWIDTH")
return self.psn.params['FORCE_ADDRWIDTH']
return addr_width
def getBitWidth(self, count):
return int(max(1, int(math.ceil(math.log(count, 2)))))
def get_typetags_count_width(self):
ntags = len(self.typetags)
return self.getBitWidth(ntags)
def getranges_tag_and_sourceaddr_info_in_flit(self):
fw = self.flit_width
nnodes = self.number_user_send_ports
addr_width = int(math.ceil(math.log(nnodes, 2)))
ntags = len(self.typetags)
tag_width = int(max(1, int(math.ceil(math.log(ntags, 2)))))
tag_range = str(addr_width+tag_width-1)+':'+str(addr_width)
        sourceaddr_range = str(addr_width-1)+':0'
        opts_width = 4
        opts_range = str(opts_width+tag_width+addr_width-1)+':'+str(addr_width+tag_width)
        assert addr_width + tag_width + opts_width <= fw, "addr_width + tag_width + opts_width must fit within FLIT_DATA_WIDTH"
return (tag_range, sourceaddr_range, opts_range)
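    # Header-flit layout implied by the ranges above (a sketch), from the LSB:
    #   [ source address (addr_width bits) | type tag (tag_width bits) | opts (4 bits) ]
    # e.g. with 16 endpoints and 8 message types: addr_width=4, tag_width=3,
    # giving sourceaddr_range='3:0', tag_range='6:4', opts_range='10:7'.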
def typename2tag(self, typename):
if typename in self.typetags:
return self.typetags[typename]
else:
pdb.set_trace()
raise CompilationError("Unknown type %s" % typename)
def parse_taskmap_json(self, taskmap_json_file):
self.original_taskmap_json = collections.OrderedDict(json.load(open(self.taskmap_json_file)))
x = collections.OrderedDict(json.load(open(self.taskmap_json_file)))
if 'header' in x:
hdr = x.pop('header')
if hdr['multifpga']:
interfpga_links = x.pop('interfpga_links')
print("xfpgaLinks:", interfpga_links)
rmap = collections.OrderedDict()
for k,v in x.items():
rmap.update(v)
# introduce interfpga link tasks
for link in interfpga_links:
(fromfpga, fromnode), (tofpga, tonode) = link.items()
fromlink_tname = '{}_{}'.format(fromfpga, fromnode)
tolink_tname = '{}_{}'.format(tofpga, tonode)
rmap[fromlink_tname] = fromnode
rmap[tolink_tname] = tonode
# add to the partition specific map too
x[fromfpga][fromlink_tname] = fromnode
x[tofpga][tolink_tname] = tonode
self.interfpga_links.append((fromfpga, fromnode, tofpga, tonode))
return rmap, x
else:
return x, {}
return x, {}
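    # Shape of a multi-FPGA taskmap.json as assumed by the parser above (a
    # sketch; the fpga/node/task names are illustrative, only 'header',
    # 'multifpga' and 'interfpga_links' are fixed keys):
    # {
    #   "header": {"multifpga": true},
    #   "interfpga_links": [{"fpga0": "n3", "fpga1": "n0"}],
    #   "fpga0": {"taskA": 0, "taskB": 1},
    #   "fpga1": {"taskC": 2}
    # }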
def set_a_task_map(self):
if self.taskmap_json_file and os.path.exists(self.taskmap_json_file):
# PARSED earlier
#self.global_task_map, self.task_partition_map = self.parse_taskmap_json(self.taskmap_json_file)
#collections.OrderedDict(json.load(open(self.taskmap_json_file)))
#X self.global_task_map[self.tmodels[0].taskname] = 0
# off_chip tagged nodes are no special, whatever the taskmap says
# but should be on the boundaries ideally for phy.impl
for tm in self.tmodels:
tm.mapped_to_node = self.global_task_map[tm.taskname]
#tm.mapped_to_node = self.taskmap[tm.taskname]
else:
# some random assignment
if not self.args.taskmap_use_random:
                random.seed(11)  # CONNECT was misbehaving for some shuffles
nplaces = int(self.psn.params['NUM_USER_SEND_PORTS'])
# no special nodes as far as random mapping is concerned
l = [i for i in range(0, nplaces)] # let 0 be the special node, fixed for now
random.shuffle(l)
for i, tm in enumerate(self.tmodels):
tm.mapped_to_node = l[i]
self.global_task_map[tm.taskname] = l[i]
if None: # TODO review
l = [i for i in range(1, nplaces)] # let 0 be the special node, fixed for now
random.shuffle(l)
self.tmodels[0].mapped_to_node = 0 # redundant, TODO remove
self.global_task_map[self.tmodels[0].taskname] = 0
for i, tm in enumerate(self.tmodels[1:]): # except 0
tm.mapped_to_node = l[i]
self.global_task_map[tm.taskname] = l[i]
@property
def outdir(self):
return self.args.cgenoutdir
@property
def taskmap_json_file(self):
return self.args.taskmap_json_file
def get_project_sha(self): # TODO move
def is_git_directory(path = '.'):
return subprocess.call(['git', '-C', path, 'status'], stderr=subprocess.STDOUT, stdout = open(os.devnull, 'w')) == 0
def get_repo_sha(repo):
sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=repo).decode('ascii').strip()
return sha
        return 'disabled-sha'  # short-circuits the git lookups below; they are kept for reference only
return get_repo_sha(self.toolroot)
for subdir in os.listdir('.'): # TODO WHAT WAS THIS?!
if is_git_directory(subdir):
return get_repo_sha(subdir)
assert False
def has_off_chip_nodes(self):
return len(self.get_off_chip_node_id_list())>0
def get_tasks_marked_for_exposing_flit_SR_ports(self):
ll = []
for t in self.tmodels:
if t.is_marked_EXPOSE_AS_SR_PORT:
ll.append((t.mapped_to_node, t.taskname, t.off_chip_qualifier))
return ll
def get_tasks_marked_for_exposing_quasiserdes_sr_ports(self):
ll = []
for t in self.tmodels:
if t.is_marked_EXPOSE_AS_XFPGA_SERDES_PORT:
ll.append((t.mapped_to_node, t.taskname, t.off_chip_qualifier))
return ll
def get_off_chip_node_id_list(self):
ll = []
for t in self.tmodels:
if t.is_marked_off_chip:
ll.append((t.mapped_to_node, t.taskname, t.off_chip_qualifier))
return ll
@property
def has_tasks_marked_for_xfpga(self):
if self.task_partition_map:
return True
return False
def has_tasks_with_qualifier(self, qualname):
for t in self.tmodels:
if t.qualifiers:
if qualname in t.qualifiers:
return True
return False
def get_max_parcel_size(self):
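        # largest multiple of the flit width that fits in a 512-bit parcel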
return 512-512%int(self.flit_width)
def get_flits_in_type(self, ty):
return self.get_struct_member_index_ranges_wrt_flitwidth(ty)[0]
def get_type_size_in_bits(self, ty):
ty_size = 0
for n, z, az in self.type_table[ty].member_info_tuples:
z = z*az
ty_size += z
return ty_size
def get_struct_member_start_pos_for_MPItypes(self, ty):
d = collections.OrderedDict()
ty_size = 0
startpos = 0
ll = list()
for n, z, az,mtype in self.type_table[ty].member_n_z_az_ty:
if mtype not in self.basic_type_list:
if z <= 64:
z = 64
else:
raise NotSupportedException("nonbasic types longer than 64b not presently supported for MPI model")
z = z*az
ty_size += z
endpos = startpos + z - 1
#ll.append((endpos, startpos, n, az))
ll.append(startpos)
startpos = endpos + 1
return ll
def get_struct_member_index_ranges_wrt_flitwidth(self, ty):
d = collections.OrderedDict()
fpaylwidth = int(self.psn.params["FLIT_DATA_WIDTH"])
ty_size = 0
startpos = 0
ll = list()
for n, z, az in self.type_table[ty].member_info_tuples:
z = z*az
ty_size += z
endpos = startpos + z - 1
ll.append((endpos, startpos, n, az))
startpos = endpos + 1
totalFlits = int((ty_size+fpaylwidth-1)/fpaylwidth)
return (totalFlits, ll)
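    # Illustrative: with FLIT_DATA_WIDTH=64 and a struct whose members are
    # 32, 32 and 64 bits wide, ty_size=128, the (end, start) bit ranges come
    # out as (31,0), (63,32) and (127,64), and totalFlits = ceil(128/64) = 2.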
def get_bsv_lib_paths(self):
l = [self.hls_bviwrappers_outdir]
if self.has_nonhls_kernels():
l.append(self.pelib_dir)
return l
def get_buffersize_offchipnode(self):
        return 64
def find_tmodel_by_name(self, name):
if not [t for t in self.tmodels if t.taskname == name]:
pdb.set_trace()
[tm] = [t for t in self.tmodels if t.taskname == name]
return tm
def get_all_communication_arcs(self):
"""
SRC_stmt::(taskname, stmt_annotation, TypeName, transferAmount)
DST_stmt::(taskname, stmt_annotation, TypeName, transferAmount)
"""
def srpair_likely_match(src_taskname, s, r):
if src_taskname in r[3]:
if s[2].typename == r[2].typename:
# return True
if r[0] == 'recv' and s[0] == 'send':
cnd1 = s[4].fullrange() and (s[2].arraysize == r[2].arraysize)
# cnd2 = not s[4].fullrange() and ((s[4].length - s[4].offset) == (r[4].length - r[4].offset))
                        cnd2 = True  # relaxed: the stricter partial-range check above is intentionally disabled
if cnd1 or cnd2:
return True
if not cnd2:
return False
return True
return False
srpairs = collections.OrderedDict()
for tm in self.tmodels:
srpairs[tm.taskname] = []
dl = tm.get_unique_message_destinations()
sl = tm.get_unique_message_sources()
send_class_stmts = tm.get_send_class_statement_info1()
if not send_class_stmts and dl: # the placeholder host task
for dst in dl:
dst_tm = self.find_tmodel_by_name(dst)
fl = filter(lambda x: tm.taskname in x[3], dst_tm.get_recv_class_statement_info1()) # TODO: let these get_recv/send_class info1 methods do the necessary work
for info_dst_side in fl:
# there are no actual send statements in this placeholder so we cook on up
reconstructed_src_copy = ('send', info_dst_side[1], info_dst_side[2], [dst_tm.taskname], None)
srpairs[tm.taskname].append((reconstructed_src_copy, info_dst_side, dst_tm.taskname))
for info in send_class_stmts:
dst_address_list = info[3]
for dst in dst_address_list:
dst_tm = self.find_tmodel_by_name(dst)
recv_class_stmts = dst_tm.get_recv_class_statement_info1()
if not recv_class_stmts:
reconstructed_dst_copy = ('recv', info[1], info[2], [tm.taskname], None)
srpairs[tm.taskname].append((info, reconstructed_dst_copy, dst_tm.taskname))
else:
fl = filter(lambda x: srpair_likely_match(tm.taskname, info, x), recv_class_stmts)
for info_dst_side in fl:
srpairs[tm.taskname].append((info, info_dst_side, dst_tm.taskname))
rl_srpairs = []
def _get_nop_fpp(snd, rcv):
info = snd
if not snd[4]: # reconstructed send for placerholder task
info = rcv
# flits per packet # TODO (packet size is fixed in terms of typesize)
fpp = self.get_flits_in_type(info[2].typename)
fpp = fpp + 1 # one header flit per packet
# number of packets
nop = info[2].arraysize
if not info[4].fullrange():
                nop = info[4].length - info[4].offset
return fpp, nop
for k, v in srpairs.items():
for snd, rcv, dst_taskname in v:
fpp, nop = _get_nop_fpp(snd, rcv)
def getAnnotation(stmt):
if not stmt:
return Annotation(name='none',lineno=0,level=0)
                    lno, lvl, name = stmt.get_annotations()[0]
return Annotation(name=name, lineno=lno, level=lvl)
e = Edge(src=Stmt(taskname=k, annotation=getAnnotation(snd[4])), dst=Stmt(taskname=dst_taskname, annotation=getAnnotation(rcv[4])), fpp=fpp, nop=nop)
#print(snd[4].get_annotations()[0], ' ==> ', rcv[4].get_annotations()[0], ' : ', dst_taskname)
rl_srpairs.append(e)
return rl_srpairs
def get_line_annotations(self):
d = collections.OrderedDict()
for t in self.tdefs_original:
ll = t.line_annotations()
for l in ll:
for e in l:
if e[0] in d:
d[e[0]].append(e)
else:
d[e[0]] = [e]
return d
def dump_line_annotations(self):
ispecs_dir = os.path.join(self.outdir, 'ispecs')
d = self.get_line_annotations()
with open(os.path.join(ispecs_dir, 'line_annotations.json'), 'w') as fh:
json.dump(d, fh, indent=4)
@property
def basic_type_list(self):
return na_basic_type_list.keys()
def to_mpi_typename(self, ty, width=None):
if ty in na_basic_type_list:
return na_basic_type_list[ty][1]
if width:
if width <= 64:
return 'MPI_UNSIGNED_LONG'
else:
raise NotSupportedException("nonbasic types longer than 64b not presently supported for MPI model")
#---------------------------------------------------------------------------------------------------
|
nilq/baby-python
|
python
|
import pandas as pd
import numpy as np
from trav_lib.data_prep import reduce_memory
def test_reduce_memory():
df = pd.DataFrame({'ints':[1,2,3,4],'floats':[.1,.2,.3,.4],'strings':['a','b','c','d']})
df2 = reduce_memory(df)
assert df2['ints'].dtype == np.dtype('int8')
assert df2['floats'].dtype == np.dtype('float32')
assert df2['strings'].dtype == np.dtype('O')
df = pd.DataFrame({'ints':[1,2,3,4],'floats':[.1,.2,.3,.4],'strings':['a','b','c','d']})
df3 = reduce_memory(df, cat_cols = ['strings'])
assert df3['ints'].dtype == np.dtype('int8')
assert df3['floats'].dtype == np.dtype('float32')
assert df3['strings'].dtype.name == 'category'
|
nilq/baby-python
|
python
|
from plotly.graph_objs import Ohlc
|
nilq/baby-python
|
python
|
'''
Created on May 28, 2015
@author: local
'''
import sys
import argparse
import logging
import subprocess
import os
import json
logging.getLogger("spectrumbrowser").disabled = True
def getProjectHome():
command = ['git', 'rev-parse', '--show-toplevel']
p = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
return out.strip()
def setupConfig(host, configFile):
msodConfig = json.load(open(os.environ.get("HOME") +
"/.msod/MSODConfig.json"))
if "DB_DATA_DIR" in msodConfig:
mongoDir = msodConfig["DB_DATA_DIR"]
else:
mongoDir = getProjectHome() + "/data/db"
configuration = Config.parse_local_config_file(configFile)
configuration["HOST_NAME"] = host
configuration["CERT"] = getProjectHome() + "/devel/certificates/dummy.crt"
configuration["MONGO_DIR"] = mongoDir
Config.setSystemConfig(configuration)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Process command line args')
parser.add_argument('-host', help='Host')
parser.add_argument('-f', help='config file')
args = parser.parse_args()
configFile = args.f
host = args.host
sys.path.append(getProjectHome() + "/flask")
import Config
setupConfig(host, configFile)
|
nilq/baby-python
|
python
|
import numpy as np
import pytest
from inspect import currentframe, getframeinfo
from pathlib import Path
from ..flarelc import FlareLightCurve
from ..lcio import from_K2SC_file
#example paths:
target1 = 'examples/hlsp_k2sc_k2_llc_210951703-c04_kepler_v2_lc.fits'
target2 = 'examples/hlsp_k2sc_k2_llc_211119999-c04_kepler_v2_lc.fits'
target3 = 'examples/hlsp_k2sc_k2_llc_211117077-c04_kepler_v2_lc.fits'
#From lightkurve
def test_invalid_lightcurve():
"""Invalid FlareLightCurves should not be allowed."""
err_string = ("Input arrays have different lengths."
" len(time)=5, len(flux)=4")
time = np.array([1, 2, 3, 4, 5])
flux = np.array([1, 2, 3, 4])
with pytest.raises(ValueError) as err:
FlareLightCurve(time=time, flux=flux)
assert err_string == err.value.args[0]
def test_find_gaps():
filename = getframeinfo(currentframe()).filename
p = Path(filename).resolve().parents[1]
    lc = from_K2SC_file(p / target1)
lc.find_gaps()
assert lc.gaps == [(0, 2582), (2582, 3424)]
|
nilq/baby-python
|
python
|
# Copyright (c) Niall Asher 2022
from socialserver.util.test import (
test_db,
server_address,
create_post_with_request,
create_user_with_request,
create_user_session_with_request
)
from socialserver.constants import ErrorCodes
import requests
def test_get_unliked_post(test_db, server_address):
new_post_id = create_post_with_request(test_db.access_token)
r = requests.get(
f"{server_address}/api/v3/posts/single",
json={"post_id": new_post_id},
headers={"Authorization": f"Bearer {test_db.access_token}"},
)
assert r.status_code == 201
assert r.json()['meta']['user_likes_post'] == False
assert r.json()['post']['like_count'] == 0
def test_like_post(test_db, server_address):
new_post_id = create_post_with_request(test_db.access_token)
r = requests.post(f"{server_address}/api/v3/posts/like",
json={"post_id": new_post_id},
headers={"Authorization": f"Bearer {test_db.access_token}"})
assert r.status_code == 201
assert r.json()['liked'] == True
assert r.json()['like_count'] == 1
r = requests.get(
f"{server_address}/api/v3/posts/single",
json={"post_id": new_post_id},
headers={"Authorization": f"Bearer {test_db.access_token}"},
)
assert r.status_code == 201
assert r.json()['meta']['user_likes_post'] == True
assert r.json()['post']['like_count'] == 1
def test_unlike_post(test_db, server_address):
new_post_id = create_post_with_request(test_db.access_token)
r = requests.post(f"{server_address}/api/v3/posts/like",
json={"post_id": new_post_id},
headers={"Authorization": f"Bearer {test_db.access_token}"})
assert r.status_code == 201
assert r.json()['liked'] == True
assert r.json()['like_count'] == 1
r = requests.delete(
f"{server_address}/api/v3/posts/like",
json={"post_id": new_post_id},
headers={"Authorization": f"Bearer {test_db.access_token}"},
)
assert r.status_code == 200
assert r.json()['liked'] == False
assert r.json()['like_count'] == 0
def test_like_post_already_liked(test_db, server_address):
new_post_id = create_post_with_request(test_db.access_token)
r = requests.post(f"{server_address}/api/v3/posts/like",
json={"post_id": new_post_id},
headers={"Authorization": f"Bearer {test_db.access_token}"})
assert r.status_code == 201
assert r.json()['liked'] == True
assert r.json()['like_count'] == 1
r2 = requests.post(f"{server_address}/api/v3/posts/like",
json={"post_id": new_post_id},
headers={"Authorization": f"Bearer {test_db.access_token}"})
assert r2.status_code == 400
assert r2.json()["error"] == ErrorCodes.OBJECT_ALREADY_LIKED.value
def test_unlike_post_not_liked(test_db, server_address):
new_post_id = create_post_with_request(test_db.access_token)
r = requests.delete(f"{server_address}/api/v3/posts/like",
json={"post_id": new_post_id},
headers={"Authorization": f"Bearer {test_db.access_token}"})
assert r.status_code == 400
assert r.json()["error"] == ErrorCodes.OBJECT_NOT_LIKED.value
def test_like_post_does_not_exist(test_db, server_address):
r = requests.post(f"{server_address}/api/v3/posts/like",
json={"post_id": 1293812},
headers={"Authorization": f"Bearer {test_db.access_token}"})
assert r.status_code == 404
assert r.json()["error"] == ErrorCodes.POST_NOT_FOUND.value
def test_dislike_post_does_not_exist(test_db, server_address):
r = requests.delete(f"{server_address}/api/v3/posts/like",
json={"post_id": 1293812},
headers={"Authorization": f"Bearer {test_db.access_token}"})
assert r.status_code == 404
assert r.json()["error"] == ErrorCodes.POST_NOT_FOUND.value
|
nilq/baby-python
|
python
|
import sys
import struct
""" Takes data from the Android IMU app and turns it into binary data.
Data comes in as csv, data points will be turned into the format:
Time Stamp Accelerometer Gyroscope
x y z x y z
=========================================
0 1 2 3 4 5 6
"""
ANDROID_IMU_DATA_FORMAT_STRING = 'ddddddd'
HEADER_SIZE = 25
def main():
input_file_name = sys.argv[1]
output_file_name = sys.argv[2]
with open(output_file_name, "wb") as out_file:
# write the format header
out_file.write(
ANDROID_IMU_DATA_FORMAT_STRING.ljust(HEADER_SIZE, ' ')
)
with open(input_file_name, "r") as in_file:
            for line in in_file:  # stream the CSV input line by line
clean_data = line_to_clean_data(line)
if clean_data:
out_file.write(
struct.pack(ANDROID_IMU_DATA_FORMAT_STRING, *clean_data)
)
        # both files are closed automatically by their with-blocks
def line_to_clean_data(line):
    if '4,' not in line:
return None
else:
items_as_text = line.split(",")
if len(items_as_text) < 13: # expected number of items in line
return None
item_values = [float(x) for x in items_as_text]
data_items = [
item_values[0], # time stamp
item_values[2], # accelerometer x
item_values[3], # accelerometer y
item_values[4], # accelerometer z
item_values[6], # gyroscope x
item_values[7], # gyroscope y
item_values[8] # gyroscope z
]
return data_items
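# A minimal reader for the binary files produced above (a sketch, not part of
# the original converter): it reads back the fixed-width ASCII format header
# written by main(), then unpacks one record at a time. The name
# read_imu_binary is ours, not from the original script.
def read_imu_binary(file_name):
    with open(file_name, "rb") as f:
        fmt = f.read(HEADER_SIZE).decode('ascii').strip()  # e.g. 'ddddddd'
        record_size = struct.calcsize(fmt)
        while True:
            chunk = f.read(record_size)
            if len(chunk) < record_size:
                break
            # (timestamp, acc_x, acc_y, acc_z, gyro_x, gyro_y, gyro_z)
            yield struct.unpack(fmt, chunk)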
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
from Compiler.types import *
from Compiler.instructions import *
from Compiler.util import tuplify,untuplify
from Compiler import instructions,instructions_base,comparison,program
import inspect,math
import random
import collections
from Compiler.library import *
from Compiler.types_gc import *
from operator import itemgetter
import numpy as np
def get_diff_types(data_list):
cint_data = [d for d in data_list if type(d) == cint]
pint_data = [(d, d.pid) for d in data_list if type(d) == pint]
sint_data = [d for d in data_list if type(d) == sint]
if len(pint_data) > 1:
pint_data = sorted(pint_data, key=itemgetter(1))
return (cint_data, pint_data, sint_data)
# This is not parallelized
def int_add(data_list, nparallel=1):
(cint_data, pint_data, sint_data) = get_diff_types(data_list)
c_res = cint(0)
for cd in cint_data:
c_res += cd
pd_res = []
current_pid = None
for (pd, pid) in pint_data:
if pid != current_pid:
current_pid = pid
pd_res.append(pint(0))
pd_res[-1] += pd
res = cint(0)
res += c_res
for pd in pd_res:
res += pd
for sd in sint_data:
res += sd
return res
def sum_lib(lst):
flattened_lst = []
for i in range(len(lst)):
print "TYPE?", type(lst[i])
if type(lst[i]) in (sfixMatrix, cfixMatrix, sfixMatrixGC, cfixMatrixGC):
flattened_lst += flatten(lst[i])
print flattened_lst
else:
flattened_lst.append(lst[i])
return sum(flattened_lst)
def max_lib(lst):
flattened_lst = []
for i in range(len(lst)):
print "TYPE?", type(lst[i])
if type(lst[i]) in (sfixMatrix, cfixMatrix, sfixMatrixGC, cfixMatrixGC):
flattened_lst += flatten(lst[i])
print flattened_lst
else:
flattened_lst.append(lst[i])
return max(flattened_lst)
def min_lib(lst):
flattened_lst = []
for i in range(len(lst)):
print "TYPE?", type(lst[i])
if type(lst[i]) in (sfixMatrix, cfixMatrix, sfixMatrixGC, cfixMatrixGC):
flattened_lst += flatten(lst[i])
print flattened_lst
else:
flattened_lst.append(lst[i])
return min(flattened_lst)
def flatten(A):
lst = []
if type(A) in (sfixMatrix, sfixMatrixGC, cfixMatrix, cfixMatrixGC):
for i in range(A.rows):
for j in range(A.columns):
lst.append(A[i][j])
return lst
import functools
def reduce_lib(lst, reduce_fn):
flattened_lst = []
for i in range(len(lst)):
if type(lst[i]) in(sfixMatrix, cfixMatrix, sfixMatrixGC, cfixMatrixGC):
flattened_lst += flatten(lst[i])
else:
flattened_lst.append(lst[i])
    return functools.reduce(reduce_fn, flattened_lst)
# Copy a portion of the large matrix to the small matrix.
def copy_matrix(dest, src, rows, cols, index):
for i in range(rows):
for j in range(cols):
            dest[i][j] = src[index * rows + i][j]
# Tree-based multiplication
def int_multiply(data_list, nparallel=2):
length = len(data_list)
data = []
data.append(Array(length, sint))
for i in range(length):
data[0][i] = data_list[i]
while length > 1:
length = (length / 2) + (length % 2)
data.append(Array(length, sint))
@for_range(length)
def f(i):
data[-1][i] = sint(0)
level = 0
for x in range(len(data) - 1):
print("level = {}, length = {}".format(level+1, data[level+1].length))
exec_len = data[level].length / 2
@for_range_multithread(nparallel, exec_len, exec_len)
def _multiply(i):
data[level+1][i] = data[level][2 * i] * data[level][2 * i + 1]
if data[level].length % 2 > 0:
data[level+1][data[level+1].length - 1] = data[level][data[level].length - 1]
level += 1
return data[-1][0]
def _transpose(A, B):
@for_range(A.rows)
def f(i):
@for_range(A.columns)
def g(j):
B[j][i] = A[i][j]
def _transpose_gc(A, B):
for i in range(A.rows):
for j in range(A.columns):
B[j][i] = A[i][j]
def transpose(A):
if isinstance(A, np.ndarray):
return A.transpose()
if not isinstance(A, (Matrix, MatrixGC)):
raise ValueError("Only matrix can be transposed")
if isinstance(A, (sintMatrix, sfixMatrix, cintMatrix, cfixMatrix)):
B = A.__class__(A.columns, A.rows)
_transpose(A, B)
return B
elif isinstance(A, (sintMatrixGC, sfixMatrixGC)):
B = A.__class__(A.columns, A.rows)
_transpose_gc(A, B)
return B
else:
raise NotImplementedError
def _matmul(A, B, C, D, int_type, nparallel=1):
total = A.rows * B.columns * A.columns
@for_range_multithread(nparallel, total, total)
def _multiply(i):
i_index = i / (B.columns * A.columns)
j_index = i % (B.columns * A.columns) / (A.columns)
k_index = i % A.columns
D[i] = A[i_index][k_index] * B[k_index][j_index]
@for_range_multithread(nparallel, A.rows * B.columns, A.rows * B.columns)
def _add(i):
i_index = i / B.columns
j_index = i % B.columns
C[i_index][j_index] = int_type(0)
@for_range(A.columns)
def _add_element(j):
C[i_index][j_index] += D[i * A.columns + j]
return C
# Not parallelized
def _matmul_mix(A, B, nparallel=1):
C = MixMatrix(A.rows, B.columns)
@for_range(A.rows * B.columns)
def f(i):
@for_range(A.columns)
def g(j):
v = C.get(i)
v += A.get(i * A.columns + j) * B.get(j * B.columns + i)
C.set(i, v)
return C
def _matmul_gc(A, B, C):
for i in range(A.rows):
for j in range(B.columns):
v = A[i][0] * B[0][j]
for k in range(1, A.columns):
v += A[i][k] * B[k][j]
C[i][j] = v
def matmul(A, B, left_rows, left_cols, right_rows, right_cols, mat_type, nparallel=1):
if isinstance(A, np.ndarray) and isinstance(B, np.ndarray):
return np.matmul(A, B)
# Tentative, very janky. Yep, this doesn't work :(. Buyer BEWARE!
if isinstance(A, sintMatrix) and isinstance(B, sintMatrix):
C = sintMatrix(A.rows, B.columns)
D = sintArray(A.rows * B.columns * A.columns)
return _matmul(A, B, C, D, sint, nparallel)
#C = sintMatrix(left_rows, right_cols)
#D = sintArray(left_rows * right_cols * left_cols)
#return _matmul(A, B, C, D, sint, nparallel)
elif isinstance(A, cintMatrix) and isinstance(B, cintMatrix):
C = cintMatrix(A.rows, B.columns)
D = cintArray(A.rows * B.columns * A.columns)
return _matmul(A, B, C, D, cint, nparallel)
elif isinstance(A, sfixMatrix) and isinstance(B, sfixMatrix):
C = sfixMatrix(A.rows, B.columns)
D = sfixArray(A.rows * B.columns * A.columns)
return _matmul(A, B, C, D, sfix, nparallel)
elif isinstance(A, cfixMatrixGC) or isinstance(B, cfixMatrixGC):
C = cfixMatrixGC(A.rows, B.columns)
_matmul_gc(A, B, C)
return C
elif isinstance(A, sfixMatrixGC) or isinstance(B, sfixMatrixGC):
C = sfixMatrixGC(A.rows, B.columns)
_matmul_gc(A, B, C)
return C
elif isinstance(A, MixMatrix) and isinstance(B, MixMatrix):
return _matmul_mix(A, B, nparallel)
elif isinstance(A, (sintMatrix, cintMatrix, cfixMatrix, sfixMatrix)) and isinstance(B, (sintMatrix, cintMatrix, cfixMatrix, sfixMatrix)):
C = sintMatrix(A.rows, B.columns)
D = sintArray(A.rows * B.columns * A.columns)
return _matmul(A, B, C, D, sint, nparallel)
else:
raise NotImplementedError
def _matadd(A, B, C, int_type, nparallel=1):
@for_range_multithread(nparallel, A.rows * A.columns, A.rows * A.columns)
def _add(i):
i_index = i / A.columns
j_index = i % A.columns
C[i_index][j_index] = A[i_index][j_index] + B[i_index][j_index]
def matadd(A, B, nparallel=1):
if isinstance(A, np.ndarray) and isinstance(B, np.ndarray):
return np.add(A, B)
if A.rows != B.rows or A.columns != B.columns:
        raise ValueError("[matadd] Matrices must have the same sizes")
if isinstance(A, cintMatrix) and isinstance(B, cintMatrix):
C = cintMatrix(A.rows, A.columns)
_matadd(A, B, C, cint, nparallel)
return C
elif isinstance(A, sintMatrix) and isinstance(B, sintMatrix):
C = sintMatrix(A.rows, A.columns)
_matadd(A, B, C, sint, nparallel)
return C
elif isinstance(A, sfixMatrix) and isinstance(B, sfixMatrix):
C = sfixMatrix(A.rows, A.columns)
_matadd(A, B, C, sfix, nparallel)
return C
elif type(A) in (sfixMatrix, cfixMatrix) and type(B) in (sfixMatrix, cfixMatrix):
C = sfixMatrix(A.rows, A.columns)
_matadd(A, B, C, sfix, nparallel)
return C
elif type(A) in (sfixMatrixGC, cfixMatrixGC) and type(B) in (sfixMatrixGC, cfixMatrixGC):
C = cfixMatrixGC(A.rows, A.columns, cfix_gc)
_matadd(A, B, C, cfix_gc, nparallel)
        return C
    else:
        raise NotImplementedError
def _matsub(A, B, C, int_type, nparallel=1):
@for_range_multithread(nparallel, A.rows * A.columns, A.rows * A.columns)
def _add(i):
i_index = i / A.columns
j_index = i % A.columns
C[i_index][j_index] = A[i_index][j_index] - B[i_index][j_index]
def _matsub_gc(A, B, C):
for i in range(A.rows):
for j in range(A.columns):
C[i][j] = A[i][j] - B[i][j]
def matsub(A, B, nparallel=1):
if isinstance(A, np.ndarray) and isinstance(B, np.ndarray):
return np.subtract(A, B)
if A.rows != B.rows or A.columns != B.columns:
raise ValueError("[matsub] Matrices must have the same sizes")
if isinstance(A, cintMatrix) and isinstance(B, cintMatrix):
C = cintMatrix(A.rows, A.columns)
_matsub(A, B, C, cint, nparallel)
return C
elif isinstance(A, sintMatrix) and isinstance(B, sintMatrix):
C = sintMatrix(A.rows, A.columns)
_matsub(A, B, C, sint, nparallel)
return C
elif isinstance(A, sfixMatrix) and isinstance(B, sfixMatrix):
C = sfixMatrix(A.rows, A.columns)
_matsub(A, B, C, sfix, nparallel)
return C
elif isinstance(A, sfixMatrixGC) and isinstance(B, sfixMatrixGC):
C = sfixMatrixGC(A.rows, A.columns)
_matsub_gc(A, B, C)
return C
else:
raise NotImplementedError
# horizontally stack the input matrices
def matstack_int(matrices):
pid = None
s = set([m.columns for m in matrices])
    if len(s) > 1:
raise ValueError("Can only stack matrices with the same number of columns")
num_rows_list = [m.rows for m in matrices]
M_rows = sum(num_rows_list)
M_columns = s.pop()
M = cintMatrix(M_rows, M_columns)
int_type = cint
pid = 0
s = set(type(m) for m in matrices)
if len(s) == 1 and cintMatrix in s:
M = cintMatrix(M_rows, M_columns)
int_type = cint
elif len(s) == 1 and pintMatrix in s:
parties = set([m.pid for m in matrices])
if len(parties) == 1:
pid = parties.pop()
M = pintMatrix(pid, M_rows, M_columns)
int_type = pint
else:
M = sintMatrix(M_rows, M_columns)
int_type = sint
else:
M = sintMatrix(M_rows, M_columns)
int_type = sint
row_count = 0
for m in matrices:
@for_range(m.rows)
def f(i):
@for_range(m.columns)
def g(j):
if int_type == pint:
M[row_count + i][j] = pint(pid, 0)
else:
M[row_count + i][j] = int_type(0)
M[row_count + i][j] += m[i][j]
return M
def matstack(matrices):
if isinstance(matrices[0], (cintMatrix, pintMatrix, sintMatrix)):
return matstack_int(matrices)
else:
raise NotImplementedError
def _sigmoid_sfix(v):
sign_v = cfix(1) - cfix(2) * (v < 0)
denom = (v * sign_v) + sfix(1)
res = v / denom
return res
def _sigmoid_sfix_gc(v):
abs_v = v.absolute()
denom = abs_v + cfix_gc(1)
res = v / denom
return res
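# Note: both helpers above compute the "softsign" approximation v / (1 + |v|),
# whose range is (-1, 1) rather than the (0, 1) of a true logistic sigmoid;
# it is an MPC-friendly stand-in that needs no exponentiation.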
def sigmoid(v, nparallel=1):
if isinstance(v, sfix):
return _sigmoid_sfix(v)
elif isinstance(v, (sfixMatrix)):
res = v.__class__(v.rows, v.columns)
@for_range_multithread(nparallel, v.rows, v.rows)
def a(i):
@for_range_multithread(nparallel, v.columns, v.columns)
def b(j):
res[i][j] = _sigmoid_sfix(v[i][j])
return res
elif isinstance(v, sfixMatrixGC):
res = v.__class__(v.rows, v.columns)
for i in range(v.rows):
for j in range(v.columns):
res[i][j] = _sigmoid_sfix_gc(v[i][j])
return res
else:
raise NotImplementedError
def mat_const_mul(c, m, nparallel=1):
if isinstance(m, np.ndarray):
if type(c) in (float, int):
return c * m
else:
raise ValueError("Type of constant is: {0} when expected float and int.".format(type(c)))
if isinstance(m, sfixMatrix) or isinstance(m, cfixMatrix):
if isinstance(m, sfixMatrix):
res = sfixMatrix(m.rows, m.columns)
else:
res = cfixMatrix(m.rows, m.columns)
"""
@for_range_multithread(nparallel, m.rows * m.columns, m.rows * m.columns)
def f(i):
@for_range_multithread(nparallel, m.columns, m.columns)
def g(j):
res[i][j] = c * m[i][j]
"""
@for_range_multithread(nparallel, m.rows * m.columns, m.rows * m.columns)
def loop(i):
i_index = i / m.columns
j_index = i % m.columns
res[i_index][j_index] = c * m[i_index][j_index]
return res
elif isinstance(m, sfixMatrixGC) or isinstance(m, cfixMatrixGC):
if isinstance(m, sfixMatrixGC):
res = sfixMatrixGC(m.rows, m.columns)
else:
res = cfixMatrixGC(m.rows, m.columns)
for i in range(m.rows):
for j in range(m.columns):
res[i][j] = c * m[i][j]
return res
else:
raise NotImplementedError
def mat_assign(o, i, nparallel=1):
if isinstance(i, (Array, ArrayGC)):
if o.length != i.length:
raise ValueError("Arrays must be of the same sizes")
if isinstance(i, Array):
@for_range(i.length)
def f(u):
o[u] = i[u]
elif isinstance(i, ArrayGC):
for u in range(i.length):
o[u] = i[u]
elif isinstance(i, (Matrix, MatrixGC)):
if o.rows != i.rows or o.columns != i.columns:
raise ValueError("Matrices must be of the same sizes")
if isinstance(i, Matrix):
@for_range_multithread(nparallel, i.rows, i.rows)
def f(u):
@for_range_multithread(nparallel, i.columns, i.columns)
def g(v):
o[u][v] = i[u][v]
elif isinstance(i, MatrixGC):
for u in range(i.rows):
for v in range(i.columns):
o[u][v] = i[u][v]
elif isinstance(i, list):
for u in range(len(i)):
o[u] = i[u]
else:
raise NotImplementedError
def array_index_secret_load_if(condition, l, index_1, index_2, nparallel=1):
supported_types_a = (sint, sfix)
supported_types_b = (sint_gc, sfix_gc)
if isinstance(index_1, supported_types_a) and isinstance(index_2, supported_types_a):
index = ((1 - condition) * index_1) + (condition * index_2)
return array_index_secret_load_a(l, index, nparallel=nparallel)
elif isinstance(index_1, supported_types_b) and isinstance(index_2, supported_types_b):
index = ((~condition) & index_1).__xor__(condition & index_2)
return array_index_secret_load_gc(l, index)
else:
raise NotImplementedError
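# Both branches above are oblivious multiplexers: the arithmetic-share path
# selects via (1 - c)*a + c*b, while the garbled-circuit path uses the boolean
# form (~c & a) ^ (c & b), so the chosen index is never revealed.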
def get_identity_matrix(value_type, n):
if isinstance(value_type, (sfix, sfixMatrix)):
ret = sfixMatrix(n, n)
@for_range(n)
def f(i):
@for_range(n)
def g(j):
v = (i == j)
v = sint(v)
vfix = sfix.load_sint(v)
ret[i][j] = vfix
return ret
elif isinstance(value_type, (sfix_gc, sfixMatrixGC, cfix_gc, cfixMatrixGC)):
ret = sfixMatrixGC(n, n)
for i in range(n):
for j in range(n):
ret[i][j] = cfix_gc(int(i == j))
return ret
else:
raise NotImplementedError
def cond_assign(cond, val1, val2):
res = ((~cond) & val1).__xor__(cond & val2)
return res
def matinv(A, nparallel=1):
if isinstance(A, np.ndarray):
return np.linalg.inv(A)
#if not isinstance(A, sfixMatrix) and not isinstance(A, cfixMatrix):
#raise NotImplementedError
n = A.rows
X = A.__class__(A.rows, A.columns, cfix_gc)
mat_assign(X, A)
I = get_identity_matrix(A, A.rows)
for j in range(n):
for i in range(j, n):
b1 = X[i][j].__lt__(cfix_gc(0.00001))
b2 = X[i][j].__gt__(cfix_gc(-0.00001))
b = ~(b1 & b2) #1 - b1 * b2
X[i][j] = b & X[i][j]
for k in range(n):
a1 = X[j][k]
a2 = X[i][k]
X[j][k] = cond_assign(b, a2, a1)
X[i][k] = cond_assign(b, a1, a2)
a1 = I[j][k]
a2 = I[i][k]
I[j][k] = cond_assign(b, a2, a1)
I[i][k] = cond_assign(b, a1, a2)
xjj_inv = cfix_gc(1).__div__(X[j][j])
t = cond_assign(b, xjj_inv, cfix_gc(1))
for k in range(n):
X[j][k] = t * X[j][k]
I[j][k] = t * I[j][k]
for L in range(j):
t = cfix_gc(-1) * X[L][j]
for k in range(n):
a1 = X[L][k] + t * X[j][k]
a2 = X[L][k]
b1 = I[L][k] + t * I[j][k]
b2 = I[L][k]
X[L][k] = cond_assign(b, a1, a2)
I[L][k] = cond_assign(b, b1, b2)
for L in range(j+1, n):
# from j+1 to n
t = cfix_gc(-1) * X[L][j]
for k in range(n):
a1 = X[L][k] + t * X[j][k]
a2 = X[L][k]
b1 = I[L][k] + t * I[j][k]
b2 = I[L][k]
X[L][k] = cond_assign(b, a1, a2)
I[L][k] = cond_assign(b, b1, b2)
return I
"""
@for_range(n)
def f0(j):
#@for_range(j, n)
@for_range(n)
def f1(i):
@if_(i >= j)
def h():
b1 = X[i][j].__lt__(sfix(0.00001))
b2 = X[i][j].__gt__(sfix(-0.00001))
b = 1 - b1 * b2
X[i][j] = b * X[i][j]
@for_range_multithread(nparallel, n, n)
def f2(k):
a1 = X[j][k]
a2 = X[i][k]
X[j][k] = cond_assign_a(b, a2, a1)
X[i][k] = cond_assign_a(b, a1, a2)
a1 = I[j][k]
a2 = I[i][k]
I[j][k] = cond_assign_a(b, a2, a1)
I[i][k] = cond_assign_a(b, a1, a2)
xjj_inv = sfix(1).__div__(X[j][j])
t = cond_assign_a(b, xjj_inv, sfix(1))
@for_range_multithread(nparallel, n, n)
def f3(k):
X[j][k] = t * X[j][k]
I[j][k] = t * I[j][k]
@for_range(n)
def f4(L):
@if_(L < j)
def h():
t = sfix(-1) * X[L][j]
@for_range_multithread(nparallel, n, n)
def g0(k):
a1 = X[L][k] + t * X[j][k]
a2 = X[L][k]
b1 = I[L][k] + t * I[j][k]
b2 = I[L][k]
X[L][k] = cond_assign_a(b, a1, a2)
I[L][k] = cond_assign_a(b, b1, b2)
# from j+1 to n
@for_range(n)
def f5(L):
@if_(L > j)
def h():
t = sfix(-1) * X[L][j]
@for_range_multithread(nparallel, n, n)
def g0(k):
a1 = X[L][k] + t * X[j][k]
a2 = X[L][k]
b1 = I[L][k] + t * I[j][k]
b2 = I[L][k]
X[L][k] = cond_assign_a(b, a1, a2)
I[L][k] = cond_assign_a(b, b1, b2)
return I
"""
# Assumes that the piecewise function is public for now
# Format: bounds in the form of [lower, upper]
# Function in the form of a*x + b
class Piecewise(object):
def __init__(self, num_boundaries):
self.lower_bound = sfixArray(3)
self.upper_bound = sfixArray(3)
self.boundary_points = sfixMatrix(num_boundaries - 2, 4)
self.counter = regint(0)
def add_boundary(self, lower, upper, a, b):
if lower is None:
self.lower_bound[0] = upper
self.lower_bound[1] = a
self.lower_bound[2] = b
elif upper is None:
self.upper_bound[0] = lower
self.upper_bound[1] = a
self.upper_bound[2] = b
else:
self.boundary_points[self.counter][0] = lower
self.boundary_points[self.counter][1] = upper
self.boundary_points[self.counter][2] = a
self.boundary_points[self.counter][3] = b
self.counter += regint(1)
# For debugging purposes only
def debug(self):
print_ln("[-inf, %s],: %s * x + %s", self.lower_bound[0].reveal(), self.lower_bound[1].reveal(), self.lower_bound[2].reveal())
@for_range(self.boundary_points.rows)
def f(i):
print_ln("[%s, %s]: %s * x + %s", self.boundary_points[i][0].reveal(), self.boundary_points[i][1].reveal(), self.boundary_points[i][2].reveal(), self.boundary_points[i][3].reveal())
print_ln("[%s, inf],: %s * x + %s", self.upper_bound[0].reveal(), self.upper_bound[1].reveal(), self.upper_bound[2].reveal())
def evaluate(self, x):
coefs = sfixArray(2)
coefs[0] = sfix(0)
coefs[1] = sfix(0)
# Check for lower bound
b = x.__le__(self.lower_bound[0])
coefs[0] += b * self.lower_bound[1]
coefs[1] += b * self.lower_bound[2]
@for_range(self.boundary_points.rows)
def f(i):
lower = self.boundary_points[i][0]
upper = self.boundary_points[i][1]
b1 = x.__gt__(lower)
b2 = x.__le__(upper)
b = b1 * b2
coefs[0] += b * self.boundary_points[i][2]
coefs[1] += b * self.boundary_points[i][3]
# Check for upper bound
b = x.__gt__(self.upper_bound[0])
coefs[0] += b * self.upper_bound[1]
coefs[1] += b * self.upper_bound[2]
res = coefs[0] * x + coefs[1]
return res
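# A minimal usage sketch (hypothetical values): clip(x, -1, 1) as a three-piece
# function. Piecewise(3) leaves room for one bounded middle segment; the two
# unbounded end pieces are registered by passing None for the missing bound.
#
#   clip = Piecewise(3)
#   clip.add_boundary(None, sfix(-1), sfix(0), sfix(-1))    # (-inf, -1]: 0 * x - 1
#   clip.add_boundary(sfix(-1), sfix(1), sfix(1), sfix(0))  # (-1, 1]:    1 * x + 0
#   clip.add_boundary(sfix(1), None, sfix(0), sfix(1))      # (1, inf):   0 * x + 1
#   y = clip.evaluate(x)  # obliviously selects (a, b), then returns a * x + b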
def LogisticRegression(X, y, batch_size, sgd_iters, dim):
assert(isinstance(X, Matrix))
assert(isinstance(y, Matrix))
if batch_size * sgd_iters >= X.rows:
    raise ValueError("batch_size * sgd_iters = {0} * {1} >= # of rows in X: {2}".format(batch_size, sgd_iters, X.rows))
if batch_size * sgd_iters >= y.rows:
    raise ValueError("batch_size * sgd_iters = {0} * {1} >= # of rows in y: {2}".format(batch_size, sgd_iters, y.rows))
if isinstance(X, sfixMatrix):
w = sfixMatrix(dim, 1)
# NOTE: cfix * sfix multiplication is currently unreliable in this framework
alpha_B = cfix(0.01 / batch_size)
XB = sfixMatrix(batch_size, dim)
yB = sfixMatrix(batch_size, 1)
else:
w = sfixMatrixGC(dim, 1)
alpha_B = cfix_gc(0.01 / batch_size)
XB = sfixMatrixGC(batch_size, dim)
yB = sfixMatrixGC(batch_size, 1)
for i in range(sgd_iters):
batch_low = i * batch_size
batch_high = (i + 1) * batch_size
for j in range(batch_size):
for d in range(dim):
XB[j][d] = X[batch_low + j][d]
yB[j][0] = y[batch_low + j][0]
w_ret = matmul(XB, w, batch_size, dim, dim, 1, sfix)
#reveal_all(w_ret, "w_ret")
w_sigmoid = sigmoid(w_ret)
#reveal_all(w_sigmoid, "w_sigmoid")
w_sub = matsub(w_sigmoid, yB)
XB_T = transpose(XB)
w_1 = matmul(XB_T, w_sub, dim, batch_size, batch_size, 1, sfix)
#reveal_all(w_1, "w_1")
w_2 = mat_const_mul(alpha_B, w_1)
#reveal_all(w_2, "w_2")
w_res = matsub(w, w_2)
mat_assign(w, w_res)
#print_ln("Iter: %s", i)
return w
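# A hypothetical invocation sketch (shapes only; loading the data is
# framework-specific). X is an (n x dim) feature matrix and y an (n x 1)
# matrix of 0/1 labels; n must exceed batch_size * sgd_iters since every
# iteration consumes a fresh, disjoint mini-batch.
#
#   w = LogisticRegression(X, y, batch_size=10, sgd_iters=5, dim=20)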
# Secure decision-tree inference over a flat node matrix; `x` is the feature vector.
def DecisionTree(tree, x, levels):
    w = tree[0]
for i in range(levels-1):
index = w[0]
split = w[1]
left_child = w[2]
right_child = w[3]
f = x[index]
cond = (f < split)
w_res = array_index_secret_load_if(cond, tree, left_child, right_child)
mat_assign(w, w_res)
# Return the final prediction class.
return w[1]
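# A sketch of the assumed node encoding (hypothetical values): each row of
# `tree` is one node [feature_index, split_value, left_child, right_child],
# where the child entries are row indices into `tree`. For a leaf, the
# split_value slot doubles as the predicted class, which is why w[1] is
# returned after the final level.
#
#   depth-2 stump on feature 0 with threshold 5:
#   row 0: [0, 5, 1, 2]  # internal: go to row 1 if x[0] < 5, else row 2
#   row 1: [0, 0, 0, 0]  # leaf: predict class 0
#   row 2: [0, 1, 0, 0]  # leaf: predict class 1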
def get_ith_matrix(mat, index, rows, cols, mat_type=sfixMatrix):
#ret = s_fix_mat(rows, cols)
#ret = sfixMatrix(rows, cols)
ret = mat_type(rows, cols)
for i in range(rows):
for j in range(cols):
ret[i][j] = mat[index * rows + i][j]
return ret
def copy_ith_matrix(dest, src, index, rows, cols):
for i in range(rows):
for j in range(cols):
dest[index * rows + i][j] = src[i][j]
# Local computation of weight vector.
def admm_local(XXinv, Xy, u, z, rho, num_cols):
temp = matsub(z, u)
z_u = mat_const_mul(rho, temp)
#for i in range(z_u.rows):
#print_ln("Admm local z: %s, temp: %s", z_u[i][0].reveal(), temp[i][0].reveal())
second_term = matadd(Xy, z_u) #add_matrices(Xy, z_u, NUM_COLS, 1)
w = matmul(XXinv, second_term, num_cols, num_cols, num_cols, 1, sfix)
return w
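# The next function builds the soft-thresholding operator from three linear
# pieces so it can be evaluated obliviously:
#   S_t(v) = v + t  for v < -t,   0  for |v| <= t,   v - t  for v > t
# (equivalently sign(v) * max(|v| - t, 0)), i.e. the proximal operator of the
# L1 penalty used in the lasso z-update of ADMM below.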
def soft_threshold_vec(threshold, vec, num_cols, mat_type=sfixMatrix):
#vec_new = s_fix_mat(NUM_COLS, 1)
#vec_new = sfixMatrix(num_cols, 1)
vec_new = mat_type(num_cols, 1)
neg_threshold = sfix(-1) * threshold
#neg_threshold = threshold.__neg__()
for i in range(num_cols):
threshold_fn = Piecewise(3)
threshold_fn.add_boundary(None, neg_threshold, sfix(0), vec[i][0] + threshold)
#threshold_fn.add_boundary(None, neg_threshold, c_fix(0), vec[i][0] + threshold)
threshold_fn.add_boundary(neg_threshold, threshold, sfix(0), sfix(0))
#threshold_fn.add_boundary(neg_threshold, threshold, c_fix(0), c_fix(0))
threshold_fn.add_boundary(threshold, None, sfix(0), vec[i][0] - threshold)
#threshold_fn.add_boundary(threshold, None, c_fix(0), vec[i][0] - threshold)
val = threshold_fn.evaluate(vec[i][0])
vec_new[i][0] = val
return vec_new
def admm_coordinate(w_list, u_list, z, rho, l, num_cols, num_parties, mat_type=sfixMatrix):
#w_avg = s_fix_mat(num_cols, 1)
#u_avg = s_fix_mat(num_cols, 1)
#w_avg = sfixMatrix(num_cols, 1)
#u_avg = sfixMatrix(num_cols, 1)
w_avg = mat_type(num_cols, 1)
u_avg = mat_type(num_cols, 1)
w_avg = mat_const_mul(cfix(0), w_avg)
u_avg = mat_const_mul(cfix(0), u_avg)
for i in range(num_parties):
w = get_ith_matrix(w_list, i, num_cols, 1, mat_type)
u = get_ith_matrix(u_list, i, num_cols, 1, mat_type)
new_w_avg = matadd(w_avg, w) #add_matrices(w_avg, w, NUM_COLS, 1)
new_u_avg = matadd(u_avg, u) #add_matrices(u_avg, u, NUM_COLS, 1)
mat_assign(w_avg, new_w_avg)
mat_assign(u_avg, new_u_avg)
#avg = c_fix(1.0 / NUM_PARTIES) cfix multiplication doesn't work
if mat_type in [sfixMatrix, sintMatrix]:
avg = sfix(1.0 / num_parties)  # NOTE: must stay sfix; switching this to cfix breaks the computation
threshold = l / (rho * num_parties) #sfix(l/(rho * num_parties))
else:
avg = sfix_gc(1.0 / num_parties)
threshold = sfix_gc(l/(rho * num_parties))
"""
for i in range(w_avg.rows):
print_ln("w_avg_mul: %s, w_avg: %s", (w_avg[i][0] * cfix(1.0 / num_parties)).reveal(), w_avg[i][0].reveal())
print_ln("u_avg_mul: %s, u_avg: %s", (u_avg[i][0] * cfix(1.0 / num_parties)).reveal(), u_avg[i][0].reveal())
"""
new_w_avg = mat_const_mul(avg, w_avg)
new_u_avg = mat_const_mul(avg, u_avg)
mat_assign(w_avg, new_w_avg)
mat_assign(u_avg, new_u_avg)
# Applying thresholding
u_plus_w = matadd(w_avg, u_avg)
z_new = soft_threshold_vec(threshold, u_plus_w, num_cols, mat_type)
#u_list_new = s_fix_mat(num_parties * num_cols, 1)
#neg_z = s_fix_mat(num_cols, 1)
#u_list_new = sfixMatrix(num_parties * num_cols, 1)
#neg_z = sfixMatrix(num_cols, 1)
u_list_new = mat_type(num_parties * num_cols, 1)
neg_z = mat_type(num_cols, 1)
for i in range(z_new.rows):
for j in range(z_new.columns):
neg_z[i][j] = z_new[i][j].__neg__()
for i in range(num_parties):
u_i = get_ith_matrix(u_list, i, num_cols, 1, mat_type)
w_i = get_ith_matrix(w_list, i, num_cols, 1, mat_type)
intermediate_vec = matadd(u_i, w_i) #add_matrices(u_i, w_i, NUM_COLS, 1)
sum_vec = matadd(intermediate_vec, neg_z) #add_matrices(intermediate_vec, neg_z, NUM_COLS, 1)
copy_ith_matrix(u_list_new, sum_vec, i, num_cols, 1)
#reveal_all(z_new, "intermediate_weights")
return u_list_new, z_new
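# For reference, the coordination step above implements the standard
# consensus-lasso updates:
#   z   <- S_{l/(rho*N)}(w_bar + u_bar)   # soft-thresholded average
#   u_i <- u_i + w_i - z                  # per-party dual update
# where w_bar and u_bar are the averages over the N parties.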
def ADMM_preprocess(x_data, y_data, rho, num_parties, num_rows, num_cols, mat_type=sfixMatrix):
#XTX_inv_lst = s_fix_mat(NUM_PARTIES * NUM_COLS, NUM_COLS)
#XTy_lst = s_fix_mat(NUM_PARTIES * NUM_COLS, 1)
#XTX_inv_lst = sfixMatrix(num_parties * num_cols, num_cols)
#XTy_lst = sfixMatrix(num_parties * num_cols, 1)
XTX_inv_lst = mat_type(num_parties * num_cols, num_cols)
XTy_lst = mat_type(num_parties * num_cols, 1)
for i in range(num_parties):
x_i = get_ith_matrix(x_data, i, num_rows, num_cols, mat_type)
y_i = get_ith_matrix(y_data, i, num_rows, 1, mat_type)
X_T = transpose(x_i)
XTy = matmul(X_T, y_i, num_cols, num_rows, num_rows, 1, sfix)
XTX = matmul(X_T, x_i, num_cols, num_rows, num_rows, num_cols, sfix)
#rho_identity = s_fix_mat(NUM_COLS, NUM_COLS)
#rho_identity = sfixMatrix(num_cols, num_cols)
rho_identity = mat_type(num_cols, num_cols)
rho_identity = mat_const_mul(cfix(0), rho_identity)
for j in range(num_cols):
rho_identity[j][j] = rho #rho_val #sfix(rho_val)
XTX_rho_identity = matadd(XTX, rho_identity) #add_matrices(XTX, rho_identity, NUM_COLS, NUM_COLS)
XTX_inv = matinv(XTX_rho_identity)
copy_ith_matrix(XTX_inv_lst, XTX_inv, i, num_cols, num_cols)
copy_ith_matrix(XTy_lst, XTy, i, num_cols, 1)
return XTX_inv_lst, XTy_lst
def ADMM(XTX_inv_lst, XTy_lst, admm_iter, num_parties, num_cols, rho, l):
#XTX_inv_lst, XTy_lst = local_compute(x_data, y_data, num_parties, num_rows, num_cols)
#w_list = s_fix_mat(num_parties * num_cols, 1)
mat_type = None
if isinstance(XTX_inv_lst, sfixMatrix):
mat_type = sfixMatrix
elif isinstance(XTX_inv_lst, sfixMatrixGC):
mat_type = sfixMatrixGC
elif isinstance(XTX_inv_lst, sintMatrix):
mat_type = sintMatrix
else:
raise ValueError("Type of matrix: {0} does not correspond to anything supported by this library".format(mat_type))
#w_list = sfixMatrix(num_parties * num_cols, 1)
#u_list = sfixMatrix(num_parties * num_cols, 1)
#z = sfixMatrix(num_cols, 1)
w_list = mat_type(num_parties * num_cols, 1)
u_list = mat_type(num_parties * num_cols, 1)
z = mat_type(num_cols, 1)
w_list = mat_const_mul(cfix(0), w_list)
u_list = mat_const_mul(cfix(0), u_list)
z = mat_const_mul(cfix(0), z)
"""
for i in range(w_list.rows):
for j in range(w_list.columns):
print_ln("%s, %s", w_list[i][j].reveal(), u_list[i][j].reveal())
"""
for i in range(admm_iter):
for j in range(num_parties):
XTX_inv = get_ith_matrix(XTX_inv_lst, j, num_cols, num_cols, mat_type)
XTy = get_ith_matrix(XTy_lst, j, num_cols, 1, mat_type)
u = get_ith_matrix(u_list, j, num_cols, 1, mat_type)
w = admm_local(XTX_inv, XTy, u, z, rho, num_cols)
#reveal_all(w, "local_weight")
copy_ith_matrix(w_list, w, j, num_cols, 1)
new_u_lst, new_z = admm_coordinate(w_list, u_list, z, rho, l, num_cols, num_parties, mat_type)
mat_assign(u_list, new_u_lst)
mat_assign(z, new_z)
return z
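# A hypothetical end-to-end sketch (placeholder values): preprocess each
# party's share of the data once, then run the iterative solver.
#
#   XTX_inv_lst, XTy_lst = ADMM_preprocess(x_data, y_data, sfix(1), num_parties, num_rows, num_cols)
#   z = ADMM(XTX_inv_lst, XTy_lst, admm_iter=10, num_parties=num_parties,
#            num_cols=num_cols, rho=sfix(1), l=sfix(0.01))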
|
nilq/baby-python
|
python
|
from flask_app.factory import create_app
app = create_app('meeting-scheduler')
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import numpy as np
class BPTTBatches(object):
"""Wraps a list of sequences as a contiguous batch iterator.
This will iterate over batches of contiguous subsequences of size
``seq_length``. TODO: elaborate
Example:
.. code-block:: python
# Sequence of length 1000
data = np.random.randint(10, size=1000)
# Iterator with over subsequences of length 20 with batch size 5
batched_dataset = BPTTBatches(data, batch_size=5, seq_length=20)
# Training loop
for x, y in batched_dataset:
# x and y have shape (seq_length, batch_size)
# y[i] == x[i+1], i.e. y is x shifted one step ahead
# Do something with x
Args:
data (list): List of numpy arrays containing the data
targets (list): List of targets
batch_size (int, optional): Batch size
seq_length (int, optional): BPTT length
"""
def __init__(
self,
data,
batch_size=32,
seq_length=30,
):
# Get one list
if isinstance(data[0], list):
data = [word for sent in data for word in sent]
# Parameters
self.num_samples = len(data)
self.num_samples -= self.num_samples % batch_size
self.num_positions = self.num_samples//batch_size
self.num_batches = int(np.ceil(self.num_positions / seq_length))
self.batch_size = batch_size
self.seq_length = seq_length
# The data is stored as an array of shape (-1, batch_size)
self.data = np.stack([
np.asarray(
data[b*self.num_positions:(b+1)*self.num_positions],
dtype=type(data[0])
)
for b in range(self.batch_size)],
axis=-1
)
# Reset position and shuffle the order if applicable
self.reset()
def __len__(self):
"""This returns the number of **batches** in the dataset
(not the total number of samples)
Returns:
    int: Number of batches in the dataset,
    ``ceil(num_positions / seq_length)`` where
    ``num_positions = len(data) // batch_size``
"""
return self.num_batches
def __getitem__(self, index):
    """Returns the data at ``index``
    The result is a numpy array of shape ``seq_len x batch_size``, where
    ``seq_len`` is determined by the range specified by ``index``.
    (``__next__`` slices one extra position and splits the result into
    ``x, next_x`` with ``next_x[t] == x[t+1]``.)
    Args:
        index (int, slice): Index or slice
    Returns:
        np.ndarray: Data at ``index``
    """
    return self.data[index]
def percentage_done(self):
"""What percent of the data has been covered in the current epoch"""
return 100 * (self.position / self.num_positions)
def just_passed_multiple(self, batch_number):
"""Checks whether the current number of batches processed has
just passed a multiple of ``batch_number``.
For example you can use this to report at regular interval
(eg. every 10 batches)
Args:
batch_number (int): [description]
Returns:
bool: ``True`` if :math:`\\fraccurrent_batch`
"""
return (self.position // self.seq_length) % batch_number == 0
def reset(self):
"""Reset the iterator and shuffle the dataset if applicable"""
self.position = 0
def __iter__(self):
self.reset()
return self
def __next__(self):
# Check for end of epoch
if self.position >= self.num_positions-1:
raise StopIteration
# Batch index
seq_len = min(self.seq_length, self.num_positions-1-self.position)
batch = self[self.position:self.position+seq_len+1]
# Increment position
self.position += seq_len
# Return batch
return batch[:-1], batch[1:]
|
nilq/baby-python
|
python
|
# A module to make your error messages less scary
import sys
from characters import AsciiCharacter
def output_ascii(err_message="You certainly messed something up."):
one_line = False
err_line_1 = err_message.split('--')[0]
try:
err_line_2 = err_message.split('--')[1]
except IndexError:
one_line = True
err_line_2 = err_line_1
if len(err_line_1) >= len(err_line_2):
max_length = len(err_line_1)
long_line_label = 1
else:
max_length = len(err_line_2)
long_line_label = 2
ascii_art = AsciiCharacter().character
s1 = " " * 16 + "_" * (max_length + 6)
s2 = " " * 15 + "/" + " " * (max_length + 6) + "\\"
if not one_line:
if long_line_label == 1:
length_diff = len(err_line_1) - len(err_line_2)
s3 = " " * 15 + "|" + " " * 3 + err_line_1 + " " * 3 + "|"
s4 = " " * 15 + "|" + " " * 3 + err_line_2 + " " * length_diff + " " * 3 + "|"
elif long_line_label == 2:
length_diff = len(err_line_2) - len(err_line_1)
s3 = " " * 15 + "|" + " " * 3 + err_line_1 + " " * length_diff + " " * 3 + "|"
s4 = " " * 15 + "|" + " " * 3 + err_line_2 + " " * 3 + "|"
else:
s34 = " " * 15 + "|" + " " * 3 + err_message + " " * 3 + "|"
s5 = " " * 15 + "\\" + " " * 2 + "_" * (max_length + 4) + "/"
s6 = " " * 14 + "/ /"
if not one_line:
speech_bubble = s1 + "\n" + s2 + "\n" + s3 + "\n" + s4 + "\n" + s5 + '\n' + s6
else:
speech_bubble = s1 + "\n" + s2 + "\n" + s34 + "\n" + s5 + '\n' + s6
print("\n\n\n" + speech_bubble + ascii_art + "\n\n\n")
return
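# Example usage ('--' splits the message across two lines of the speech bubble):
#
#   output_ascii("Something went wrong--but it was probably just a typo.")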
|
nilq/baby-python
|
python
|
from keras.preprocessing.image import load_img, img_to_array
target_image_path = 'img/a.jpg'
style_image_path = 'img/a.png'
width, height = load_img(target_image_path).size
img_height = 400
img_width = int(width * img_height / height)
import numpy as np
from keras.applications import vgg19
def preprocess_image(image_path):
img = load_img(image_path, target_size = (img_height, img_width))
img = img_to_array(img)
img = np.expand_dims(img, axis=0)
img = vgg19.preprocess_input(img)
return img
def deprocess_image(x):
x[:,:,0] += 103.939
x[:,:,1] += 116.779
x[:,:,2] += 123.68
x = x[:,:,::-1]
x = np.clip(x,0,255).astype('uint8')
return x
from keras import backend as K
target_image = K.constant(preprocess_image(target_image_path))
style_image = K.constant(preprocess_image(style_image_path))
combination_image = K.placeholder((1,img_height, img_width, 3))
input_tensor = K.concatenate([target_image, style_image, combination_image], axis = 0)
model = vgg19.VGG19(input_tensor=input_tensor, weights='imagenet', include_top=False)
model.summary()
def content_loss(base, combination):
return K.sum(K.square(combination - base))
def gram_matrix(x):
features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
gram = K.dot(features, K.transpose(features))
return gram
def style_loss(style, combination):
S = gram_matrix(style)
C = gram_matrix(combination)
channels = 3
size = img_height * img_width
return K.sum(K.square(S - C)) / (4. * (channels ** 2) * (size ** 2))
def total_variation_loss(x):
a = K.square(x[:, :img_height-1, :img_width-1, :] - x[:, 1:, :img_width-1, :])
b = K.square(x[:, :img_height-1, :img_width-1, :] - x[:, :img_height-1, 1:, :])
return K.sum(K.pow(a + b, 1.25))
outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
content_layer = 'block5_conv2'
style_layers = ['block1_conv1', 'block2_conv1', 'block3_conv1', 'block4_conv1', 'block5_conv1']
total_variation_weight = 1e-4
style_weight = 1.
content_weight = 0.025
loss = K.variable(0.)
layer_features = outputs_dict[content_layer]
target_image_features = layer_features[0,:,:,:]
combination_features = layer_features[2,:,:,:]
loss += content_weight * content_loss(target_image_features, combination_features)
for layer_name in style_layers:
layer_features = outputs_dict[layer_name]
style_features = layer_features[1,:,:,:]
combination_features = layer_features[2,:,:,:]
sl = style_loss(style_features, combination_features)
loss += (style_weight / len(style_layers)) * sl
loss += total_variation_weight * total_variation_loss(combination_image)
grads = K.gradients(loss, combination_image)[0]
fetch_loss_and_grads = K.function([combination_image], [loss, grads])
class Evaluator(object):
def __init__(self):
self.loss_value = None
self.grads_values = None
def loss(self, x):
assert self.loss_value is None
x = x.reshape((1,img_height, img_width, 3))
outs = fetch_loss_and_grads([x])
loss_value = outs[0]
grad_values = outs[1].flatten().astype('float64')
self.loss_value = loss_value
self.grad_values = grad_values
return self.loss_value
def grads(self, x):
assert self.loss_value is not None
grad_values = np.copy(self.grad_values)
self.loss_value = None
self.grad_values = None
return grad_values
evaluator = Evaluator()
from scipy.optimize import fmin_l_bfgs_b
from scipy.misc import imsave
import time
result_prefix = 'my_result'
iterations = 20
x = preprocess_image(target_image_path)
x = x.flatten()
for i in range(iterations):
print('Start of iteration', i)
start_time = time.time()
x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x, fprime=evaluator.grads, maxfun=20)
print('Current loss value:', min_val)
img = x.copy().reshape((img_height, img_width, 3))
img = deprocess_image(img)
fname=result_prefix + '_at_iteration_%d.png' % i
imsave(fname, img)
print('Image saved as', fname)
end_time = time.time()
print('Iteration %d completed in %ds' % (i, end_time - start_time))
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
class Demo(object):
__x = 0
def __init__(self, i):
self.__i = i
Demo.__x += 1
def __str__(self):
return str(self.__i)
def hello(self):
print("hello " + self.__str__())
@classmethod
def getX(cls):
return cls.__x
class Other(object):
def __init__(self, k):
self.k = k
def __str__(self):
return str(self.k)
def hello(self):
print("hello, world")
def bye(self):
print("Good-bye!", self.__str__())
class SubDemo(Demo, Other):
def __init__(self, i, j):
super(SubDemo, self).__init__(i)
self.__j = j
def __str__(self):
return super(SubDemo, self).__str__() + "+" + str(self.__j)
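# A small demonstration of the classes above (not part of the original module):
# hello() resolves to Demo first under the MRO, while bye() comes from Other.
if __name__ == "__main__":
    d = SubDemo(1, 2)
    d.hello()  # prints "hello 1+2" -- Demo's hello(), SubDemo's __str__()
    d.bye()  # prints "Good-bye! 1+2" -- inherited from Other
    print(Demo.getX())  # class-level counter of Demo instances created so far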
|
nilq/baby-python
|
python
|
import abc
from typing import Callable
from typing import Iterator
from typing import List
from typing import Optional
from xsdata.codegen.models import Class
from xsdata.models.config import GeneratorConfig
from xsdata.utils.constants import return_true
class ContainerInterface(metaclass=abc.ABCMeta):
"""Wrap a list of classes and expose a simple api for easy access and
process."""
config: GeneratorConfig
@abc.abstractmethod
def iterate(self) -> Iterator[Class]:
"""Create an iterator for the class map values."""
@abc.abstractmethod
def find(self, qname: str, condition: Callable = return_true) -> Optional[Class]:
"""Search by qualified name for a specific class with an optional
condition callable."""
@abc.abstractmethod
def find_inner(self, source: Class, qname: str) -> Class:
"""Search by qualified name for a specific inner class or fail."""
@abc.abstractmethod
def add(self, item: Class):
"""Add class item to the container."""
@abc.abstractmethod
def extend(self, items: List[Class]):
"""Add a list of classes the container."""
class HandlerInterface(metaclass=abc.ABCMeta):
"""Class handler interface."""
@abc.abstractmethod
def process(self, target: Class):
"""Process the given target class."""
|
nilq/baby-python
|
python
|
#testing the concept
import re
#file_name = raw_input("Enter textfile name (ex. hamlet.txt): ")
def app(f_name):
fd = open(f_name, 'r')
fd = fd.read()
lines = fd.split('\n')
c1 = 0
while c1 < len(lines):
    #lines[c1] = re.sub('[^0-9a-zA-Z]+', '', lines[c1])
    if len(lines[c1]) == 0:
        lines.pop(c1)  # don't advance: the next line has shifted into slot c1
    else:
        c1 += 1
return lines
def game():
lines = app('hamlet.txt')
print lines
current_line = 0
while current_line < len(lines):
if current_line == 0:
guess = raw_input("\nFirst line: ")
print guess
if re.sub('[^0-9a-zA-Z]+','',guess).lower() == re.sub('[^0-9a-zA-Z]+','',lines[current_line]).lower():
current_line += 1
if current_line > 0:
print "\nPrevious line: " + lines[current_line - 1]
guess = raw_input("Line: ")
"""
print "==========="
print re.sub('[^0-9a-zA-Z]+','',guess).lower()
temp = lines[current_line]
print re.sub('[^0-9a-zA-Z]+','',temp).lower()
print "==========="
"""
if re.sub('[^0-9a-zA-Z]+','',guess).lower() == re.sub('[^0-9a-zA-Z]+','',lines[current_line]).lower():
current_line += 1
print "FINISHED!"
game()
|
nilq/baby-python
|
python
|
import pytest
import os, time
import sys
from datetime import date, datetime
from pytest_html_reporter.template import html_template
from pytest_html_reporter.time_converter import time_converter
from os.path import isfile, join
import json
import glob
from collections import Counter
from PIL import Image
from io import BytesIO
import shutil
_total = _executed = 0
_pass = _fail = 0
_skip = _error = 0
_xpass = _xfail = 0
_apass = _afail = 0
_askip = _aerror = 0
_axpass = _axfail = 0
_astotal = 0
_aspass = 0
_asfail = 0
_asskip = 0
_aserror = 0
_asxpass = 0
_asxfail = 0
_asrerun = 0
_current_error = ""
_suite_name = _test_name = None
_scenario = []
_test_suite_name = []
_test_pass_list = []
_test_fail_list = []
_test_skip_list = []
_test_xpass_list = []
_test_xfail_list = []
_test_error_list = []
_test_status = None
_start_execution_time = 0
_execution_time = _duration = 0
_test_metrics_content = _suite_metrics_content = ""
_previous_suite_name = "None"
_initial_trigger = True
_spass_tests = 0
_sfail_tests = 0
_sskip_tests = 0
_serror_tests = 0
_srerun_tests = 0
_sxfail_tests = 0
_sxpass_tests = 0
_suite_length = 0
_archive_tab_content = ""
_archive_body_content = ""
_archive_count = ""
archive_pass = 0
archive_fail = 0
archive_skip = 0
archive_xpass = 0
archive_xfail = 0
archive_error = 0
archives = {}
highlights = {}
p_highlights = {}
max_failure_suite_name = ''
max_failure_suite_name_final = ''
max_failure_suite_count = 0
similar_max_failure_suite_count = 0
max_failure_total_tests = 0
max_failure_percent = ''
trends_label = []
tpass = []
tfail = []
tskip = []
_previous_test_name = ''
_suite_error = 0
_suite_fail = 0
_pvalue = 0
screen_base = ''
screen_img = None
_attach_screenshot_details = ''
_title = 'PYTEST REPORT'
def pytest_addoption(parser):
group = parser.getgroup("report generator")
group.addoption(
"--html-report",
action="store",
dest="path",
default=".",
help="path to generate html report",
)
group.addoption(
"--title",
action="store",
dest="title",
default="PYTEST REPORT",
help="customize report title",
)
def pytest_configure(config):
path = config.getoption("path")
clean_screenshots(path)
title = config.getoption("title")
custom_title(title)
config._html = HTMLReporter(path, config)
config.pluginmanager.register(config._html)
def suite_highlights(data):
global highlights, p_highlights
for i in data['content']['suites']:
if data['content']['suites'][i]['status']['total_fail'] == 0:
l = data['content']['suites'][i]['suite_name']
if l not in p_highlights:
p_highlights[l] = 1
else:
p_highlights[l] += 1
else:
k = data['content']['suites'][i]['suite_name']
if k not in highlights:
highlights[k] = 1
else:
highlights[k] += 1
def generate_suite_highlights():
global max_failure_suite_name, max_failure_suite_count, similar_max_failure_suite_count, max_failure_total_tests
global max_failure_percent, max_failure_suite_name_final
if highlights == {}:
max_failure_suite_name_final = 'No failures in History'
max_failure_suite_count = 0
max_failure_percent = '0'
return
max_failure_suite_name = max(highlights, key=highlights.get)
max_failure_suite_count = highlights[max_failure_suite_name]
if max_failure_suite_name in p_highlights:
max_failure_total_tests = p_highlights[max_failure_suite_name] + max_failure_suite_count
else:
max_failure_total_tests = max_failure_suite_count
max_failure_percent = (max_failure_suite_count / max_failure_total_tests) * 100
if len(max_failure_suite_name) > 25:
max_failure_suite_name_final = ".." + max_failure_suite_name[-23:]
else:
max_failure_suite_name_final = max_failure_suite_name
res = Counter(highlights.values())
if max(res.values()) > 1: similar_max_failure_suite_count = max(res.values())
def max_rerun():
indices = [i for i, s in enumerate(sys.argv) if 'reruns' in s]
try:
if "=" in sys.argv[int(indices[0])]:
return int(sys.argv[int(indices[0])].split('=')[1])
else:
return int(sys.argv[int(indices[0]) + 1])
except IndexError:
return None
def screenshot(data=None):
global screen_base, screen_img
screen_base = HTMLReporter.base_path
screen_img = Image.open(BytesIO(data))
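# A hypothetical usage sketch from inside a test (the Selenium-style `driver`
# object is an assumption; any PNG byte string works, since the bytes are
# decoded with PIL above):
#
#   screenshot(data=driver.get_screenshot_as_png())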
def clean_screenshots(path):
screenshot_dir = os.path.abspath(os.path.expanduser(os.path.expandvars(path))) + '/pytest_screenshots'
if os.path.isdir(screenshot_dir):
shutil.rmtree(screenshot_dir)
def custom_title(title):
global _title
_title = title[:26] + '...' if len(title) > 29 else title
class HTMLReporter(object):
def __init__(self, path, config):
self.json_data = {'content': {'suites': {0: {'status': {}, 'tests': {0: {}}, }, }}}
self.path = path
self.config = config
has_rerun = config.pluginmanager.hasplugin("rerunfailures")
self.rerun = 0 if has_rerun else None
def pytest_runtest_teardown(self, item, nextitem):
global _test_name, _duration
_test_name = item.name
_test_end_time = time.time()
_duration = _test_end_time - _start_execution_time
if (self.rerun is not None) and (max_rerun() is not None): self.previous_test_name(_test_name)
self._test_names(_test_name)
self.append_test_metrics_row()
def previous_test_name(self, _test_name):
global _previous_test_name
if _previous_test_name == _test_name:
self.rerun += 1
else:
_scenario.append(_test_name)
self.rerun = 0
_previous_test_name = _test_name
def pytest_runtest_setup(self, item):
global _start_execution_time
_start_execution_time = time.time()
def pytest_sessionfinish(self, session):
if _suite_name is not None: self.append_suite_metrics_row(_suite_name)
def archive_data(self, base, filename):
path = os.path.join(base, filename)
if os.path.isfile(path):
os.makedirs(base + '/archive', exist_ok=True)
f = 'output.json'
if isfile(join(base, f)):
fname = os.path.splitext(f)
os.rename(base + '/' + f, os.path.join(base + '/archive', fname[0] + '_' +
str(_start_execution_time) + fname[1]))
@property
def report_path(self):
if '.html' in self.path:
path = '.' if '.html' in self.path.rsplit('/', 1)[0] else self.path.rsplit('/', 1)[0]
if path == '': path = '.'
logfile = os.path.expanduser(os.path.expandvars(path))
HTMLReporter.base_path = os.path.abspath(logfile)
return os.path.abspath(logfile), self.path.split('/')[-1]
else:
logfile = os.path.expanduser(os.path.expandvars(self.path))
HTMLReporter.base_path = os.path.abspath(logfile)
return os.path.abspath(logfile), 'pytest_html_report.html'
@pytest.hookimpl(hookwrapper=True)
def pytest_terminal_summary(self, terminalreporter, exitstatus, config):
yield
global _execution_time
_execution_time = time.time() - terminalreporter._sessionstarttime
if _execution_time < 60:
_execution_time = str(round(_execution_time, 2)) + " secs"
else:
_execution_time = str(time.strftime("%H:%M:%S", time.gmtime(round(_execution_time)))) + " Hrs"
global _total
_total = _pass + _fail + _xpass + _xfail + _skip + _error
if _suite_name is not None:
base = self.report_path[0]
path = os.path.join(base, self.report_path[1])
os.makedirs(base, exist_ok=True)
self.archive_data(base, self.report_path[1])
# generate json file
self.generate_json_data(base)
# generate trends
self.update_trends(base)
# generate archive template
self.update_archives_template(base)
# generate suite highlights
generate_suite_highlights()
# generate html report
live_logs_file = open(path, 'w')
message = self.renew_template_text('https://i.imgur.com/LRSRHJO.png')
live_logs_file.write(message)
live_logs_file.close()
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(self, item, call):
outcome = yield
rep = outcome.get_result()
global _suite_name
_suite_name = rep.nodeid.split("::")[0]
if _initial_trigger:
self.update_previous_suite_name()
self.set_initial_trigger()
if str(_previous_suite_name) != str(_suite_name):
self.append_suite_metrics_row(_previous_suite_name)
self.update_previous_suite_name()
else:
self.update_counts(rep)
if rep.when == "call" and rep.passed:
if hasattr(rep, "wasxfail"):
self.increment_xpass()
self.update_test_status("xPASS")
global _current_error
self.update_test_error("")
else:
self.increment_pass()
self.update_test_status("PASS")
self.update_test_error("")
if rep.failed:
if getattr(rep, "when", None) == "call":
if hasattr(rep, "wasxfail"):
self.increment_xpass()
self.update_test_status("xPASS")
self.update_test_error("")
else:
self.increment_fail()
self.update_test_status("FAIL")
if rep.longrepr:
longerr = ""
for line in rep.longreprtext.splitlines():
exception = line.startswith("E ")
if exception:
longerr += line + "\n"
self.update_test_error(longerr.replace("E ", ""))
else:
self.increment_error()
self.update_test_status("ERROR")
if rep.longrepr:
longerr = ""
for line in rep.longreprtext.splitlines():
longerr += line + "\n"
self.update_test_error(longerr)
if rep.skipped:
if hasattr(rep, "wasxfail"):
self.increment_xfail()
self.update_test_status("xFAIL")
if rep.longrepr:
longerr = ""
for line in rep.longreprtext.splitlines():
exception = line.startswith("E ")
if exception:
longerr += line + "\n"
self.update_test_error(longerr.replace("E ", ""))
else:
self.increment_skip()
self.update_test_status("SKIP")
if rep.longrepr:
longerr = ""
for line in rep.longreprtext.splitlines():
longerr += line + "\n"
self.update_test_error(longerr)
def append_test_metrics_row(self):
global _test_metrics_content, _pvalue, _duration
test_row_text = """
<tr>
<td style="word-wrap: break-word;max-width: 200px; white-space: normal; text-align:left">__sname__</td>
<td style="word-wrap: break-word;max-width: 200px; white-space: normal; text-align:left">__name__</td>
<td>__stat__</td>
<td>__dur__</td>
<td style="word-wrap: break-word;max-width: 200px; white-space: normal; text-align:left"">
__msg__
__floating_error_text__
</td>
</tr>
"""
floating_error_text = """
<a data-toggle="modal" href="#myModal-__runt__" class="">(...)</a>
<div class="modal fade in" id="myModal-__runt__" tabindex="-1" role="dialog" aria-labelledby="myModalLabel" aria-hidden="true">
<div class="modal-dialog">
<div class="modal-content">
<div class="modal-body">
<p>
<svg xmlns="http://www.w3.org/2000/svg" aria-hidden="true" focusable="false" width="1.12em" height="1em" style="-ms-transform: rotate(360deg); -webkit-transform: rotate(360deg); transform: rotate(360deg);" preserveAspectRatio="xMidYMid meet" viewBox="0 0 1856 1664"><path d="M1056 1375v-190q0-14-9.5-23.5t-22.5-9.5H832q-13 0-22.5 9.5T800 1185v190q0 14 9.5 23.5t22.5 9.5h192q13 0 22.5-9.5t9.5-23.5zm-2-374l18-459q0-12-10-19q-13-11-24-11H818q-11 0-24 11q-10 7-10 21l17 457q0 10 10 16.5t24 6.5h185q14 0 23.5-6.5t10.5-16.5zm-14-934l768 1408q35 63-2 126q-17 29-46.5 46t-63.5 17H160q-34 0-63.5-17T50 1601q-37-63-2-126L816 67q17-31 47-49t65-18t65 18t47 49z" fill="#DC143C"/></svg>
__full_msg__
</p>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-primary" data-dismiss="modal">Close</button>
</div>
</div>
</div>
</div>
"""
if (self.rerun is not None) and (max_rerun() is not None):
if (_test_status == 'FAIL') or (_test_status == 'ERROR'): _pvalue += 1
if (_pvalue == max_rerun() + 1) or (_test_status == 'PASS'):
if ((_test_status == 'FAIL') or (_test_status == 'ERROR')) and (
screen_base != ''): self.generate_screenshot_data()
test_row_text = test_row_text.replace("__sname__", str(_suite_name))
test_row_text = test_row_text.replace("__name__", str(_test_name))
test_row_text = test_row_text.replace("__stat__", str(_test_status))
test_row_text = test_row_text.replace("__dur__", str(round(_duration, 2)))
test_row_text = test_row_text.replace("__msg__", str(_current_error[:50]))
floating_error_text = floating_error_text.replace("__runt__", str(time.time()).replace('.', ''))
if len(_current_error) < 49:
test_row_text = test_row_text.replace("__floating_error_text__", str(''))
else:
test_row_text = test_row_text.replace("__floating_error_text__", str(floating_error_text))
test_row_text = test_row_text.replace("__full_msg__", str(_current_error))
_test_metrics_content += test_row_text
_pvalue = 0
elif (self.rerun is not None) and (
(_test_status == 'xFAIL') or (_test_status == 'xPASS') or (_test_status == 'SKIP')):
test_row_text = test_row_text.replace("__sname__", str(_suite_name))
test_row_text = test_row_text.replace("__name__", str(_test_name))
test_row_text = test_row_text.replace("__stat__", str(_test_status))
test_row_text = test_row_text.replace("__dur__", str(round(_duration, 2)))
test_row_text = test_row_text.replace("__msg__", str(_current_error[:50]))
floating_error_text = floating_error_text.replace("__runt__", str(time.time()).replace('.', ''))
if len(_current_error) < 49:
test_row_text = test_row_text.replace("__floating_error_text__", str(''))
else:
test_row_text = test_row_text.replace("__floating_error_text__", str(floating_error_text))
test_row_text = test_row_text.replace("__full_msg__", str(_current_error))
_test_metrics_content += test_row_text
elif (self.rerun is None) or (max_rerun() is None):
if ((_test_status == 'FAIL') or (_test_status == 'ERROR')) and (
screen_base != ''): self.generate_screenshot_data()
test_row_text = test_row_text.replace("__sname__", str(_suite_name))
test_row_text = test_row_text.replace("__name__", str(_test_name))
test_row_text = test_row_text.replace("__stat__", str(_test_status))
test_row_text = test_row_text.replace("__dur__", str(round(_duration, 2)))
test_row_text = test_row_text.replace("__msg__", str(_current_error[:50]))
floating_error_text = floating_error_text.replace("__runt__", str(time.time()).replace('.', ''))
if len(_current_error) < 49:
test_row_text = test_row_text.replace("__floating_error_text__", str(''))
else:
test_row_text = test_row_text.replace("__floating_error_text__", str(floating_error_text))
test_row_text = test_row_text.replace("__full_msg__", str(_current_error))
_test_metrics_content += test_row_text
self.json_data['content']['suites'].setdefault(len(_test_suite_name), {})['suite_name'] = str(_suite_name)
self.json_data['content']['suites'].setdefault(len(_test_suite_name), {}).setdefault('tests', {}).setdefault(
len(_scenario) - 1, {})['status'] = str(_test_status)
self.json_data['content']['suites'].setdefault(len(_test_suite_name), {}).setdefault('tests', {}).setdefault(
len(_scenario) - 1, {})['message'] = str(_current_error)
self.json_data['content']['suites'].setdefault(len(_test_suite_name), {}).setdefault('tests', {}).setdefault(
len(_scenario) - 1, {})['test_name'] = str(_test_name)
if (self.rerun is not None) and (max_rerun() is not None):
self.json_data['content']['suites'].setdefault(len(_test_suite_name), {}).setdefault('tests',
{}).setdefault(
len(_scenario) - 1, {})['rerun'] = str(self.rerun)
else:
self.json_data['content']['suites'].setdefault(len(_test_suite_name), {}).setdefault('tests',
{}).setdefault(
len(_scenario) - 1, {})['rerun'] = '0'
def generate_screenshot_data(self):
os.makedirs(screen_base + '/pytest_screenshots', exist_ok=True)
_screenshot_name = round(time.time())
_screenshot_suite_name = _suite_name.split('/')[-1:][0].replace('.py', '')
_screenshot_test_name = _test_name
if len(_test_name) >= 19: _screenshot_test_name = _test_name[-17:]
_screenshot_error = _current_error
screen_img.save(
screen_base + '/pytest_screenshots/' + str(_screenshot_name) + '.png'
)
# attach screenshots
self.attach_screenshots(_screenshot_name, _screenshot_suite_name, _screenshot_test_name, _screenshot_error)
_screenshot_name = ''
_screenshot_suite_name = ''
_screenshot_test_name = ''
_screenshot_error = ''
def append_suite_metrics_row(self, name):
global _spass_tests, _sfail_tests, _sskip_tests, _sxpass_tests, _sxfail_tests, _serror_tests, _srerun_tests, \
_error, _suite_error, _suite_fail
self._test_names(_test_name, clear='yes')
self._test_suites(name)
self.json_data['content']['suites'].setdefault(len(_test_suite_name) - 1, {}).setdefault('status', {})[
'total_pass'] = int(_spass_tests)
self.json_data['content']['suites'].setdefault(len(_test_suite_name) - 1, {}).setdefault('status', {})[
'total_skip'] = int(_sskip_tests)
self.json_data['content']['suites'].setdefault(len(_test_suite_name) - 1, {}).setdefault('status', {})[
'total_xpass'] = int(_sxpass_tests)
self.json_data['content']['suites'].setdefault(len(_test_suite_name) - 1, {}).setdefault('status', {})[
'total_xfail'] = int(_sxfail_tests)
if (self.rerun is not None) and (max_rerun() is not None):
_base_suite = self.json_data['content']['suites'].setdefault(len(_test_suite_name) - 1, {})['tests']
for i in _base_suite:
_srerun_tests += int(_base_suite[int(i)]['rerun'])
self.json_data['content']['suites'].setdefault(len(_test_suite_name) - 1, {}).setdefault('status', {})[
'total_rerun'] = int(_srerun_tests)
else:
self.json_data['content']['suites'].setdefault(len(_test_suite_name) - 1, {}).setdefault('status', {})[
'total_rerun'] = 0
for i in self.json_data['content']['suites'].setdefault(len(_test_suite_name) - 1, {})['tests']:
if 'ERROR' in self.json_data['content']['suites'].setdefault(len(_test_suite_name) - 1, {})['tests'][i][
'status']:
_suite_error += 1
elif 'FAIL' == self.json_data['content']['suites'].setdefault(len(_test_suite_name) - 1, {})['tests'][i][
'status']:
_suite_fail += 1
self.json_data['content']['suites'].setdefault(len(_test_suite_name) - 1, {}).setdefault('status', {})[
'total_fail'] = _suite_fail
self.json_data['content']['suites'].setdefault(len(_test_suite_name) - 1, {}).setdefault('status', {})[
'total_error'] = _suite_error
suite_row_text = """
<tr>
<td style="word-wrap: break-word;max-width: 200px; white-space: normal; text-align:left">__sname__</td>
<td>__spass__</td>
<td>__sfail__</td>
<td>__sskip__</td>
<td>__sxpass__</td>
<td>__sxfail__</td>
<td>__serror__</td>
<td>__srerun__</td>
</tr>
"""
suite_row_text = suite_row_text.replace("__sname__", str(name))
suite_row_text = suite_row_text.replace("__spass__", str(_spass_tests))
suite_row_text = suite_row_text.replace("__sfail__", str(_suite_fail))
suite_row_text = suite_row_text.replace("__sskip__", str(_sskip_tests))
suite_row_text = suite_row_text.replace("__sxpass__", str(_sxpass_tests))
suite_row_text = suite_row_text.replace("__sxfail__", str(_sxfail_tests))
suite_row_text = suite_row_text.replace("__serror__", str(_suite_error))
suite_row_text = suite_row_text.replace("__srerun__", str(_srerun_tests))
global _suite_metrics_content
_suite_metrics_content += suite_row_text
self._test_passed(int(_spass_tests))
self._test_failed(int(_suite_fail))
self._test_skipped(int(_sskip_tests))
self._test_xpassed(int(_sxpass_tests))
self._test_xfailed(int(_sxfail_tests))
self._test_error(int(_suite_error))
_spass_tests = 0
_sfail_tests = 0
_sskip_tests = 0
_sxpass_tests = 0
_sxfail_tests = 0
_serror_tests = 0
_srerun_tests = 0
_suite_fail = 0
_suite_error = 0
def set_initial_trigger(self):
global _initial_trigger
_initial_trigger = False
def update_previous_suite_name(self):
global _previous_suite_name
_previous_suite_name = _suite_name
def update_counts(self, rep):
global _sfail_tests, _spass_tests, _sskip_tests, _serror_tests, _sxfail_tests, _sxpass_tests
if rep.when == "call" and rep.passed:
if hasattr(rep, "wasxfail"):
_sxpass_tests += 1
else:
_spass_tests += 1
if rep.failed:
if getattr(rep, "when", None) == "call":
if hasattr(rep, "wasxfail"):
_sxpass_tests += 1
else:
_sfail_tests += 1
else:
pass
if rep.skipped:
if hasattr(rep, "wasxfail"):
_sxfail_tests += 1
else:
_sskip_tests += 1
def update_test_error(self, msg):
global _current_error
_current_error = msg
def update_test_status(self, status):
global _test_status
_test_status = status
def increment_xpass(self):
global _xpass
_xpass += 1
def increment_xfail(self):
global _xfail
_xfail += 1
def increment_pass(self):
global _pass
_pass += 1
def increment_fail(self):
global _fail
_fail += 1
def increment_skip(self):
global _skip
_skip += 1
def increment_error(self):
global _error, _serror_tests
_error += 1
_serror_tests += 1
def _date(self):
return date.today().strftime("%B %d, %Y")
def _test_suites(self, name):
global _test_suite_name
_test_suite_name.append(name.split('/')[-1].replace('.py', ''))
def _test_names(self, name, **kwargs):
global _scenario
if (self.rerun is None) or (max_rerun() is None): _scenario.append(name)
try:
if kwargs['clear'] == 'yes': _scenario = []
except Exception:
pass
def _test_passed(self, value):
global _test_pass_list
_test_pass_list.append(value)
def _test_failed(self, value):
global _test_fail_list
_test_fail_list.append(value)
def _test_skipped(self, value):
global _test_skip_list
_test_skip_list.append(value)
def _test_xpassed(self, value):
global _test_xpass_list
_test_xpass_list.append(value)
def _test_xfailed(self, value):
global _test_xfail_list
_test_xfail_list.append(value)
def _test_error(self, value):
global _test_error_list
_test_error_list.append(value)
def renew_template_text(self, logo_url):
template_text = html_template()
template_text = template_text.replace("__custom_logo__", logo_url)
template_text = template_text.replace("__execution_time__", str(_execution_time))
template_text = template_text.replace("__title__", _title)
# template_text = template_text.replace("__executed_by__", str(platform.uname()[1]))
# template_text = template_text.replace("__os_name__", str(platform.uname()[0]))
# template_text = template_text.replace("__python_version__", str(sys.version.split(' ')[0]))
# template_text = template_text.replace("__generated_date__", str(datetime.datetime.now().strftime("%b %d %Y, %H:%M")))
template_text = template_text.replace("__total__",
str(_aspass + _asfail + _asskip + _aserror + _asxpass + _asxfail))
template_text = template_text.replace("__executed__", str(_executed))
template_text = template_text.replace("__pass__", str(_aspass))
template_text = template_text.replace("__fail__", str(_asfail))
template_text = template_text.replace("__skip__", str(_asskip))
template_text = template_text.replace("__error__", str(_aserror))
template_text = template_text.replace("__xpass__", str(_asxpass))
template_text = template_text.replace("__xfail__", str(_asxfail))
template_text = template_text.replace("__rerun__", str(_asrerun))
template_text = template_text.replace("__suite_metrics_row__", str(_suite_metrics_content))
template_text = template_text.replace("__test_metrics_row__", str(_test_metrics_content))
template_text = template_text.replace("__date__", str(self._date()))
template_text = template_text.replace("__test_suites__", str(_test_suite_name))
template_text = template_text.replace("__test_suite_length__", str(len(_test_suite_name)))
template_text = template_text.replace("__test_suite_pass__", str(_test_pass_list))
template_text = template_text.replace("__test_suites_fail__", str(_test_fail_list))
template_text = template_text.replace("__test_suites_skip__", str(_test_skip_list))
template_text = template_text.replace("__test_suites_xpass__", str(_test_xpass_list))
template_text = template_text.replace("__test_suites_xfail__", str(_test_xfail_list))
template_text = template_text.replace("__test_suites_error__", str(_test_error_list))
template_text = template_text.replace("__archive_status__", str(_archive_tab_content))
template_text = template_text.replace("__archive_body_content__", str(_archive_body_content))
template_text = template_text.replace("__archive_count__", str(_archive_count))
template_text = template_text.replace("__archives__", str(archives))
template_text = template_text.replace("__max_failure_suite_name_final__", str(max_failure_suite_name_final))
template_text = template_text.replace("__max_failure_suite_count__", str(max_failure_suite_count))
template_text = template_text.replace("__similar_max_failure_suite_count__",
str(similar_max_failure_suite_count))
template_text = template_text.replace("__max_failure_total_tests__", str(max_failure_total_tests))
template_text = template_text.replace("__max_failure_percent__", str(max_failure_percent))
template_text = template_text.replace("__trends_label__", str(trends_label))
template_text = template_text.replace("__tpass__", str(tpass))
template_text = template_text.replace("__tfail__", str(tfail))
template_text = template_text.replace("__tskip__", str(tskip))
template_text = template_text.replace("__attach_screenshot_details__", str(_attach_screenshot_details))
return template_text
def generate_json_data(self, base):
global _asskip, _aserror, _aspass, _asfail, _asxpass, _asxfail, _asrerun
self.json_data['date'] = self._date()
self.json_data['start_time'] = _start_execution_time
self.json_data['total_suite'] = len(_test_suite_name)
suite = self.json_data['content']['suites']
for i in suite:
for k in self.json_data['content']['suites'][i]['status']:
if (k == 'total_fail' or k == 'total_error') and self.json_data['content']['suites'][i]['status'][
k] != 0:
self.json_data['status'] = "FAIL"
break
else:
continue
try:
if self.json_data['status'] == "FAIL": break
except KeyError:
if len(_test_suite_name) == i + 1: self.json_data['status'] = "PASS"
for i in suite:
for k in self.json_data['content']['suites'][i]['status']:
if k == 'total_pass':
_aspass += self.json_data['content']['suites'][i]['status'][k]
elif k == 'total_fail':
_asfail += self.json_data['content']['suites'][i]['status'][k]
elif k == 'total_skip':
_asskip += self.json_data['content']['suites'][i]['status'][k]
elif k == 'total_error':
_aserror += self.json_data['content']['suites'][i]['status'][k]
elif k == 'total_xpass':
_asxpass += self.json_data['content']['suites'][i]['status'][k]
elif k == 'total_xfail':
_asxfail += self.json_data['content']['suites'][i]['status'][k]
elif k == 'total_rerun':
_asrerun += self.json_data['content']['suites'][i]['status'][k]
_astotal = _aspass + _asfail + _asskip + _aserror + _asxpass + _asxfail
self.json_data.setdefault('status_list', {})['pass'] = str(_aspass)
self.json_data.setdefault('status_list', {})['fail'] = str(_asfail)
self.json_data.setdefault('status_list', {})['skip'] = str(_asskip)
self.json_data.setdefault('status_list', {})['error'] = str(_aserror)
self.json_data.setdefault('status_list', {})['xpass'] = str(_asxpass)
self.json_data.setdefault('status_list', {})['xfail'] = str(_asxfail)
self.json_data.setdefault('status_list', {})['rerun'] = str(_asrerun)
self.json_data['total_tests'] = str(_astotal)
with open(base + '/output.json', 'w') as outfile:
json.dump(self.json_data, outfile)
def update_archives_template(self, base):
global _archive_count
f = glob.glob(base + '/archive/*.json')
cf = glob.glob(base + '/output.json')
if len(f) > 0:
_archive_count = len(f) + 1
self.load_archive(cf, value='current')
f.sort(reverse=True)
self.load_archive(f, value='history')
else:
_archive_count = 1
self.load_archive(cf, value='current')
def load_archive(self, f, value):
global archive_pass, archive_fail, archive_skip, archive_xpass, archive_xfail, archive_error, archives
def state(data):
if data == 'fail':
return 'times', '#fc6766'
elif data == 'pass':
return 'check', '#98cc64'
for i, val in enumerate(f):
with open(val) as json_file:
data = json.load(json_file)
suite_highlights(data)
archive_row_text = """
<a class ="list-group-item list-group-item-action" href="#list-item-__acount__" style="font-size: 1.1rem; color: dimgray; margin-bottom: -7%;">
<i class="fa fa-__astate__" aria-hidden="true" style="color: __astate_color__"></i>
<span>__astatus__</span></br>
<span style="font-size: 0.81rem; color: gray; padding-left: 12%;">__adate__</span>
</a>
"""
archive_row_text = archive_row_text.replace("__astate__", state(data['status'].lower())[0])
archive_row_text = archive_row_text.replace("__astate_color__", state(data['status'].lower())[1])
if value == "current":
archive_row_text = archive_row_text.replace("__astatus__", 'build #' + str(_archive_count))
archive_row_text = archive_row_text.replace("__acount__", str(_archive_count))
else:
archive_row_text = archive_row_text.replace("__astatus__", 'build #' + str(len(f) - i))
archive_row_text = archive_row_text.replace("__acount__", str(len(f) - i))
adate = datetime.strptime(
data['date'].split(None, 1)[0][:3] + ' ' +
data['date'].split(None, 1)[1].replace(',', ''), "%b %d %Y"
)
atime = \
"".join(list(filter(lambda x: ':' in x, time.ctime(float(data['start_time'])).split(' ')))).rsplit(
':',
1)[0]
archive_row_text = archive_row_text.replace("__adate__",
str(adate.date()) + ' | ' + str(time_converter(atime)))
global _archive_tab_content
_archive_tab_content += archive_row_text
_archive_body_text = """
<div id="list-item-__acount__" class="archive-body">
<div>
<h4 class="archive-header">
Build #__acount__
</h4>
<div class="archive-date">
<i class="fa fa-calendar-check-o" aria-hidden="true"></i>
__date__
</div>
</div>
<div style="margin-top: -5%;">
<div id="archive-container-__iloop__" style="padding-top: 5%; position: absolute;">
<div style="">
<span class="total__tests">__total_tests__</span>
</div>
<div id="archive-label-__iloop__">
<span class="archive__label">TEST CASES</span>
</div>
</div>
<div class="archive-chart-container">
<canvas id="archive-chart-__iloop__" style="margin-top: 10%; padding-left: 25%; margin-right: -16%; float: right;"></canvas>
</div>
</div>
<div class="archive__bar">
<section id="statistic" class="statistic-section-__status__ one-page-section">
<div class="container" style="margin-top: -2%;">
<div class="row text-center">
<div class="col-xs-12 col-md-3" style="max-width: 14.2%;">
<div class="counter">
<h2 class="timer count-title count-number">__pass__</h2>
<p class="stats-text">PASSED</p>
</div>
</div>
<div class="col-xs-12 col-md-3" style="max-width: 14.2%;">
<div class="counter">
<h2 class="timer count-title count-number">__fail__
</h2>
<p class="stats-text">FAILED</p>
</div>
</div>
<div class="col-xs-12 col-md-3" style="max-width: 14.2%;"v>
<div class="counter">
<h2 class="timer count-title count-number">__skip__</h2>
<p class="stats-text">SKIPPED</p>
</div>
</div>
<div class="col-xs-12 col-md-3" style="max-width: 14.2%;">
<div class="counter">
<h2 class="timer count-title count-number">__xpass__</h2>
<p class="stats-text">XPASSED</p>
</div>
</div>
<div class="col-xs-12 col-md-3" style="max-width: 14.2%;">
<div class="counter">
<h2 class="timer count-title count-number">__xfail__</h2>
<p class="stats-text">XFAILED</p>
</div>
</div>
<div class="col-xs-12 col-md-3" style="max-width: 14.2%;">
<div class="counter">
<h2 class="timer count-title count-number">__error__</h2>
<p class="stats-text">ERROR</p>
</div>
</div>
<div class="col-xs-12 col-md-3" style="max-width: 14.2%;">
<div class="counter">
<h2 class="timer count-title count-number">__rerun__</h2>
<p class="stats-text">RERUN</p>
</div>
</div>
</div>
</div>
</section>
</div>
</div>
"""
if value == "current":
_archive_body_text = _archive_body_text.replace("__iloop__", str(i))
_archive_body_text = _archive_body_text.replace("__acount__", str(_archive_count))
else:
_archive_body_text = _archive_body_text.replace("__iloop__", str(i + 1))
_archive_body_text = _archive_body_text.replace("__acount__", str(len(f) - i))
_archive_body_text = _archive_body_text.replace("__total_tests__", data['total_tests'])
_archive_body_text = _archive_body_text.replace("__date__", data['date'].upper())
_archive_body_text = _archive_body_text.replace("__pass__", data['status_list']['pass'])
_archive_body_text = _archive_body_text.replace("__fail__", data['status_list']['fail'])
_archive_body_text = _archive_body_text.replace("__skip__", data['status_list']['skip'])
_archive_body_text = _archive_body_text.replace("__xpass__", data['status_list']['xpass'])
_archive_body_text = _archive_body_text.replace("__xfail__", data['status_list']['xfail'])
_archive_body_text = _archive_body_text.replace("__error__", data['status_list']['error'])
try:
_archive_body_text = _archive_body_text.replace("__rerun__", data['status_list']['rerun'])
except KeyError:
_archive_body_text = _archive_body_text.replace("__rerun__", '0')
_archive_body_text = _archive_body_text.replace("__status__", data['status'].lower())
index = i
if value != "current": index = i + 1
archives.setdefault(str(index), {})['pass'] = data['status_list']['pass']
archives.setdefault(str(index), {})['fail'] = data['status_list']['fail']
archives.setdefault(str(index), {})['skip'] = data['status_list']['skip']
archives.setdefault(str(index), {})['xpass'] = data['status_list']['xpass']
archives.setdefault(str(index), {})['xfail'] = data['status_list']['xfail']
archives.setdefault(str(index), {})['error'] = data['status_list']['error']
try:
archives.setdefault(str(index), {})['rerun'] = data['status_list']['rerun']
except KeyError:
archives.setdefault(str(index), {})['rerun'] = '0'
archives.setdefault(str(index), {})['total'] = data['total_tests']
global _archive_body_content
_archive_body_content += _archive_body_text
def update_trends(self, base):
global tpass, tfail, tskip
f2 = glob.glob(base + '/output.json')
with open(f2[0]) as json_file:
data = json.load(json_file)
adate = datetime.strptime(
data['date'].split(None, 1)[0][:3] + ' ' +
data['date'].split(None, 1)[1].replace(',', ''), "%b %d %Y"
)
atime = \
"".join(list(filter(lambda x: ':' in x, time.ctime(float(data['start_time'])).split(' ')))).rsplit(
':',
1)[0]
trends_label.append(str(time_converter(atime)).upper() + ' | ' + str(adate.date().strftime("%b")) + ' '
+ str(adate.date().strftime("%d")))
tpass.append(data['status_list']['pass'])
tfail.append(int(data['status_list']['fail']) + int(data['status_list']['error']))
tskip.append(data['status_list']['skip'])
f = glob.glob(base + '/archive' + '/*.json')
f.sort(reverse=True)
for i, val in enumerate(f):
with open(val) as json_file:
data = json.load(json_file)
adate = datetime.strptime(
data['date'].split(None, 1)[0][:3] + ' ' +
data['date'].split(None, 1)[1].replace(',', ''), "%b %d %Y"
)
atime = \
"".join(list(filter(lambda x: ':' in x, time.ctime(float(data['start_time'])).split(' ')))).rsplit(
':',
1)[0]
trends_label.append(str(time_converter(atime)).upper() + ' | ' + str(adate.date().strftime("%b")) + ' '
+ str(adate.date().strftime("%d")))
tpass.append(data['status_list']['pass'])
tfail.append(int(data['status_list']['fail']) + int(data['status_list']['error']))
tskip.append(data['status_list']['skip'])
if i == 4: break
def attach_screenshots(self, screen_name, test_suite, test_case, test_error):
global _attach_screenshot_details
_screenshot_details = """
<div class="img-hover col-md-6 col-xl-3 p-3">
<div>
<a class="video" href="__screenshot_base__/pytest_screenshots/__screen_name__.png" data-toggle="lightbox" style="background-image: url('__screenshot_base__/pytest_screenshots/__screen_name__.png');" data-fancybox="images" data-caption="SUITE: __ts__ :: SCENARIO: __tc__">
<span class="video-hover-desc video-hover-small"> <span style="font-size:23px;display: block;margin-bottom: 15px;"> __tc__</span>
<span>__te__</span> </span>
</a>
<p class="text-desc"><strong>__ts__</strong><br />
__te__</p>
</div>
</div>
<div class="desc-video-none">
<div class="desc-video" id="Video-desc-01">
<h2>__tc__</h2>
<p><strong>__ts__</strong><br />
__te__</p>
</div>
</div>
"""
if len(test_case) == 17: test_case = '..' + test_case
_screenshot_details = _screenshot_details.replace("__screen_name__", str(screen_name))
_screenshot_details = _screenshot_details.replace("__ts__", str(test_suite))
_screenshot_details = _screenshot_details.replace("__tc__", str(test_case))
_screenshot_details = _screenshot_details.replace("__te__", str(test_error))
_screenshot_details = _screenshot_details.replace("__screenshot_base__", str(screen_base))
_attach_screenshot_details += _screenshot_details
|
nilq/baby-python
|
python
|
import datetime
import logging
import time
import googleapiclient
class TaskList:
def __init__(self, id):
self.id = id
self.tasks = []
def update(self, service):
try:
results = service.tasks().list(tasklist = self.id, showCompleted = False, dueMax = rfc3339_today_midnight()).execute()
except googleapiclient.errors.HttpError as e:
logging.warning(e)
logging.warning('Could not update task list.')
return
items = results.get('items')
self.tasks = []
if not items: # empty list do nothing
pass
else:
for item in items:
self.tasks.append(item['title'])
def delete_completed_tasks(self, service):
results = service.tasks().list(tasklist = self.id, showCompleted = True, showHidden = True).execute()
items = results.get('items')
if not items: # empty list do nothing
pass
else:
for item in items:
# if the task has been completed delete it
if item['status'] == 'completed':
service.tasks().delete(tasklist = self.id, task = item['id']).execute()
def rfc3339_today_midnight():
    # RFC 3339 timestamp for the end of today (23:59:59 local time), used as the
    # dueMax cutoff when listing tasks.
    now = datetime.datetime.now()
    dt = datetime.datetime(now.year, now.month, now.day, 23, 59, 59, 0).isoformat()
    # time.timezone is seconds *west* of UTC, so the RFC 3339 offset carries the
    # opposite sign.
    offset_hours = int(time.timezone / 3600.0)
    sign = '-' if offset_hours > 0 else '+'
    return dt + sign + str(abs(offset_hours)).zfill(2) + ':00'
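# A minimal usage sketch (not part of the original module; assumes `creds` is an
# authorized google.oauth2 credentials object with the Tasks API scope):
#
#     from googleapiclient.discovery import build
#     service = build('tasks', 'v1', credentials=creds)
#     todo = TaskList('@default')
#     todo.update(service)
#     print(todo.tasks)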
|
nilq/baby-python
|
python
|
from datetime import *
from dateutil.relativedelta import *
now = datetime.now()
print(now)
now = now + relativedelta(months=1, weeks=1, hour=10)
print(now)
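# Note on relativedelta semantics: plural arguments (months=1, weeks=1) shift the
# date relative to its current value, while singular arguments (hour=10) set that
# field to an absolute value, as the call above does with hour=10.
print(now + relativedelta(hours=10))  # adds ten hours
print(now + relativedelta(hour=10))   # sets the hour field to 10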
|
nilq/baby-python
|
python
|
import os
FLASK_HOST = '0.0.0.0'
FLASK_PORT = 5000
FLASK_DEBUG = False
FLASK_THREADED = True
ENV_SETUP = os.getenv('MONGO_DATABASE', None) is not None
MONGO_DATABASE = os.getenv('MONGO_DATABASE', 'avoid_kuvid')
MONGO_ROOT_USERNAME = os.getenv('MONGO_ROOT_USERNAME', 'admin')
MONGO_ROOT_PASSWORD = os.getenv('MONGO_ROOT_PASSWORD', 'admin')
MONGO_API = f'mongodb://{MONGO_ROOT_USERNAME}:{MONGO_ROOT_PASSWORD}@db:27017/{MONGO_DATABASE}' if ENV_SETUP else "mongodb://localhost:27017/"
TIME_FORMAT = '%H:%M'
DATE_FORMAT = f'%Y-%m-%d {TIME_FORMAT}'
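# A minimal usage sketch (an assumption, not part of the original config): the URI
# above is meant to be handed to a MongoDB client, e.g. with pymongo:
#
#     import pymongo
#     client = pymongo.MongoClient(MONGO_API)
#     db = client[MONGO_DATABASE]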
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# @Author: Marc-Antoine
# @Date: 2019-03-17 17:18:42
# @Last Modified by: Marc-Antoine Belanger
# @Last Modified time: 2019-03-17 17:20:31
from gym.envs.registration import register
register(
id='cribbage-v0',
entry_point='gym_cribbage.envs:CribbageEnv',
)
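# A minimal usage sketch (assumes the gym_cribbage package is installed so the
# entry point above resolves):
#
#     import gym
#     env = gym.make('cribbage-v0')
#     state = env.reset()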
|
nilq/baby-python
|
python
|
# coding:utf-8
# 2019/9/3
"""
给定一个整数的数组,找出其中的pair(a, b),使得a+b=0,并返回这样的pair数目。(a, b)和(b, a)是同一组。
输入
整数数组
输出
找到的pair数目
样例输入
-1, 2, 4, 5, -2
样例输出
1
"""
def solver(nums):
maps = {}
ret = 0
retList = []
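    # maps[-n] counts how many complements of already-seen values are still
    # expected; when a later n appears among the keys, an (n, -n) pair exists.
    # retList keeps one representative per pair so (a, b) and (b, a) count once.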
for n in nums:
if n in maps:
if maps[n] == 1:
if n not in retList and -n not in retList:
retList.append(n)
ret += 1
maps[-n] = maps.get(-n, 0) + 1
# print(maps, retList)
return ret
def test():
nums = [0,0,0, -1, 1, -1, 1]
ret = solver(nums)
print(ret)
def inputs():
nums = list(map(int, input().strip().split(" ")))
ret = solver(nums)
print(ret)
if __name__ == '__main__':
test()
|
nilq/baby-python
|
python
|
class UserErrorMessage(object):
OPERATION_NOT_SUPPORTED = "Operation is not supported."
NO_MODEL_PUBLISHED = "No model published for the current API."
NO_ENDPOINT_PUBLISHED = "No service endpoint published in the current API."
NO_OPERATION_PUBLISHED = "No operation published in the current API."
    CAN_NOT_CONNECT_TO_MODEL_REPO = "Cannot connect to the model repository. Contact the publisher to correct the error."
NOT_IMPLEMENTED = "{} is not supported."
OPERATION_NOT_IN_STATUS = "Operation {} is not in {} status."
INVALID_CERT = 'Invalid certificate.'
    INVALID_API_KEY = 'The API key is invalid.'
    API_NOT_EXIST = 'The API {} in application {} does not exist or you do not have permission to access it.'
    SUBSCRIPTION_NOT_EXIST = "The subscription {} doesn't exist or the API key is invalid."
API_VERSION_NOT_EXIST = "The specified API or API version does not exist or you do not have permission to access it."
API_VERSION_REQUIRED = "The api-version query parameter is required."
AAD_TOKEN_REQUIRED = "AAD token is required."
INTERNAL_SERVER_ERROR = "The server encountered an internal error and was unable to complete your request."
|
nilq/baby-python
|
python
|
# -*- encoding: utf-8 -*-
import json
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.http import Http404, HttpResponse
from django.shortcuts import render, get_object_or_404, redirect
from accounts.models import User, Authority
from common.constants import GROUP_WORKING_TYPE_ADMINSTRATION_AREA
from common.decorators import superuser_required
from logs.functions import list_for_content_type, list_for_object, list_for_user
from reports.models import AdministrationArea, Report, ReportInvestigation, ReportLaboratoryCase, \
    AnimalLaboratoryCause
from reports.serializers import AnimalLaboratoryCauseSerializer
from supervisors.forms import SupervisorsUserForm, SupervisorsAuthorityForm, SupervisorsReportInvestigationForm, \
SupervisorsReportLaboratoryCaseForm
from supervisors.functions import (get_querystring_filter_user_status,
export_excel_users_to_create_authorities,
import_authorities_excel, import_and_excel_users_to_create_authorities,
print_invite_code_authorities)
@login_required
@superuser_required
def supervisors_home(request):
return redirect('supervisors_users')
@login_required
# @superuser_required
def supervisors_users(request):
if request.user.is_superuser:
return redirect('supervisors_users_by_status', user_status='volunteer')
return redirect('supervisors_report_investigation')
# return render(request, 'supervisors/supervisors_users_list.html', {
# 'areas': AdministrationArea.get_root_nodes(),
# 'status': 'users',
# })
@login_required
@superuser_required
def supervisors_users_by_status(request, user_status):
if user_status not in ['volunteer', 'podd', 'livestock', 'public-health', 'additional-volunteer', 'additional-volunteer-dodd']:
raise Http404
querystring = get_querystring_filter_user_status({}, user_status)
return render(request, 'supervisors/supervisors_users_list.html', {
'status': user_status,
'users': User.objects.filter(**querystring).order_by('username'),
})
def supervisors_export_users_excel_to_authorities(request):
return export_excel_users_to_create_authorities()
@login_required
@superuser_required
def supervisors_authorities(request):
    success = None
if request.method == 'POST':
file = request.FILES.get('file')
if file:
success = import_authorities_excel(file)
if success:
messages.success(request, u'สร้างองค์กรใหม่สำเร็จ')
else:
messages.error(request, u'ไม่สามารถสร้างองค์กรใหม่สำเร็จ ไฟล์ไม่ถูกต้อง')
return render(request, 'supervisors/supervisors_authorities_list.html', {
'authorities': Authority.objects.order_by('code'),
})
@login_required
@superuser_required
def supervisors_new_authorities(request):
if request.method == 'POST':
file = request.FILES.get('file')
if file:
return import_and_excel_users_to_create_authorities(file)
return HttpResponse('False')
@login_required
@superuser_required
def supervisors_authorities_print_invitation_code(request):
return print_invite_code_authorities()
@login_required
@superuser_required
def supervisors_authorities_edit(request, authority_id):
authority = get_object_or_404(Authority, id=authority_id)
if request.method == 'POST':
form = SupervisorsAuthorityForm(request.POST, instance=authority)
if form.is_valid():
form.save()
messages.success(request, u'แก้ไขข้อมูลเรียบร้อยแล้ว')
else:
form = SupervisorsAuthorityForm(instance=authority)
return render(request, 'supervisors/supervisors_authorities_form.html', {
'authority': authority,
'form': form,
})
@login_required
@superuser_required
def supervisors_users_by_area(request, area_id):
return redirect('supervisors_users_by_area_and_status', user_status='volunteer', area_id=area_id)
# area = get_object_or_404(AdministrationArea, id=area_id)
# return render(request, 'supervisors/supervisors_users_list.html', {
# 'areas': [area],
# 'selected_area': area,
# 'status': 'users',
# })
@login_required
@superuser_required
def supervisors_users_by_area_and_status(request, user_status, area_id):
if user_status not in ['volunteer', 'podd', 'livestock', 'public-health']:
raise Http404
area = get_object_or_404(AdministrationArea, id=area_id)
querystring = {
'groups__groupadministrationarea__administration_area': area,
'groups__type': GROUP_WORKING_TYPE_ADMINSTRATION_AREA,
}
querystring = get_querystring_filter_user_status(querystring, user_status)
return render(request, 'supervisors/supervisors_users_list.html', {
'areas': [area],
'selected_area': area,
'status': user_status,
'users': User.objects.filter(**querystring).order_by('username'),
})
@login_required
@superuser_required
def supervisors_users_edit(request, user_id):
user = get_object_or_404(User, id=user_id)
if request.method == 'POST':
form = SupervisorsUserForm(request.POST, instance=user)
if form.is_valid():
form.save(created_by=request.user)
messages.success(request, u'แก้ไขข้อมูลเรียบร้อยแล้ว')
else:
form = SupervisorsUserForm(instance=user)
return render(request, 'supervisors/supervisors_users_form.html', {
'user': user,
'form': form,
})
@login_required
@superuser_required
def supervisors_logs_reports(request):
logs = list_for_content_type(Report)
paginator = Paginator(logs, 25)
page = request.GET.get('page')
try:
logs = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
logs = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
logs = paginator.page(paginator.num_pages)
return render(request, 'supervisors/supervisors_logs_reports.html', {
'logs': logs
})
@login_required
@superuser_required
def supervisors_logs_reports_by_report(request, report_id):
report = get_object_or_404(Report, pk=report_id)
logs = list_for_object(report)
paginator = Paginator(logs, 25)
page = request.GET.get('page')
try:
logs = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
logs = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
logs = paginator.page(paginator.num_pages)
return render(request, 'supervisors/supervisors_logs_reports.html', {
'logs': logs,
'item': report,
'log_header': u'Report #%d' % report.id,
})
@login_required
@superuser_required
def supervisors_logs_reports_by_user(request, user_id):
user = get_object_or_404(User, pk=user_id)
logs = list_for_user(user)
paginator = Paginator(logs, 25)
page = request.GET.get('page')
try:
logs = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
logs = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
logs = paginator.page(paginator.num_pages)
return render(request, 'supervisors/supervisors_logs_reports.html', {
'logs': logs,
'item': user,
'log_header': u'User %s' % user.username,
})
@login_required
@superuser_required
def supervisors_logs_users(request):
logs = list_for_content_type(User)
paginator = Paginator(logs, 25)
page = request.GET.get('page')
try:
logs = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
logs = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
logs = paginator.page(paginator.num_pages)
return render(request, 'supervisors/supervisors_logs_users.html', {
'logs': logs
})
@login_required
@superuser_required
def supervisors_logs_user(request, user_id):
user = get_object_or_404(User, pk=user_id)
logs = list_for_object(user)
paginator = Paginator(logs, 25)
page = request.GET.get('page')
try:
logs = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
logs = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
logs = paginator.page(paginator.num_pages)
return render(request, 'supervisors/supervisors_logs_users.html', {
'logs': logs,
'item': user,
'log_header': u'%s' % user.username,
})
@login_required
# @superuser_required
def supervisors_report_investigation_create(request):
if request.method == 'POST':
form = SupervisorsReportInvestigationForm(request.POST, request.FILES)
if form.is_valid():
investigation = ReportInvestigation(
domain=form.cleaned_data['report'].domain,
report=form.cleaned_data['report'],
note=form.cleaned_data['note'],
investigation_date=form.cleaned_data['investigation_date'],
result=form.cleaned_data['result'],
file=form.cleaned_data['file'],
created_by=request.user,
updated_by=request.user
)
investigation.save()
messages.success(request, u'เพิ่มรายการสืบสวนโรคสำเร็จ')
return redirect('supervisors_report_investigation')
else:
form = SupervisorsReportInvestigationForm()
return render(request, 'supervisors/supervisors_report_investigation_form.html', {
'form': form,
})
@login_required
# @superuser_required
def supervisors_report_investigation_edit(request, investigation_id):
investigation = get_object_or_404(ReportInvestigation, id=investigation_id)
if request.method == 'POST':
form = SupervisorsReportInvestigationForm(request.POST, request.FILES)
if form.is_valid():
investigation.report = form.cleaned_data['report']
investigation.note = form.cleaned_data['note']
investigation.investigation_date = form.cleaned_data['investigation_date']
            investigation.result = form.cleaned_data['result']
if form.cleaned_data['file']:
investigation.file = form.cleaned_data['file']
investigation.updated_by = request.user
investigation.save()
messages.success(request, u'แก้ไขการสืบสวนโรค #%s สำเร็จ' % investigation.id)
return redirect('supervisors_report_investigation')
else:
form = SupervisorsReportInvestigationForm(initial={
'report': investigation.report.id,
'note': investigation.note,
'investigation_date': investigation.investigation_date,
'result': 1 if investigation.result else 0,
})
return render(request, 'supervisors/supervisors_report_investigation_form.html', {
'form': form,
'file': investigation.file,
'investigation': investigation,
'edit': True
})
@login_required
# @superuser_required
def supervisors_report_investigation(request):
investigation_list = ReportInvestigation.objects.order_by('-investigation_date')
paginator = Paginator(investigation_list, 100)
page = request.GET.get('page')
try:
investigations = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
investigations = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
investigations = paginator.page(paginator.num_pages)
return render(request, 'supervisors/supervisors_report_investigation_list.html', {
'investigations': investigations
})
@login_required
# @superuser_required
def supervisors_report_investigation_delete(request, investigation_id):
investigation = get_object_or_404(ReportInvestigation, id=investigation_id)
investigation.delete()
messages.success(request, u'ลบรายการสำเร็จ')
return redirect('supervisors_report_investigation')
@login_required
# @superuser_required
def supervisors_report_laboratory(request):
case_list = ReportLaboratoryCase.objects.order_by('-id')
paginator = Paginator(case_list, 100)
page = request.GET.get('page')
try:
cases = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
cases = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
cases = paginator.page(paginator.num_pages)
return render(request, 'supervisors/supervisors_report_lab_case_list.html', {
'cases': cases
})
@login_required
# @superuser_required
def supervisors_report_laboratory_create(request):
if request.method == 'POST':
data = request.POST.copy()
data['created_by'] = request.user.id
data['updated_by'] = request.user.id
form = SupervisorsReportLaboratoryCaseForm(data)
if form.is_valid():
instance = form.save()
messages.success(request, u'เพิ่มรายการผลแลปสำเร็จ')
return redirect('supervisors_report_laboratory_edit', instance.id)
else:
form = SupervisorsReportLaboratoryCaseForm()
return render(request, 'supervisors/supervisors_report_lab_case_form.html', {
'form': form,
})
@login_required
# @superuser_required
def supervisors_report_laboratory_edit(request, case_id):
case = get_object_or_404(ReportLaboratoryCase, id=case_id)
if request.method == 'POST':
data = request.POST.copy()
data['created_by'] = request.user.id
data['updated_by'] = request.user.id
form = SupervisorsReportLaboratoryCaseForm(data, instance=case)
if form.is_valid():
form.save()
messages.success(request, u'แก้ไขผลแลป #%s สำเร็จ' % case.id)
return redirect('supervisors_report_laboratory')
else:
form = SupervisorsReportLaboratoryCaseForm(instance=case)
items = case.laboratory_items.order_by('sample_no')
files = case.laboratory_files.order_by('id')
causes = AnimalLaboratoryCause.objects.order_by('name')
    json_cause = json.dumps(AnimalLaboratoryCauseSerializer(causes, many=True).data)
return render(request, 'supervisors/supervisors_report_lab_case_form.html', {
'case': case,
'form': form,
'items': items,
'files': files,
'causes': causes,
'json_cause': json_cause,
'edit': True
})
@login_required
# @superuser_required
def supervisors_report_laboratory_delete(request, case_id):
case = get_object_or_404(ReportLaboratoryCase, id=case_id)
case.delete()
messages.success(request, u'ลบรายการสำเร็จ')
return redirect('supervisors_report_laboratory')
@login_required
# @superuser_required
def supervisors_report_laboratory_cause(request):
cause_list = AnimalLaboratoryCause.objects.order_by('name')
paginator = Paginator(cause_list, 100)
page = request.GET.get('page')
try:
causes = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
causes = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
causes = paginator.page(paginator.num_pages)
return render(request, 'supervisors/supervisors_report_lab_cause_list.html', {
'causes': causes
})
@login_required
# @superuser_required
def supervisors_report_laboratory_cause_delete(request, cause_id):
cause = get_object_or_404(AnimalLaboratoryCause, id=cause_id)
cause.delete()
messages.success(request, u'ลบรายการสำเร็จ')
return redirect('supervisors_report_laboratory_cause')
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# coding: utf-8
from xumm.resource import XummResource
from typing import List
class UserTokenValidity(XummResource):
"""
Attributes:
model_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
required = {
'user_token': True,
'active': True,
'token_issued': True,
'token_expiration': True
}
model_types = {
'user_token': str,
'active': bool,
'token_issued': int,
'token_expiration': int
}
attribute_map = {
'user_token': 'user_token',
'active': 'active',
'token_issued': 'token_issued',
'token_expiration': 'token_expiration'
}
def refresh_from(cls, **kwargs):
"""Returns the dict as a model
:param kwargs: A dict.
:type: dict
:return: The UserToken of this UserToken. # noqa: E501
:rtype: UserToken
"""
cls.sanity_check(kwargs)
cls._user_token = None
cls._active = None
cls._token_issued = None
cls._token_expiration = None
cls.user_token = kwargs['user_token']
cls.active = kwargs['active']
cls.token_issued = kwargs['token_issued']
cls.token_expiration = kwargs['token_expiration']
return cls
@property
def user_token(cls) -> str:
"""Gets the user_token of this UserTokenValidity.
:return: The user_token of this UserTokenValidity.
:rtype: str
"""
return cls._user_token
@user_token.setter
def user_token(cls, user_token: str):
"""Sets the user_token of this UserTokenValidity.
:param user_token: The user_token of this UserTokenValidity.
:type user_token: str
"""
if user_token is None:
raise ValueError("Invalid value for `user_token`, must not be `None`") # noqa: E501
cls._user_token = user_token
@property
    def active(cls) -> bool:
        """Gets the active of this UserTokenValidity.
        :return: The active of this UserTokenValidity.
        :rtype: bool
        """
        return cls._active
    @active.setter
    def active(cls, active: bool):
        """Sets the active of this UserTokenValidity.
        :param active: The active of this UserTokenValidity.
        :type active: bool
        """
        if active is None:
            raise ValueError("Invalid value for `active`, must not be `None`")  # noqa: E501
        cls._active = active
@property
def token_issued(cls) -> int:
"""Gets the token_issued of this UserTokenValidity.
:return: The token_issued of this UserTokenValidity.
:rtype: int
"""
return cls._token_issued
@token_issued.setter
def token_issued(cls, token_issued: int):
"""Sets the token_issued of this UserTokenValidity.
:param token_issued: The token_issued of this UserTokenValidity.
:type token_issued: int
"""
if token_issued is None:
raise ValueError("Invalid value for `token_issued`, must not be `None`") # noqa: E501
cls._token_issued = token_issued
@property
def token_expiration(cls) -> int:
"""Gets the token_expiration of this UserTokenValidity.
:return: The token_expiration of this UserTokenValidity.
:rtype: int
"""
return cls._token_expiration
@token_expiration.setter
def token_expiration(cls, token_expiration: int):
"""Sets the token_expiration of this UserTokenValidity.
:param token_expiration: The token_expiration of this UserTokenValidity. # noqa: E501
:type token_expiration: int
"""
if token_expiration is None:
raise ValueError("Invalid value for `token_expiration`, must not be `None`") # noqa: E501
cls._token_expiration = token_expiration
class UserTokenResponse(XummResource):
"""
Attributes:
model_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
required = {
'tokens': True,
}
model_types = {
'tokens': list,
}
attribute_map = {
'tokens': 'tokens',
}
    def refresh_from(cls, **kwargs):
        """Returns the dict as a model
        :param kwargs: A dict.
        :type: dict
        :return: The UserTokenResponse of this UserTokenResponse. # noqa: E501
        :rtype: UserTokenResponse
        """
        cls.sanity_check(kwargs)
        cls._tokens = None
        cls.tokens = [UserTokenValidity(**t) for t in kwargs['tokens']]
        return cls
@property
def tokens(cls) -> List[UserTokenValidity]:
"""Gets the tokens of this UserTokenResponse.
:return: The tokens of this UserTokenResponse.
:rtype: List[UserTokenValidity]
"""
return cls._tokens
@tokens.setter
def tokens(cls, tokens: List[UserTokenValidity]):
"""Sets the tokens of this UserTokenResponse.
:param tokens: The tokens of this UserTokenResponse.
:type tokens: List[UserTokenValidity]
"""
if tokens is None:
raise ValueError("Invalid value for `tokens`, must not be `None`") # noqa: E501
cls._tokens = tokens
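# A minimal usage sketch (an assumption, not part of the original module): build
# the response model from a dict shaped like the XUMM user-tokens payload.
#
#     resp = UserTokenResponse(tokens=[{
#         'user_token': 'token-uuid',
#         'active': True,
#         'token_issued': 1650000000,
#         'token_expiration': 1660000000,
#     }])
#     print(resp.tokens[0].active)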
|
nilq/baby-python
|
python
|
#!/usr/bin/python
import unittest
import GT3
from tests.ShotBase import *
from matplotlib.axes._axes import Axes
from matplotlib.pyplot import Figure
class CommonFunctions(object):
    """Tests to see if a shot has the expected attributes typical for a fully run shot."""
    def test_gt3_has_core(self):
        self.assertTrue(hasattr(self.plasma, "core"))
    def test_gt3_has_iol(self):
        self.assertTrue(hasattr(self.plasma, "iol"))
    def test_gt3_has_nbi(self):
        self.assertTrue(hasattr(self.plasma, "nbi"))
    def test_gt3_has_rtrans(self):
        self.assertTrue(hasattr(self.plasma, "rtrans"))
class SingleNullRun(SingleLowerNullTest, CommonFunctions):
@classmethod
def setUpClass(cls):
super(SingleNullRun, cls).setUpClass()
cls.plasma.run_radial_transport()
class DoubleNullRun(DoubleNullTest, CommonFunctions):
@classmethod
def setUpClass(cls):
super(DoubleNullRun, cls).setUpClass()
cls.plasma.run_radial_transport()
class NegativeTriangularityRun(NegativeTriangularityTest, CommonFunctions):
@classmethod
def setUpClass(cls):
super(NegativeTriangularityRun, cls).setUpClass()
cls.plasma.run_radial_transport()
class RunModificationTest(SingleLowerNullTest):
def test_sol_exists(self):
self.plasma.run_SOL()
self.assertTrue(hasattr(self.plasma, "sol"))
self.assertIsInstance(self.plasma.sol, GT3.Sol)
def test_iol_exists(self):
self.plasma.run_IOL()
self.assertTrue(hasattr(self.plasma, "iol"))
self.assertIsInstance(self.plasma.iol, GT3.IOL)
def test_nbi_exists(self):
self.plasma.run_NBI()
self.assertTrue(hasattr(self.plasma, "nbi"))
self.assertIsInstance(self.plasma.nbi, GT3.BeamDeposition)
def test_rtrans_exists(self):
self.plasma.run_radial_transport()
self.assertTrue(hasattr(self.plasma, "rtrans"))
self.assertIsInstance(self.plasma.rtrans, GT3.RadialTransport)
class PlotCoreTest(DoubleNullTest):
@classmethod
def setUpClass(cls):
super(PlotCoreTest, cls).setUpClass()
import matplotlib.pyplot as plt
cls.plt = plt
cls.plasma.run_radial_transport()
cls.plt.ion()
def plot_tester(self, plotter, edge=False):
        import inspect
        args = inspect.getfullargspec(plotter).args
        if 'logPlot' in args and 'edge' in args:
fig = plotter(logPlot=True, edge=True)
elif 'logPlot' in args:
fig = plotter(logPlot=True)
elif 'edge' in args:
fig = plotter(edge=True)
else:
fig = plotter()
self.assertIsInstance(fig, (Figure, Axes))
self.plt.close(fig.get_figure())
def test_plot_core(self):
"""
Plot all plots in the Core module
"""
plot_vars = [self.plasma.core.n.i.fsa.plot,
self.plasma.core.n.e.fsa.plot,
self.plasma.core.n.n.s.plot2D,
self.plasma.core.n.n.t.plot2D,
self.plasma.core.n.n.tot.plot2D,
self.plasma.core.T.i.ev.plot2D,
self.plasma.core.T.i.J.plot2D,
self.plasma.core.T.i.kev.plot2D,
self.plasma.core.T.e.ev.plot2D,
self.plasma.core.T.i.ev.L.plot2D,
self.plasma.core.T.e.J.L.plot2D,
self.plasma.core.n.i.L.plot2D,
self.plasma.core.n.n.s.L.plot2D,
self.plasma.core.n.n.tot.L.plot2D,
self.plasma.core.v.D.pol.plot2D,
self.plasma.core.v.C.tor.plot2D]
for v in plot_vars:
self.plot_tester(v)
def test_plot_beams(self):
"""
Plot all plots in the NBI module
"""
plot_vars = [self.plasma.nbi.combined_beam_src_dens_lost.Snbi.plot,
self.plasma.nbi.combined_beam_src_dens_lost.Qnbi.plot,
self.plasma.nbi.combined_beam_src_dens_lost.Mnbi.plot,
self.plasma.nbi.combined_beam_src_kept.Snbi.plot,
self.plasma.nbi.combined_beam_src_kept.Qnbi.plot,
self.plasma.nbi.combined_beam_src_kept.Mnbi.plot]
for v in plot_vars:
self.plot_tester(v)
def test_plot_rtrans(self):
"""
Plot all plots in the Radial Transport module
"""
plot_vars = [self.plasma.rtrans.gamma.D.diff.plot,
self.plasma.rtrans.gamma.D.int.plot,
self.plasma.rtrans.gamma.e.int.plot,
self.plasma.rtrans.gamma.C.int.plot,
self.plasma.rtrans.gamma.plot_D,
self.plasma.rtrans.gamma.plot_C,
self.plasma.rtrans.plot_Q_sources,
self.plasma.rtrans.plot_S_sources,
self.plasma.rtrans.plot_chi_terms]
for v in plot_vars:
self.plot_tester(v)
@classmethod
def tearDownClass(cls):
cls.plt.clf()
cls.plt.close()
class PlotIOLTest(DoubleNullTest):
@classmethod
def setUpClass(cls):
super(PlotIOLTest, cls).setUpClass()
import matplotlib.pyplot as plt
cls.plt = plt
cls.plasma.run_IOL()
def test_plot_iol_F_i(self):
self.plasma.iol.plot_F_i(edge=True)
self.assertIsInstance(self.plasma.iol.plot_F_i(), Axes)
class GT3TestClassTest(unittest.TestCase, CommonFunctions):
@classmethod
def setUpClass(cls):
super(GT3TestClassTest, cls).setUpClass()
from GT3 import gt3
from GT3.TestBase.testbase import TestClass
cls.plasma = gt3(preparedInput=TestClass())
cls.plasma.run_radial_transport()
TestClass().print_summary()
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
import datetime
def current_year():
"""current_year
This method used to get the current year
"""
return datetime.date.today().year
|
nilq/baby-python
|
python
|
import streamlit as st
import pandas as pd
st.title("File uploader example")
st.write(
"""
This is an example of how to use a file uploader.
Here, we are simply going to upload a CSV file and display it.
It should serve as a minimal example
for you to jump off and do more complex things.
"""
)
st.header("Upload CSV")
csv_file = st.file_uploader(
label="Upload a CSV file", type=["csv"], encoding="utf-8"
)
if csv_file is not None:
data = pd.read_csv(csv_file)
st.dataframe(data)
st.header("Upload Images")
st.write(
"""
Below is another example, where we upload an image and display it.
"""
)
image_file = st.file_uploader(
label="Upload an image", type=["png", "jpg", "tiff"], encoding=None
)
if image_file is not None:
st.image(image_file)
|
nilq/baby-python
|
python
|
"""
ASGI config for scheduler project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
from os import environ
from django.core.asgi import get_asgi_application # type: ignore
environ.setdefault("DJANGO_SETTINGS_MODULE", "scheduler.settings")
application = get_asgi_application()
|
nilq/baby-python
|
python
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import json
import os
import boto3
# Tries to find an existing or free game session and return the IP and Port to the client
def lambda_handler(event, context):
sqs_client = boto3.client('sqs')
# 1. Check SQS Queue if there are sessions available
# Try to receive message from SQS queue
try:
response = sqs_client.receive_message(
QueueUrl=os.environ['SQS_QUEUE_URL'],
MaxNumberOfMessages=1,
VisibilityTimeout=15,
WaitTimeSeconds=1
)
message = response['Messages'][0]
print(message)
receipt_handle = message['ReceiptHandle']
connection_info = message['Body']
print(receipt_handle)
print("got session: " + connection_info)
connection_splitted = connection_info.split(":")
ip = connection_splitted[0]
port = connection_splitted[1]
print("IP: " + ip + " PORT: " + port)
# Delete received message from queue
sqs_client.delete_message(
QueueUrl=os.environ['SQS_QUEUE_URL'],
ReceiptHandle=receipt_handle
)
# Return result to client
return {
"statusCode": 200,
"body": json.dumps({ 'publicIP': ip, 'port': port })
}
    except Exception:
print("Failed getting a session from the SQS queue, will try claiming a new one")
# 2. If not, try to claim a new session through FleetIQ
client = boto3.client('gamelift')
response = client.claim_game_server(
GameServerGroupName='ExampleGameServerGroup',
)
print(response)
connection_info = response["GameServer"]["ConnectionInfo"]
try:
connection_splitted = connection_info.split(":")
ip = connection_splitted[0]
port = connection_splitted[1]
print("IP: " + ip + " PORT: " + port)
# Put a ticket in to the SQS for the next player (we match 1-v-1 sessions)
response = sqs_client.send_message(
QueueUrl=os.environ['SQS_QUEUE_URL'],
MessageBody=(
connection_info
)
)
print(response['MessageId'])
return {
"statusCode": 200,
"body": json.dumps({ 'publicIP': ip, 'port': port })
}
    except Exception:
print("Failed getting a new session")
# 3. Failed to find a server
return {
"statusCode": 500,
"body": json.dumps({ 'failed': 'couldnt find a free server spot'})
}
|
nilq/baby-python
|
python
|
import anachronos
from test.runner import http
class PingTest(anachronos.TestCase):
def test_ping(self):
res = http.get("/ping")
self.assertEqual(200, res.status_code)
self.assertEqual("Pong!", res.text)
|
nilq/baby-python
|
python
|
from setuptools import setup
setup(
name="minigit",
version="1.0",
packages=["minigit"],
entry_points={"console_scripts": ["minigit = minigit.cli:main"]},
)
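# After `pip install .`, the console_scripts entry point above installs a
# `minigit` command that dispatches to minigit.cli:main.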
|
nilq/baby-python
|
python
|
__author__ = 'cvl'
class Domain_model():
def __init__(self, json_dict):
self.free_domains = json_dict['free_domains']
self.paid_domains = json_dict['paid_domains']
|
nilq/baby-python
|
python
|
import pandas as pd
class CurrentPositionStatusSettler:
def __init__(self, calculation_source):
self.__calculation_source = calculation_source
def settle_current_position_status(self) -> pd.DataFrame:
self.__calculation_source = self.__calculation_source[
~self.__calculation_source['status'].isin(['sold', 'delivered'])
].copy()
self.__calculation_source.loc[:, 'status'] = 'holding'
settled_current_position_status = self.__calculation_source[
[
'contract', 'delivery_month', 'value', 'cost',
'close_price', 'status'
]
]
settled_current_position_status = settled_current_position_status.rename(
index=int, columns={'close_price': 'close_price_of_previous_trading_date'}
)
return settled_current_position_status.reset_index(drop=True)
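if __name__ == '__main__':
    # A minimal usage sketch; the row values below are illustrative assumptions.
    source = pd.DataFrame([
        {'contract': 'CU1906', 'delivery_month': '2019-06', 'value': 10, 'cost': 9,
         'close_price': 9.5, 'status': 'open'},
        {'contract': 'AL1907', 'delivery_month': '2019-07', 'value': 5, 'cost': 6,
         'close_price': 5.8, 'status': 'sold'},
    ])
    settler = CurrentPositionStatusSettler(source)
    # Only the first row survives the sold/delivered filter and is marked 'holding'.
    print(settler.settle_current_position_status())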
|
nilq/baby-python
|
python
|
'''
Stanley Bak
Python F-16
Thrust function
'''
import numpy as np
import tensorflow as tf
from util import fix, fix_tf
def thrust(power, alt, rmach):
'thrust lookup-table version'
a = np.array([[1060, 670, 880, 1140, 1500, 1860], \
[635, 425, 690, 1010, 1330, 1700], \
[60, 25, 345, 755, 1130, 1525], \
[-1020, -170, -300, 350, 910, 1360], \
[-2700, -1900, -1300, -247, 600, 1100], \
[-3600, -1400, -595, -342, -200, 700]], dtype=float).T
b = np.array([[12680, 9150, 6200, 3950, 2450, 1400], \
[12680, 9150, 6313, 4040, 2470, 1400], \
[12610, 9312, 6610, 4290, 2600, 1560], \
[12640, 9839, 7090, 4660, 2840, 1660], \
[12390, 10176, 7750, 5320, 3250, 1930], \
[11680, 9848, 8050, 6100, 3800, 2310]], dtype=float).T
c = np.array([[20000, 15000, 10800, 7000, 4000, 2500], \
[21420, 15700, 11225, 7323, 4435, 2600], \
[22700, 16860, 12250, 8154, 5000, 2835], \
[24240, 18910, 13760, 9285, 5700, 3215], \
[26070, 21075, 15975, 11115, 6860, 3950], \
[28886, 23319, 18300, 13484, 8642, 5057]], dtype=float).T
if alt < 0:
alt = 0.01 # uh, why not 0?
h = .0001 * alt
i = fix(h)
if i >= 5:
i = 4
dh = h - i
rm = 5 * rmach
m = fix(rm)
if m >= 5:
m = 4
elif m <= 0:
m = 0
dm = rm - m
cdh = 1 - dh
# do not increment these, since python is 0-indexed while matlab is 1-indexed
#i = i + 1
#m = m + 1
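    # Bilinear interpolation between the four surrounding table entries:
    # first along the altitude axis (weight dh), then along Mach (weight dm).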
s = b[i, m] * cdh + b[i + 1, m] * dh
t = b[i, m + 1] * cdh + b[i + 1, m + 1] * dh
tmil = s + (t - s) * dm
if power < 50:
s = a[i, m] * cdh + a[i + 1, m] * dh
t = a[i, m + 1] * cdh + a[i + 1, m + 1] * dh
tidl = s + (t - s) * dm
thrst = tidl + (tmil - tidl) * power * .02
else:
s = c[i, m] * cdh + c[i + 1, m] * dh
t = c[i, m + 1] * cdh + c[i + 1, m + 1] * dh
tmax = s + (t - s) * dm
thrst = tmil + (tmax - tmil) * (power - 50) * .02
return thrst
def thrust_tf(power, alt, rmach):
with tf.name_scope("threst"):
a = tf.constant(np.array([[1060, 670, 880, 1140, 1500, 1860], \
[635, 425, 690, 1010, 1330, 1700], \
[60, 25, 345, 755, 1130, 1525], \
[-1020, -170, -300, 350, 910, 1360], \
[-2700, -1900, -1300, -247, 600, 1100], \
[-3600, -1400, -595, -342, -200, 700]], dtype=np.float32).T)
b = tf.constant(np.array([[12680, 9150, 6200, 3950, 2450, 1400], \
[12680, 9150, 6313, 4040, 2470, 1400], \
[12610, 9312, 6610, 4290, 2600, 1560], \
[12640, 9839, 7090, 4660, 2840, 1660], \
[12390, 10176, 7750, 5320, 3250, 1930], \
[11680, 9848, 8050, 6100, 3800, 2310]], dtype=np.float32).T)
c = tf.constant(np.array([[20000, 15000, 10800, 7000, 4000, 2500], \
[21420, 15700, 11225, 7323, 4435, 2600], \
[22700, 16860, 12250, 8154, 5000, 2835], \
[24240, 18910, 13760, 9285, 5700, 3215], \
[26070, 21075, 15975, 11115, 6860, 3950], \
[28886, 23319, 18300, 13484, 8642, 5057]], dtype=np.float32).T)
with tf.name_scope("threst"):
alt = tf.cond(tf.less(alt, 0), lambda: 0.01, lambda: alt)
h = .0001 * alt
i = fix_tf(h)
i = tf.cond(tf.greater_equal(i, 5.0), lambda: 4.0, lambda: i)
dh = h - i
rm = 5 * rmach
m = fix_tf(rm)
m = tf.clip_by_value(m, 0, 4)
dm = rm - m
cdh = 1 - dh
# do not increment these, since python is 0-indexed while matlab is 1-indexed
#i = i + 1
#m = m + 1
i = tf.cast(i, tf.int32)
m = tf.cast(m, tf.int32)
s = b[i, m] * cdh + b[i + 1, m] * dh
t = b[i, m + 1] * cdh + b[i + 1, m + 1] * dh
tmil = s + (t - s) * dm
def f1():
s = a[i, m] * cdh + a[i + 1, m] * dh
t = a[i, m + 1] * cdh + a[i + 1, m + 1] * dh
tidl = s + (t - s) * dm
thrst = tidl + (tmil - tidl) * power * .02
return thrst
def f2():
s = c[i, m] * cdh + c[i + 1, m] * dh
t = c[i, m + 1] * cdh + c[i + 1, m + 1] * dh
tmax = s + (t - s) * dm
thrst = tmil + (tmax - tmil) * (power - 50) * .02
return thrst
thrst = tf.cond(tf.less(power, 50), f1, f2)
return thrst
def test_thrust_tf():
def template(power, alt, rmach):
power_tf = tf.constant(power, dtype=tf.float32)
alt_tf = tf.constant(alt, dtype=tf.float32)
rmach_tf = tf.constant(rmach, dtype=tf.float32)
with tf.Session() as sess:
print(sess.run(thrust_tf(power_tf, alt_tf, rmach_tf)))
print(thrust(power, alt, rmach))
# alt < 0, alt < 500, alt > 500
# rmach < 0.8, rmach > 0.8
# power < 50, power > 50
for a in (-1, 499, 501):
for r in (0.79, 0.81):
for p in (49, 51):
template(p, a, r)
if __name__ == "__main__":
test_thrust_tf()
|
nilq/baby-python
|
python
|
from datetime import date
nascimento = int(input('What year were you born? '))
anoatual = date.today().year
idade = anoatual - nascimento
if idade <= 9:
    print('You are {} years old; your category is Mirim.'.format(idade))
elif idade <= 14:
    print('You are {} years old; your category is Infantil.'.format(idade))
elif idade < 20:
    print('You are {} years old; your category is Junior.'.format(idade))
elif idade == 20:
    print('You are {} years old; your category is Senior.'.format(idade))
else:
    print('You are {} years old; your category is Master.'.format(idade))
|
nilq/baby-python
|
python
|
# coding=utf-8
class Human(object):
    def __init__(self, input_gender):
        self.gender = input_gender
    def printGender(self):
        print(self.gender)
li_lei = Human('male')  # here, 'male' is passed to __init__() as the input_gender argument
print(li_lei.gender)    # compare this line's output with the next line
li_lei.printGender()    # compare this line's output with the previous line
|
nilq/baby-python
|
python
|
from pytorch_grad_cam import GradCAM, ScoreCAM, GradCAMPlusPlus, AblationCAM, XGradCAM, EigenCAM, FullGrad
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
from pytorch_grad_cam.utils.image import show_cam_on_image
from torchvision.models import resnet50
import torch
import numpy as np
model = resnet50(pretrained=True)
target_layers = [model.layer4[-1]]
# Placeholder input for this sketch (an assumption): a random RGB image in [0, 1].
# Replace rgb_img / input_tensor with a real, preprocessed image batch.
rgb_img = np.random.rand(224, 224, 3).astype(np.float32)
input_tensor = torch.from_numpy(rgb_img).permute(2, 0, 1).unsqueeze(0)
# Note: input_tensor can be a batch tensor with several images!
# Construct the CAM object once, and then re-use it on many images:
cam = GradCAM(model=model, target_layers=target_layers, use_cuda=torch.cuda.is_available())
# You can also use it within a with statement, to make sure it is freed,
# in case you need to re-create it inside an outer loop:
# with GradCAM(model=model, target_layers=target_layers, use_cuda=args.use_cuda) as cam:
#   ...
# We have to specify the target we want to generate
# the Class Activation Maps for.
# If targets is None, the highest scoring category
# will be used for every image in the batch.
# Here we use ClassifierOutputTarget, but you can define your own custom targets
# that are, for example, combinations of categories, or specific outputs in a non-standard model.
targets = [ClassifierOutputTarget(0)]
# You can also pass aug_smooth=True and eigen_smooth=True, to apply smoothing.
grayscale_cam = cam(input_tensor=input_tensor, targets=targets)
# In this example grayscale_cam has only one image in the batch:
grayscale_cam = grayscale_cam[0, :]
# show_cam_on_image expects a float32 RGB image in [0, 1], not the input tensor.
visualization = show_cam_on_image(rgb_img, grayscale_cam, use_rgb=True)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
from email.message import EmailMessage
import smtplib
message = EmailMessage()
sender = "me@example.com"
recipient = "youw@example.com"
message['From'] = sender
message['To'] = recipient
message['Subject'] = 'Greetings from {} to {}!'.format(sender, recipient)
body = """Hey there!
I'm learning to send emails using Python!"""
message.set_content(body)
mail_server = smtplib.SMTP('localhost')
mail_server.send_message(message)
#mail_server.set_debuglevel(1)
#print(mail_pass)
#print(message)
|
nilq/baby-python
|
python
|
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for tensorflow_probability.spinoffs.oryx.experimental.nn.normalization."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax import random
from jax import test_util as jtu
import numpy as np
from oryx.core import state
from oryx.experimental.nn import normalization
class NormalizationTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self._seed = random.PRNGKey(0)
@parameterized.named_parameters(
('hwc', (0, 1), (7,), (1, 1, 7)),
('chw', (1, 2), (5,), (5, 1, 1)))
def test_spec(self, axis, param_shape, moving_shape):
key = self._seed
net_init = normalization.BatchNorm(axis)
in_shape = (5, 6, 7)
out_shape = net_init.spec(state.Shape(in_shape)).shape
net = net_init.init(key, state.Shape(in_shape))
self.assertEqual(out_shape, in_shape)
beta, gamma = net.params
self.assertEqual(param_shape, beta.shape)
self.assertEqual(param_shape, gamma.shape)
moving_mean, moving_var = net.state.moving_mean, net.state.moving_var
self.assertEqual(moving_shape, moving_mean.shape)
self.assertEqual(moving_shape, moving_var.shape)
@parameterized.named_parameters(
('center_scale', True, True),
('no_center', False, True),
('no_scale', True, False),
('no_center_no_scale', False, False))
def test_params(self, center, scale):
key = self._seed
net_init = normalization.BatchNorm(center=center, scale=scale)
in_shape = (5, 6, 7)
out_shape = net_init.spec(state.Shape(in_shape)).shape
net = net_init.init(key, state.Shape(in_shape))
self.assertEqual(out_shape, in_shape)
beta, gamma = net.params
if center:
self.assertEqual(beta.shape, (7,))
np.testing.assert_almost_equal(np.zeros_like(beta), beta)
else:
self.assertEqual(beta, ())
if scale:
self.assertEqual(gamma.shape, (7,))
np.testing.assert_almost_equal(np.ones_like(gamma), gamma)
else:
self.assertEqual(gamma, ())
def test_call_no_batch(self):
epsilon = 1e-5
axis = (0, 1)
net_rng, data_rng = random.split(self._seed)
net_init = normalization.BatchNorm(axis, epsilon=epsilon)
in_shape = (5, 6, 7)
net = net_init.init(net_rng, state.Shape(in_shape))
x = random.normal(data_rng, in_shape)
net_y = net(x)
np.testing.assert_allclose(x, net_y)
with self.assertRaises(ValueError):
net_y = net(x[None])
@parameterized.named_parameters(
('center_scale', True, True),
('no_center', False, True),
('no_scale', True, False),
('no_center_no_scale', False, False))
def test_call(self, center, scale):
epsilon = 1e-5
axis = (0, 1)
net_rng, data_rng = random.split(self._seed)
net_init = normalization.BatchNorm(axis, center=center, scale=scale)
in_shape = (5, 6, 7)
net = net_init.init(net_rng, state.Shape(in_shape))
beta, gamma = net.params
x = random.normal(data_rng, (10,) + in_shape)
batch_axis = (0,) + tuple(a + 1 for a in axis)
mean = np.mean(np.array(x), batch_axis, keepdims=True)[0]
var = np.var(np.array(x), batch_axis, keepdims=True)[0]
z = (x - mean) / np.sqrt(var + epsilon)
if center and scale:
y = gamma * z + beta
elif center:
y = z + beta
elif scale:
y = gamma * z
else:
y = z
net_y = jax.vmap(net)(x)
np.testing.assert_almost_equal(y, np.array(net_y), decimal=6)
def test_no_training(self):
epsilon = 1e-5
axis = (0, 1)
net_rng, data_rng = random.split(self._seed)
net_init = normalization.BatchNorm(axis, center=False, scale=False)
in_shape = (5, 6, 7)
net = net_init.init(net_rng, state.Shape(in_shape))
x = random.normal(data_rng, (4,) + in_shape)
z = x / np.sqrt(1.0 + epsilon)
y = jax.vmap(lambda x: net(x, training=False))(x)
np.testing.assert_almost_equal(z, np.array(y), decimal=6)
def test_updates_moving_mean_var(self):
axis = (0, 1)
net_rng, data_rng = random.split(self._seed)
net_init = normalization.BatchNorm(axis, momentum=0.9)
in_shape = (5, 6, 7)
net = net_init.init(net_rng, state.Shape(in_shape))
self.assertAlmostEqual(0.1, net.info.decay)
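    # With momentum m, the stored decay is 1 - m; each call to update applies an
    # exponential moving average, moving <- (1 - decay) * moving + decay * batch_stat,
    # so repeated updates converge to the batch mean/var asserted below.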
x = random.normal(data_rng, (4,) + in_shape)
batch_axis = (0,) + tuple(a + 1 for a in axis)
mean = np.mean(np.array(x), batch_axis, keepdims=True)[0]
var = np.var(np.array(x), batch_axis, keepdims=True)[0]
net_state = net.state
# Initial values
np.testing.assert_almost_equal(np.zeros_like(mean), net_state.moving_mean)
np.testing.assert_almost_equal(np.ones_like(var), net_state.moving_var)
# Update state (moving_mean, moving_var)
for _ in range(100):
net = jax.vmap(net.update, out_axes=None)(x)
# Final values
np.testing.assert_almost_equal(mean, net.state.moving_mean, decimal=4)
np.testing.assert_almost_equal(var, net.state.moving_var, decimal=4)
def test_check_grads(self):
axis = (0, 1, 2)
in_shape = (4, 5, 6, 7)
net_rng, data_rng = random.split(self._seed)
net_init = normalization.BatchNorm(axis)
net = net_init.init(net_rng, state.Shape(in_shape))
x = random.normal(data_rng, in_shape)
jtu.check_grads(net, (x,), 2)
def mse(x, y):
return jax.numpy.mean(jax.numpy.square(y - x))
def reconstruct_loss(net, x, **kwargs):
preds, net = jax.vmap(
lambda x: net.call_and_update(x, **kwargs), # pylint: disable=unnecessary-lambda
out_axes=(0, None))(x)
return mse(x, preds), net
class GradTest(absltest.TestCase):
def setUp(self):
super().setUp()
self._seed = random.PRNGKey(0)
def test_batch_norm_moving_vars_grads(self):
net_rng, data_rng = random.split(self._seed)
axis = (0, 1)
in_shape = (2, 2, 2)
network_init = normalization.BatchNorm(axis)
network = network_init.init(net_rng, state.Shape(in_shape))
grad_fn = jax.grad(reconstruct_loss, has_aux=True)
x0 = random.normal(data_rng, (2,) + in_shape)
grads, _ = grad_fn(network, x0)
grads_moving_mean, grads_moving_var = grads.state
np.testing.assert_almost_equal(np.zeros_like(grads_moving_mean),
grads_moving_mean)
np.testing.assert_almost_equal(np.zeros_like(grads_moving_var),
grads_moving_var)
def test_batch_norm(self):
net_rng, data_rng = random.split(self._seed)
axis = (0, 1)
in_shape = (2, 2, 2)
network_init = normalization.BatchNorm(axis)
initial_network = network_init.init(net_rng, state.Shape(in_shape))
grad_fn = jax.grad(reconstruct_loss, has_aux=True)
x0 = random.normal(data_rng, (2,) + in_shape)
# reconstruct_loss updates network state
initial_loss, network = reconstruct_loss(initial_network, x0)
# grad also updates network state
grads, new_network = grad_fn(network, x0)
self.assertGreater(initial_loss, 0.0)
# Make sure grad_fn updates the state.
self.assertGreater(mse(initial_network.state.moving_mean,
new_network.state.moving_mean),
0.0)
self.assertGreater(mse(initial_network.state.moving_var,
new_network.state.moving_var),
0.0)
final_network = new_network.replace(params=jax.tree_util.tree_multimap(
lambda w, g: w - 0.1 * g, network.params, grads.params))
final_loss, final_network = reconstruct_loss(final_network, x0)
self.assertLess(final_loss, initial_loss)
self.assertGreater(mse(new_network.state.moving_mean,
final_network.state.moving_mean), 0.0)
self.assertGreater(mse(new_network.state.moving_var,
final_network.state.moving_var), 0.0)
if __name__ == '__main__':
absltest.main()
|
nilq/baby-python
|
python
|
import os
import sys
import math
import json
import numpy as np
from pycocotools.coco import COCO
import pickle
sys.path.insert(0,'..' )
from config import cfg
COCO_TO_OURS = [0, 15, 14, 17, 16, 5, 2, 6, 3, 7, 4, 11, 8, 12, 9, 13, 10]
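# Maps the 17 COCO keypoint indices onto this project's 18-keypoint order;
# index 1 (the neck) has no COCO counterpart and is synthesized later from
# the two shoulder keypoints.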
def processing(ann_path, filelist_path, masklist_path, json_path, mask_dir):
coco = COCO(ann_path)
ids = list(coco.imgs.keys())
lists = []
filelist_fp = open(filelist_path, 'w')
masklist_fp = open(masklist_path, 'w')
for i, img_id in enumerate(ids):
ann_ids = coco.getAnnIds(imgIds=img_id)
img_anns = coco.loadAnns(ann_ids)
numPeople = len(img_anns)
name = coco.imgs[img_id]['file_name']
height = coco.imgs[img_id]['height']
width = coco.imgs[img_id]['width']
person_centers = []
info = dict()
info['filename'] = name
info['info'] = []
for p in range(numPeople):
if img_anns[p]['num_keypoints'] < 5 or img_anns[p]['area'] < 32 * 32:
continue
kpt = img_anns[p]['keypoints']
dic = dict()
# person center
person_center = [img_anns[p]['bbox'][0] + img_anns[p]['bbox'][2] / 2.0, img_anns[p]['bbox'][1] + img_anns[p]['bbox'][3] / 2.0]
scale = img_anns[p]['bbox'][3] / float(cfg.INPUT_SIZE)
            # skip this person if the distance to an existing person is too small
flag = 0
for pc in person_centers:
dis = math.sqrt((person_center[0] - pc[0]) * (person_center[0] - pc[0]) + (person_center[1] - pc[1]) * (person_center[1] - pc[1]))
if dis < pc[2] * 0.3:
                    flag = 1
break
if flag == 1:
continue
dic['pos'] = person_center
dic['keypoints'] = np.zeros((18, 3)).tolist()
dic['scale'] = scale
for part in range(17):
dic['keypoints'][COCO_TO_OURS[part]][0] = kpt[part * 3]
dic['keypoints'][COCO_TO_OURS[part]][1] = kpt[part * 3 + 1]
                # visibility: 2 = visible, 1 = labeled but not visible, 0 = not labeled
dic['keypoints'][COCO_TO_OURS[part]][2] = kpt[part * 3 + 2]
# generate neck point based on LShoulder and RShoulder
dic['keypoints'][1][0] = (kpt[5 * 3] + kpt[6 * 3]) * 0.5
dic['keypoints'][1][1] = (kpt[5 * 3 + 1] + kpt[6 * 3 + 1]) * 0.5
if kpt[5 * 3 + 2] == 0 or kpt[6 * 3 + 2] == 0:
dic['keypoints'][1][2] = 0
else:
dic['keypoints'][1][2] = 1
info['info'].append(dic)
person_centers.append(np.append(person_center, max(img_anns[p]['bbox'][2], img_anns[p]['bbox'][3])))
if len(info['info']) > 0:
lists.append(info)
filelist_fp.write(name + '\n')
mask_all = np.zeros((height, width), dtype=np.uint8)
mask_miss = np.zeros((height, width), dtype=np.uint8)
flag = 0
for p in img_anns:
if p['iscrowd'] == 1:
mask_crowd = coco.annToMask(p)
temp = np.bitwise_and(mask_all, mask_crowd)
mask_crowd = mask_crowd - temp
flag += 1
continue
else:
mask = coco.annToMask(p)
mask_all = np.bitwise_or(mask, mask_all)
if p['num_keypoints'] <= 0:
mask_miss = np.bitwise_or(mask, mask_miss)
if flag < 1:
mask_miss = np.logical_not(mask_miss)
elif flag == 1:
mask_miss = np.logical_not(np.bitwise_or(mask_miss, mask_crowd))
mask_all = np.bitwise_or(mask_all, mask_crowd)
else:
raise Exception('crowd segments > 1')
pickle.dump(mask_miss, open(os.path.join(mask_dir, name.split('.')[0] + '.npy'), 'w'))
masklist_fp.write(os.path.join(mask_dir, name.split('.')[0] + '.npy') + '\n')
if i % 1000 == 0:
print "Processed {} of {}".format(i, len(ids))
masklist_fp.close()
filelist_fp.close()
fp = open(json_path, 'w')
fp.write(json.dumps(lists))
fp.close()
print 'done!'
if __name__ == '__main__':
processing(cfg.TRAIN_ANNO_PATH,
cfg.TRAIN_IMAGELIST_FILE,
cfg.TRAIN_MASKLIST_FILE,
cfg.TRAIN_KPTJSON_FILE,
cfg.TRAIN_MASK_PATH)
processing(cfg.TEST_ANNO_PATH,
cfg.TEST_IMAGELIST_FILE,
cfg.TEST_MASKLIST_FILE,
cfg.TEST_KPTJSON_FILE,
cfg.TEST_MASK_PATH)
|
nilq/baby-python
|
python
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
SPEC = {
'settings': {
'build_gs_bucket': 'chromium-v8',
# WARNING: src-side runtest.py is only tested with chromium CQ builders.
# Usage not covered by chromium CQ is not supported and can break
# without notice.
'src_side_runtest_py': True,
},
'builders': {
'Linux - Future': {
'chromium_config': 'chromium',
'chromium_apply_config': [
'mb',
'ninja_confirm_noop',
'chrome_with_codecs'
],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'bot_type': 'builder_tester',
'compile_targets': [
'chromium_swarm_tests',
],
'testing': {
'platform': 'linux',
},
'enable_swarming': True,
'checkout_dir': 'linux',
},
'Linux - Future (dbg)': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb', 'ninja_confirm_noop'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 64,
},
'bot_type': 'builder_tester',
'testing': {
'platform': 'linux',
},
'enable_swarming': True,
'checkout_dir': 'linux',
},
'Linux V8 API Stability': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['v8_canary', 'with_branch_heads'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'bot_type': 'builder_tester',
'compile_targets': [
'all',
],
'test_results_config': 'staging_server',
'testing': {
'platform': 'linux',
},
},
},
}
|
nilq/baby-python
|
python
|
# Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from yaql.language import exceptions
import yaql.tests
class TestQueries(yaql.tests.TestCase):
def test_where(self):
data = [1, 2, 3, 4, 5, 6]
self.assertEqual([4, 5, 6], self.eval('$.where($ > 3)', data=data))
def test_select(self):
data = [1, 2, 3]
self.assertEqual([1, 4, 9], self.eval('$.select($ * $)', data=data))
def test_keyword_collection_access(self):
data = [{'a': 2}, {'a': 4}]
self.assertEqual([2, 4], self.eval('$.a', data=data))
self.assertEqual([2, 4], self.eval('$.select($).a', data=data))
def test_skip(self):
data = [1, 2, 3, 4]
self.assertEqual([2, 3, 4], self.eval('$.skip(1)', data=data))
def test_limit(self):
data = [1, 2, 3, 4]
self.assertEqual([1, 2], self.eval('$.limit(2)', data=data))
self.assertEqual([1, 2], self.eval('$.take(2)', data=data))
def test_append(self):
data = [1, 2]
self.assertEqual([1, 2, 3, 4], self.eval('$.append(3, 4)', data=data))
def test_complex_query(self):
data = [1, 2, 3, 4, 5, 6]
self.assertEqual(
[4],
self.eval('$.where($ < 4).select($ * $).skip(1).limit(1)',
data=data))
def test_distinct(self):
data = [1, 2, 3, 2, 4, 8]
self.assertEqual([1, 2, 3, 4, 8], self.eval('$.distinct()', data=data))
self.assertEqual([1, 2, 3, 4, 8], self.eval('distinct($)', data=data))
def test_distinct_structures(self):
data = [{'a': 1}, {'b': 2}, {'a': 1}]
self.assertEqual(
[{'a': 1}, {'b': 2}],
self.eval('$.distinct()', data=data))
def test_distinct_with_selector(self):
data = [['a', 1], ['b', 2], ['c', 1], ['d', 3], ['e', 2]]
self.assertCountEqual([['a', 1], ['b', 2], ['d', 3]],
self.eval('$.distinct($[1])', data=data))
self.assertCountEqual([['a', 1], ['b', 2], ['d', 3]],
self.eval('distinct($, $[1])', data=data))
def test_any(self):
self.assertFalse(self.eval('$.any()', data=[]))
self.assertTrue(self.eval('$.any()', data=[0]))
def test_all(self):
self.assertTrue(self.eval('$.all()', data=[]))
self.assertFalse(self.eval('$.all()', data=[1, 0]))
self.assertTrue(self.eval('$.all()', data=[1, 2]))
self.assertFalse(self.eval('$.all($ > 1)', data=[2, 1]))
self.assertTrue(self.eval('$.all($ > 1)', data=[2, 3]))
def test_enumerate(self):
data = [1, 2, 3]
self.assertEqual([[0, 1], [1, 2], [2, 3]],
self.eval('$.enumerate()', data=data))
self.assertEqual([[3, 1], [4, 2], [5, 3]],
self.eval('$.enumerate(3)', data=data))
self.assertEqual([[0, 1], [1, 2], [2, 3]],
self.eval('enumerate($)', data=data))
self.assertEqual([[3, 1], [4, 2], [5, 3]],
self.eval('enumerate($, 3)', data=data))
def test_concat(self):
data = [1, 2, 3]
self.assertEqual(
[1, 2, 3, 2, 4, 6],
self.eval('$.select($).concat($.select(2 * $))', data=data))
self.assertEqual(
[1, 2, 3, 2, 4, 6, 1, 2, 3],
self.eval('concat($, $.select(2 * $), $)', data=data))
def test_len(self):
data = [1, 2, 3]
self.assertEqual(3, self.eval('len($)', data=data))
self.assertEqual(3, self.eval('$.len()', data=data))
self.assertEqual(3, self.eval('$.count()', data=data))
self.assertRaises(
exceptions.FunctionResolutionError,
self.eval, 'count($)', data=data)
def test_sum(self):
data = range(4)
self.assertEqual(6, self.eval('$.sum()', data=data))
self.assertEqual(106, self.eval('$.sum(100)', data=data))
self.assertEqual(100, self.eval('[].sum(100)'))
def test_memorize(self):
generator_func = lambda: (i for i in range(3)) # noqa: E731
self.assertRaises(
TypeError,
self.eval, '$.len() + $.sum()', data=generator_func())
self.assertEqual(
6,
self.eval('let($.memorize()) -> $.len() + $.sum()',
data=generator_func()))
def test_first(self):
self.assertEqual(2, self.eval('list(2, 3).first()'))
self.assertEqual(4, self.eval('list(2, 3).select($ * 2).first()'))
self.assertIsNone(self.eval('list().first(null)'))
self.assertRaises(StopIteration, self.eval, 'list().first()')
self.assertEqual(99, self.eval('list().first(99)'))
def test_single(self):
self.assertEqual(2, self.eval('list(2).single()'))
self.assertRaises(StopIteration, self.eval, 'list().single()')
self.assertRaises(StopIteration, self.eval, 'list(1, 2).single()')
def test_last(self):
self.assertEqual(3, self.eval('list(2, 3).last()'))
self.assertEqual(6, self.eval('list(2, 3).select($ * 2).last()'))
self.assertIsNone(self.eval('list().last(null)'))
self.assertEqual(99, self.eval('list().last(99)'))
self.assertRaises(StopIteration, self.eval, 'list().last()')
def test_range(self):
self.assertEqual([0, 1], self.eval('range(2)'))
self.assertEqual([1, 2, 3], self.eval('range(1, 4)'))
self.assertEqual([4, 3, 2], self.eval('range(4, 1, -1)'))
def test_select_many(self):
self.assertEqual([0, 0, 1, 0, 1, 2],
self.eval('range(4).selectMany(range($))'))
def test_select_many_scalar(self):
# check that string is not interpreted as a sequence and that
# selectMany works when selector returns scalar
self.assertEqual(
['xx', 'xx'],
self.eval('range(2).selectMany(xx)'))
def test_order_by(self):
self.assertEqual(
[1, 2, 3, 4],
self.eval('$.orderBy($)', data=[4, 2, 1, 3]))
self.assertEqual(
[4, 3, 2, 1],
self.eval('$.orderByDescending($)', data=[4, 2, 1, 3]))
def test_order_by_multilevel(self):
self.assertEqual(
[[1, 0], [1, 5], [2, 2]],
self.eval(
'$.orderBy($[0]).thenBy($[1])',
data=[[2, 2], [1, 5], [1, 0]]))
self.assertEqual(
[[1, 5], [1, 0], [2, 2]],
self.eval(
'$.orderBy($[0]).thenByDescending($[1])',
data=[[2, 2], [1, 5], [1, 0]]))
self.assertEqual(
[[2, 2], [1, 0], [1, 5]],
self.eval(
'$.orderByDescending($[0]).thenBy($[1])',
data=[[2, 2], [1, 5], [1, 0]]))
self.assertEqual(
[[2, 2], [1, 5], [1, 0]],
self.eval(
'$.orderByDescending($[0]).thenByDescending($[1])',
data=[[2, 2], [1, 5], [1, 0]]))
def test_group_by(self):
data = {'a': 1, 'b': 2, 'c': 1, 'd': 3, 'e': 2}
self.assertCountEqual(
[
[1, [['a', 1], ['c', 1]]],
[2, [['b', 2], ['e', 2]]],
[3, [['d', 3]]]
],
self.eval('$.items().orderBy($[0]).groupBy($[1])', data=data))
self.assertCountEqual(
[[1, ['a', 'c']], [2, ['b', 'e']], [3, ['d']]],
self.eval('$.items().orderBy($[0]).groupBy($[1], $[0])',
data=data))
self.assertCountEqual(
[[1, 'ac'], [2, 'be'], [3, 'd']],
self.eval('$.items().orderBy($[0]).'
'groupBy($[1], $[0], $.sum())', data=data))
self.assertCountEqual(
[[1, ['a', 1, 'c', 1]], [2, ['b', 2, 'e', 2]], [3, ['d', 3]]],
self.eval('$.items().orderBy($[0]).'
'groupBy($[1],, $.sum())',
data=data))
self.assertCountEqual(
[[1, ['a', 1, 'c', 1]], [2, ['b', 2, 'e', 2]], [3, ['d', 3]]],
self.eval('$.items().orderBy($[0]).'
'groupBy($[1], aggregator => $.sum())',
data=data))
def test_group_by_old_syntax(self):
# Test the syntax used in 1.1.1 and earlier, where the aggregator
# function was passed the key as well as the value list, and returned
# the key along with the aggregated value. This ensures backward
# compatibility with existing expressions.
data = {'a': 1, 'b': 2, 'c': 1, 'd': 3, 'e': 2}
        self.assertCountEqual(
[[1, 'ac'], [2, 'be'], [3, 'd']],
self.eval('$.items().orderBy($[0]).'
'groupBy($[1], $[0], [$[0], $[1].sum()])', data=data))
        self.assertCountEqual(
[[1, ['a', 1, 'c', 1]], [2, ['b', 2, 'e', 2]], [3, ['d', 3]]],
self.eval('$.items().orderBy($[0]).'
'groupBy($[1],, [$[0], $[1].sum()])',
data=data))
        self.assertCountEqual(
[[1, ['a', 1, 'c', 1]], [2, ['b', 2, 'e', 2]], [3, ['d', 3]]],
self.eval('$.items().orderBy($[0]).'
'groupBy($[1], aggregator => [$[0], $[1].sum()])',
data=data))
def test_join(self):
self.assertEqual(
[[2, 1], [3, 1], [3, 2], [4, 1], [4, 2], [4, 3]],
self.eval('$.join($, $1 > $2, [$1, $2])', data=[1, 2, 3, 4]))
self.assertEqual(
[[1, 3], [1, 4], [2, 3], [2, 4]],
self.eval('[1,2].join([3, 4], true, [$1, $2])'))
def test_zip(self):
self.assertEqual(
[[1, 4], [2, 5]],
self.eval('[1, 2, 3].zip([4, 5])'))
self.assertEqual(
[[1, 4, 6], [2, 5, 7]],
self.eval('[1, 2, 3].zip([4, 5], [6, 7, 8])'))
def test_zip_longest(self):
self.assertEqual(
[[1, 4], [2, 5], [3, None]],
self.eval('[1, 2, 3].zipLongest([4, 5])'))
self.assertEqual(
[[1, 4, 6], [2, 5, None], [3, None, None]],
self.eval('[1, 2, 3].zipLongest([4, 5], [6])'))
self.assertEqual(
[[1, 4], [2, 5], [3, 0]],
self.eval('[1, 2, 3].zipLongest([4, 5], default => 0)'))
def test_repeat(self):
self.assertEqual(
[None, None],
self.eval('null.repeat(2)'))
self.assertEqual(
[1, 1, 1, 1, 1],
self.eval('1.repeat().limit(5)'))
def test_cycle(self):
self.assertEqual(
[1, 2, 1, 2, 1],
self.eval('[1, 2].cycle().take(5)'))
def test_take_while(self):
self.assertEqual(
[1, 2, 3],
self.eval('[1, 2, 3, 4, 5].takeWhile($ < 4)'))
def test_skip_while(self):
self.assertEqual(
[4, 5],
self.eval('[1, 2, 3, 4, 5].skipWhile($ < 4)'))
def test_index_of(self):
self.assertEqual(1, self.eval('[1, 2, 3, 2, 1].indexOf(2)'))
self.assertEqual(-1, self.eval('[1, 2, 3, 2, 1].indexOf(22)'))
def test_last_index_of(self):
self.assertEqual(3, self.eval('[1, 2, 3, 2, 1].lastIndexOf(2)'))
self.assertEqual(-1, self.eval('[1, 2, 3, 2, 1].lastIndexOf(22)'))
def test_index_where(self):
self.assertEqual(1, self.eval('[1, 2, 3, 2, 1].indexWhere($ = 2)'))
self.assertEqual(-1, self.eval('[1, 2, 3, 2, 1].indexWhere($ = 22)'))
def test_last_index_where(self):
self.assertEqual(3, self.eval('[1, 2, 3, 2, 1].lastIndexWhere($ = 2)'))
self.assertEqual(
-1, self.eval('[1, 2, 3, 2, 1].lastIndexWhere($ = 22)'))
def test_slice(self):
self.assertEqual(
[[1, 2], [3, 4], [5]],
self.eval('range(1, 6).slice(2)'))
self.assertEqual(
[[1, 2], [3, 4], [5]],
self.eval('[1,2,3,4,5].slice(2)'))
def test_split_where(self):
self.assertEqual(
[[], [2, 3], [5]],
self.eval('range(1, 6).splitWhere($ mod 3 = 1)'))
def test_split_at(self):
self.assertEqual(
[[1, 2], [3, 4, 5]],
self.eval('range(1, 6).splitAt(2)'))
def test_slice_where(self):
self.assertEqual(
[['a', 'a'], ['b'], ['a', 'a']],
self.eval('[a,a,b,a,a].sliceWhere($ != a)'))
def test_aggregate(self):
self.assertEqual(
'aabaa',
self.eval('[a,a,b,a,a].aggregate($1 + $2)'))
self.assertRaises(
TypeError,
self.eval, '[].aggregate($1 + $2)')
self.assertEqual(
1,
self.eval('[].aggregate($1 + $2, 1)'))
self.assertEqual(
'aabaa',
self.eval('[a,a,b,a,a].reduce($1 + $2)'))
self.assertEqual(
0,
self.eval('[].reduce(max($1, $2), 0)'))
def test_accumulate(self):
self.assertEqual(
['a', 'aa', u'aab', 'aaba', 'aabaa'],
self.eval('[a,a,b,a,a].accumulate($1 + $2)'))
self.assertEqual(
[1],
self.eval('[].accumulate($1 + $2, 1)'))
def test_default_if_empty(self):
self.assertEqual(
[1, 2],
self.eval('[].defaultIfEmpty([1, 2])'))
self.assertEqual(
[3, 4],
self.eval('[3, 4].defaultIfEmpty([1, 2])'))
self.assertEqual(
[1, 2],
self.eval('[].select($).defaultIfEmpty([1, 2])'))
self.assertEqual(
[3, 4],
self.eval('[3, 4].select($).defaultIfEmpty([1, 2])'))
def test_generate(self):
self.assertEqual(
[0, 2, 4, 6, 8],
self.eval('generate(0, $ < 10, $ + 2)'))
self.assertEqual(
[0, 4, 16, 36, 64],
self.eval('generate(0, $ < 10, $ + 2, $ * $)'))
def test_generate_many(self):
friends = {
'John': ['Jim'],
'Jim': ['Jay', 'Jax'],
'Jax': ['John', 'Jacob', 'Jonathan'],
'Jacob': ['Jonathan', 'Jenifer'],
}
self.assertEqual(
['John', 'Jim', 'Jay', 'Jax', 'Jacob', 'Jonathan', 'Jenifer'],
self.eval(
'generateMany(John, $data.get($, []), decycle => true)',
friends))
self.assertEqual(
['John', 'Jim', 'Jay', 'Jax', 'Jacob', 'Jonathan', 'Jenifer'],
self.eval(
'generateMany(John, $data.get($, []), '
'decycle => true, depthFirst => true)', friends))
self.assertEqual(
['Jay'],
self.eval('generateMany(Jay, $data.get($, []))', friends))
self.assertEqual(
['JAX', 'JOHN', 'JACOB', 'JONATHAN', 'JIM', 'JENIFER', 'JAY'],
self.eval(
'generateMany(Jax, $data.get($, []), $.toUpper(), '
'decycle => true)', friends))
def test_max(self):
self.assertEqual(
0,
self.eval('[].max(0)'))
self.assertRaises(
TypeError,
self.eval, '[].max()')
self.assertEqual(
234,
self.eval('[44, 234, 23].max()'))
def test_min(self):
self.assertEqual(
0,
self.eval('[].min(0)'))
self.assertRaises(
TypeError,
self.eval, '[].min()')
self.assertEqual(
23,
self.eval('[44, 234, 23].min()'))
def test_reverse(self):
self.assertEqual(
[9, 4, 1],
self.eval('range(1, 4).select($*$).reverse()'))
def test_merge_with(self):
dict1 = {'a': 1, 'b': 'x', 'c': [1, 2], 'x': {'a': 1}}
dict2 = {'d': 5, 'b': 'y', 'c': [2, 3], 'x': {'b': 2}}
self.assertEqual(
{'a': 1, 'c': [1, 2, 3], 'b': 'y', 'd': 5, 'x': {'a': 1, 'b': 2}},
self.eval(
'$.d1.mergeWith($.d2)',
data={'d1': dict1, 'd2': dict2}))
dict1 = {'a': 1, 'b': 2, 'c': [1, 2]}
dict2 = {'d': 5, 'b': 3, 'c': [2, 3]}
self.assertEqual(
{'a': 1, 'c': [1, 2, 2, 3], 'b': 3, 'd': 5},
self.eval(
'$.d1.mergeWith($.d2, $1 + $2)',
data={'d1': dict1, 'd2': dict2}))
self.assertEqual(
{'a': 1, 'b': 3, 'c': [2, 3], 'd': 5},
self.eval(
'$.d1.mergeWith($.d2, $1 + $2, maxLevels => 1)',
data={'d1': dict1, 'd2': dict2}))
self.assertEqual(
{'a': 1, 'b': 2, 'c': [1, 2, 3], 'd': 5},
self.eval(
'$.d1.mergeWith($.d2,, min($1, $2))',
data={'d1': dict1, 'd2': dict2}))
def test_is_iterable(self):
self.assertEqual(
True,
self.eval('isIterable([])'))
self.assertEqual(
True,
self.eval('isIterable([1,2])'))
self.assertEqual(
True,
self.eval('isIterable(set(1,2))'))
self.assertEqual(
False,
self.eval('isIterable(1)'))
self.assertEqual(
False,
self.eval('isIterable("foo")'))
self.assertEqual(
False,
self.eval('isIterable({"a" => 1})'))
def test_infinite_collections(self):
self.assertRaises(
exceptions.CollectionTooLargeException,
self.eval, 'len(list(sequence()))')
self.assertRaises(
exceptions.CollectionTooLargeException,
self.eval, 'list(sequence())')
self.assertRaises(
exceptions.CollectionTooLargeException,
self.eval, 'len(dict(sequence().select([$, $])))')
self.assertRaises(
exceptions.CollectionTooLargeException,
self.eval, 'dict(sequence().select([$, $]))')
self.assertRaises(
exceptions.CollectionTooLargeException,
self.eval, 'sequence()')
self.assertRaises(
exceptions.CollectionTooLargeException,
self.eval, 'set(sequence())')
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import sys
import rosunit
from mock import patch
from parameterized import parameterized, param
from fiware_ros_bridge.logging import getLogger
class TestGetLogger(unittest.TestCase):
@parameterized.expand([
param(logm='debugf', rosm='logdebug'),
param(logm='infof', rosm='loginfo'),
param(logm='warnf', rosm='logwarn'),
param(logm='errorf', rosm='logerr'),
param(logm='fatalf', rosm='logfatal'),
])
@patch('fiware_ros_bridge.logging.rospy')
def test_log_wo_params(self, mocked_rospy, logm, rosm):
name = 'foo'
message = 'test message'
log_message = '[{name}:{caller}] {message}'.format(
name=name,
caller=self.__class__.__name__ + '.' + sys._getframe().f_code.co_name,
message=message,
)
logger = getLogger(name)
assert logger.name == name
getattr(logger, logm)(message)
getattr(mocked_rospy, rosm).assert_called_once_with(log_message)
@parameterized.expand([
param(logm='debugf', rosm='logdebug'),
param(logm='infof', rosm='loginfo'),
param(logm='warnf', rosm='logwarn'),
param(logm='errorf', rosm='logerr'),
param(logm='fatalf', rosm='logfatal'),
])
@patch('fiware_ros_bridge.logging.rospy')
def test_log_w_params(self, mocked_rospy, logm, rosm):
name = 'foo'
message = 'test message'
arg0 = 'arg0'
arg1 = 'arg1'
kwargs0 = 'kwargs0'
kwargs1 = 'kwargs1'
log_message = '[{name}:{caller}] {message}, {arg1}, {kwargs0}, {arg0}, {kwargs1}'.format(
name=name,
caller=self.__class__.__name__ + '.' + sys._getframe().f_code.co_name,
message=message,
arg0=arg0,
arg1=arg1,
kwargs0=kwargs0,
kwargs1=kwargs1,
)
logger = getLogger(name)
assert logger.name == name
getattr(logger, logm)(message + ', {1}, {kwargs0}, {0}, {kwargs1}', arg0, arg1, kwargs1=kwargs1, kwargs0=kwargs0)
getattr(mocked_rospy, rosm).assert_called_once_with(log_message)
if __name__ == '__main__':
rosunit.unitrun('fiware_ros_bridge', 'test_logging', TestGetLogger)
|
nilq/baby-python
|
python
|
from sims4.tuning.tunable import HasTunableSingletonFactory, AutoFactoryInit, OptionalTunable, TunableVariant
from ui.ui_dialog import UiDialogOk, UiDialogOkCancel
import enum
import services
class SituationTravelRequestType(enum.Int):
ALLOW = ...
CAREER_EVENT = ...
DISALLOW = ...
class _SituationTravelRequestDisallow(HasTunableSingletonFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'dialog': OptionalTunable(description='\n If enabled, show a dialog informing the player of the travel\n prohibition. If disabled, silently fail.\n ', tunable=UiDialogOk.TunableFactory(description='\n The dialog to show when an incoming request is denied.\n '))}
def __call__(self, user_facing_situation, travel_situation_type, travel_request_fn, **kwargs):
if self.dialog is not None:
dialog = self.dialog(services.active_sim_info())
dialog.show_dialog()
@property
def restrict(self):
return SituationTravelRequestType.DISALLOW
class _SituationTravelRequestAllow(HasTunableSingletonFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'dialog': OptionalTunable(description='\n If enabled, display a prompt requiring player confirmation. If\n disabled, immediately end this situation and allow the travel\n request to go through.\n ', tunable=UiDialogOkCancel.TunableFactory())}
def __call__(self, user_facing_situation, travel_situation_type, travel_request_fn, **kwargs):
if self.dialog is None:
return travel_request_fn()
def on_response(dialog):
if dialog.accepted:
travel_request_fn()
dialog = self.dialog(services.active_sim_info())
dialog.show_dialog(on_response=on_response)
@property
def restrict(self):
return SituationTravelRequestType.ALLOW
class TunableSituationTravelRequestBehaviorVariant(TunableVariant):
def __init__(self, *args, **kwargs):
super().__init__(*args, disallow=_SituationTravelRequestDisallow.TunableFactory(), allow=_SituationTravelRequestAllow.TunableFactory(), default='disallow', **kwargs)
|
nilq/baby-python
|
python
|
"""
Intersecting Linked Lists
Given two singly linked lists that intersect at some point, find the intersecting node. The lists are non-cyclical.
In this example, assume nodes with the same value are the exact same node objects.
Input: 3 -> 7 -> 8 -> 10, 99 -> 1 -> 8 -> 10
Output: 8
=========================================
Find the longer linked list and advance its pointer by the length difference (so both lists then have the same number of remaining elements).
After that move both pointers from the both lists and compare elements.
Time Complexity: O(N + M)
Space Complexity: O(1)
"""
############
# Solution #
############
# import ListNode class from ll_helpers.py
from ll_helpers import ListNode
def find_intersecting_node(ll1, ll2):
# count how many nodes contains the first ll
count1 = 0
temp1 = ll1
while temp1 is not None:
count1 += 1
temp1 = temp1.next
# count how many nodes contains the second ll
count2 = 0
temp2 = ll2
while temp2 is not None:
count2 += 1
temp2 = temp2.next
# move only one of the lls for the difference
m = min(count1, count2)
for i in range(count1 - m):
ll1 = ll1.next
for i in range(count2 - m):
ll2 = ll2.next
# find the intersecting node
intersect = None
while ll1 is not None:
# if the values are different, this is not the intersecting node
if ll1.val != ll2.val:
intersect = None
else:
            # if the values are equal and there is no intersecting node
            # carried over from before, then this is the intersecting node
            if intersect is None:
intersect = ll1
ll1 = ll1.next
ll2 = ll2.next
return intersect
###########
# Testing #
###########
# import build_ll method from ll_helpers.py
from ll_helpers import build_ll
# Test 1
# Correct result => 8
ll1 = build_ll([3, 7, 8, 10])
ll2 = build_ll([1, 8, 10])
print(find_intersecting_node(ll1, ll2).val)
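# Test 2 (added sketch; assumes build_ll accepts any list of values)
# Lists with no common tail; correct result => None
ll3 = build_ll([1, 2, 3])
ll4 = build_ll([4, 5, 6])
print(find_intersecting_node(ll3, ll4))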
|
nilq/baby-python
|
python
|
from setuptools import setup
from torch.utils.cpp_extension import CUDAExtension, BuildExtension
setup(name='syncbn_gpu',
ext_modules=[CUDAExtension('syncbn_gpu', ['syncbn_cuda.cpp', 'syncbn_cuda_kernel.cu'])],
cmdclass={'build_ext': BuildExtension})
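# Build sketch (standard setuptools invocation; assumes a CUDA toolchain
# is available):
#   python setup.py build_ext --inplace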
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
class MetadataError(Exception):
pass
class CopyError(RuntimeError):
pass
def err_contains_group(path):
raise ValueError('path %r contains a group' % path)
def err_contains_array(path):
raise ValueError('path %r contains an array' % path)
def err_array_not_found(path):
raise ValueError('array not found at path %r' % path)
def err_group_not_found(path):
raise ValueError('group not found at path %r' % path)
def err_path_not_found(path):
raise ValueError('nothing found at path %r' % path)
def err_bad_compressor(compressor):
raise ValueError('bad compressor; expected Codec object, found %r' %
compressor)
def err_fspath_exists_notdir(fspath):
raise ValueError('path exists but is not a directory: %r' % fspath)
def err_read_only():
raise PermissionError('object is read-only')
def err_boundscheck(dim_len):
raise IndexError('index out of bounds for dimension with length {}'
.format(dim_len))
def err_negative_step():
raise IndexError('only slices with step >= 1 are supported')
def err_too_many_indices(selection, shape):
raise IndexError('too many indices for array; expected {}, got {}'
.format(len(shape), len(selection)))
def err_vindex_invalid_selection(selection):
raise IndexError('unsupported selection type for vectorized indexing; only '
'coordinate selection (tuple of integer arrays) and mask selection '
'(single Boolean array) are supported; got {!r}'.format(selection))
|
nilq/baby-python
|
python
|
# Write a function to swap two numbers in place (that is, without temporary variables)
# Hint 491: Try picturing the two numbers, a and b, on a number line
# Hint 715: Let diff be the difference between a and b. Can you use diff in some way? Then can you get rid of this temporary variable?
# Hint 736: You could also try using XOR
def swap(numberA, numberB):
numberA = numberA ^ numberB
numberB = numberA ^ numberB
numberA = numberA ^ numberB
return (numberA, numberB)
print(swap(20,10))
# Solution: Swap using XOR. First store the XOR of both values in A (A = A ^ B).
# Second, apply XOR again and store in B (B = A ^ B), which moves the original A into B.
# Finally, apply XOR once more (A = A ^ B) to recover the original B into A.
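# Worked trace (sketch) for swap(20, 10), shown with 5-bit patterns:
#   numberA = 20 ^ 10  ->  0b10100 ^ 0b01010 = 0b11110 (30)
#   numberB = 30 ^ 10  ->  0b11110 ^ 0b01010 = 0b10100 (20)  # original A
#   numberA = 30 ^ 20  ->  0b11110 ^ 0b10100 = 0b01010 (10)  # original B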
|
nilq/baby-python
|
python
|
msg = ['We see immediately that one needs little information to begin to break down the process.','An enciphering-deciphering machine (in general outline) of my invention has been sent to your organization.','The significance of this general conjecture, assuming its truth, is easy to see. It means that it may be feasible to design ciphers that are effectively unbreakable.','If qualified opinions incline to believe in the exponential conjecture, then I think we cannot afford not to make use of it.']
for item in msg:
print (len(item))
|
nilq/baby-python
|
python
|
"""
Sazonov, S. Yu., Ostriker, J. P., & Sunyaev, R. A. 2004, MNRAS, 347, 144
"""
import numpy as np
# Parameters for the Sazonov & Ostriker AGN template
_Alpha = 0.24
_Beta = 1.60
_Gamma = 1.06
_E_1 = 83e3
_K = 0.0041
_E_0 = (_Beta - _Alpha) * _E_1
_A = np.exp(2e3 / _E_1) * 2e3**_Alpha
_B = ((_E_0**(_Beta - _Alpha)) \
* np.exp(-(_Beta - _Alpha))) / \
(1.0 + (_K * _E_0**(_Beta - _Gamma)))
# Normalization constants to make the SOS04 spectrum continuous.
_SX_Normalization = 1.0
_UV_Normalization = _SX_Normalization * ((_A * 2e3**-_Alpha) * \
np.exp(-2e3 / _E_1)) / ((1.2 * 2e3**-1.7) * np.exp(2000.0 / 2000.))
_IR_Normalization = _UV_Normalization * ((1.2 * 10**-1.7) \
* np.exp(10.0 / 2e3)) / (1.2 * 159 * 10**-0.6)
_HX_Normalization = _SX_Normalization * (_A * _E_0**-_Alpha * \
np.exp(-_E_0 / _E_1)) / (_A * _B * (1.0 + _K * _E_0**(_Beta - _Gamma)) * \
_E_0**-_Beta)
def Spectrum(E, t=0.0, **kwargs):
"""
Broadband quasar template spectrum.
References
----------
Sazonov, S., Ostriker, J.P., & Sunyaev, R.A. 2004, MNRAS, 347, 144.
"""
op = (E < 10)
uv = (E >= 10) & (E < 2e3)
xs = (E >= 2e3) & (E < _E_0)
xh = (E >= _E_0) & (E < 4e5)
if type(E) in [int, float]:
if op:
F = _IR_Normalization * 1.2 * 159 * E**-0.6
elif uv:
F = _UV_Normalization * 1.2 * E**-1.7 * np.exp(E / 2000.0)
elif xs:
F = _SX_Normalization * _A * E**-_Alpha * np.exp(-E / _E_1)
elif xh:
F = _HX_Normalization * _A * _B * (1.0 + _K * \
E**(_Beta - _Gamma)) * E**-_Beta
else:
F = 0
else:
F = np.zeros_like(E)
F += op * _IR_Normalization * 1.2 * 159 * E**-0.6
F += uv * _UV_Normalization * 1.2 * E**-1.7 * np.exp(E / 2000.0)
F += xs * _SX_Normalization * _A * E**-_Alpha * np.exp(-E / _E_1)
F += xh * _HX_Normalization * _A * _B * (1.0 + _K * \
E**(_Beta - _Gamma)) * E**-_Beta
return E * F
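# Minimal usage sketch (photon energies E are in eV, matching the band
# edges above; the grid is illustrative):
#   E = np.logspace(0, 5.5, 200)   # ~1 eV to ~316 keV
#   sed = Spectrum(E)              # broadband template, returns E * F_E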
|
nilq/baby-python
|
python
|
import sympy as sp
import numpy as np
import pickle
class SymbolicRateMatrixArrhenius(sp.Matrix):
"""
Symbolic representation of Arrhenius process rate matrix.
"""
class Symbols:
@classmethod
def _barrier_element_symbol(cls, i, j):
if i == j:
return 0
return sp.symbols('B_%d%d' % (i + 1, j + 1), real=True)
def __init__(self, N):
self.E_i = sp.symbols('E_1:%d' % (N + 1), real=True)
self.B_ij = sp.Matrix(N, N, self._barrier_element_symbol)
self.T = sp.symbols('T', real=True)
@classmethod
def _create_elements(cls, N):
symbols = cls.Symbols(N)
def create_symbolic_rate_matrix_element(i, j):
if i == j:
return 0
return sp.exp(- (symbols.B_ij[i, j] - symbols.E_i[j]) / symbols.T)
rate_matrix_symbolic = sp.Matrix(N, N, create_symbolic_rate_matrix_element)
# Set each diagonal element as minus the sum of the other elements in its column (ensures Detailed Balance)
rate_matrix_symbolic -= sp.diag(*np.sum(rate_matrix_symbolic, axis=0))
return rate_matrix_symbolic, symbols
def __new__(cls, N):
"""
Parameters
----------
N : int
Number of states.
"""
elements, symbols = cls._create_elements(N)
self = super().__new__(cls, elements)
self.symbols = symbols
return self
def subs_symbols(self, energies=None, barriers=None, temperature=None):
"""
Return a new rate matrix with subs applied to each entry.
Parameters
----------
energies : 1-D array or sequence of float
Energies of the states of the arrhenius, ordered in ascending order.
barriers : 2-D array
Matrix of energy barriers between states.
temperature : float
Temperature.
Returns
-------
new : SymbolicRateMatrixArrhenius
New instance of RateMatrixArrhenius with subs applied.
"""
subs_dict = {}
if energies is not None:
subs_dict.update(zip(self.symbols.E_i, energies))
if barriers is not None:
subs_dict.update(zip(np.ravel(self.symbols.B_ij), np.ravel(barriers)))
del subs_dict[0]
if temperature is not None:
subs_dict.update({self.symbols.T: temperature})
expr = self.subs(subs_dict)
if not expr.free_symbols:
expr = np.array(expr).astype(np.float64)
return expr
def lambdify(self, symmetric_barriers=False):
params = (self.symbols.T,) + self.symbols.E_i
if symmetric_barriers:
barriers_subs = dict(zip(np.ravel(np.triu(self.symbols.B_ij.T)),
np.ravel(np.triu(self.symbols.B_ij))))
barriers_free_symbols = set(barriers_subs.values())
expr = self.subs(barriers_subs)
else:
barriers_free_symbols = set(self.symbols.B_ij.values())
expr = self
params += tuple(filter(lambda b: b in barriers_free_symbols, self.symbols.B_ij.values()))
return sp.lambdify(params, expr)
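# Usage sketch (illustrative values; a 3-state system):
#   R = SymbolicRateMatrixArrhenius(3)
#   R_num = R.subs_symbols(energies=[0.0, 1.0, 2.0],
#                          barriers=[[0, 3, 4], [3, 0, 5], [4, 5, 0]],
#                          temperature=1.0)
#   # With all symbols substituted, R_num is a plain float ndarray whose
#   # columns sum to zero (the detailed-balance construction above).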
class _SymbolicThreeStateEigensystem:
FILE_NAME_EIGENSYSTEM = 'three_state_eigensystem_symbolic.pickle'
@classmethod
def _file_path(cls):
import os
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
return os.path.join(__location__, cls.FILE_NAME_EIGENSYSTEM)
@classmethod
def _save_eigensystem(cls):
r_sym = SymbolicRateMatrixArrhenius(3)
eigensystem_right = r_sym.eigenvects()
eigensystem_left = r_sym.T.eigenvects()
eigenvalues, _, V = zip(*eigensystem_right)
_, _, U = zip(*eigensystem_left)
# The returned eigenvalues (from sympy) is ordered as: lam1, lam3, lam2 (seen in numerical checks)
u1, u3, u2 = [sp.Matrix(U[i][0]) for i in [0, 1, 2]]
lam1, lam3, lam2 = eigenvalues
v1, v3, v2 = [sp.Matrix(V[i][0]) for i in [0, 1, 2]]
# Normalization of left eigenvectors by their sum of their components
u1 = sp.simplify(u1 / (np.sum(u1) / 3.))
u2 = u2 / (np.sum(u2) / 3.)
u3 = u3 / (np.sum(u3) / 3.)
# Normalization of right eigenvectors by the inner product with the left eigenvectors
v1 = v1 / u1.dot(v1)
v2 = v2 / u2.dot(v2)
v3 = v3 / u3.dot(v3)
es = (u1, u2, u3), (lam1, lam2, lam3), (v1, v2, v3)
pickle.dump(es, open(cls._file_path(), 'wb'))
@classmethod
def load_eigensystem(cls):
return pickle.load(open(cls._file_path(), 'rb'))
def symbolic_three_state_eigensystem():
return _SymbolicThreeStateEigensystem.load_eigensystem()
|
nilq/baby-python
|
python
|
"""Author: Brandon Trabucco.
Verify that the installation of GloVe is functional.
"""
import glove
config = glove.configuration.Configuration(
embedding=50,
filedir="./embeddings/",
length=127,
start_word="</StArT/>",
end_word="</StoP/>",
unk_word="</UnKnOwN/>")
vocab, embeddings = glove.load(config)
assert len(vocab.reverse_vocab) == 127, ""
for w in vocab.reverse_vocab:
assert w in vocab.vocab, ""
assert vocab.word_to_id(config.start_word) == vocab.start_id, ""
assert vocab.word_to_id(config.end_word) == vocab.end_id, ""
assert vocab.word_to_id(config.unk_word) == vocab.unk_id, ""
assert vocab.word_to_id("./.2!#&*@^@%") == vocab.unk_id, ""
assert vocab.id_to_word(vocab.start_id) == config.start_word, ""
assert vocab.id_to_word(vocab.end_id) == config.end_word, ""
assert vocab.id_to_word(vocab.unk_id) == config.unk_word, ""
assert vocab.id_to_word(11182819) == config.unk_word, ""
assert embeddings.shape[0] == 127, ""
assert embeddings.shape[1] == 50, ""
assert embeddings.size == 127 * 50, ""
print("All test cases passed.")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
'''
Program idea:
There are two local speech libraries: the American-English library
Speech_US and the British-English library Speech_EN.
Call the Youdao API to fetch each word's pronunciation MP3 and store
it in the corresponding speech library.
Main interfaces:
word_pronounce() -- pronounce a single word
multi_thread_download() -- batch multi-threaded download of word pronunciations
'''
import urllib.request
from concurrent.futures import ThreadPoolExecutor
import os
from playsound import playsound
class pronounciation():
    def __init__(self, type=0, word='hello'):
        '''
        Calls the Youdao API.
        type = 0: American pronunciation
        type = 1: British pronunciation
        Checks whether the two speech-library directories exist under the
        current directory, and creates them if they do not.
        '''
        word = word.lower()  # lowercase
        self._type = type  # pronunciation type
        self._word = word  # the word itself
        # Root directory of this file
        self._dirRoot = os.path.dirname(os.path.abspath(__file__))
        if 0 == self._type:
            self._dir_speech = os.path.join(self._dirRoot + '/..', 'Speech_US')  # American library
        else:
            self._dir_speech = os.path.join(self._dirRoot + '/..', 'Speech_EN')  # British library
        # Check whether the American library exists
        # print(os.path)
        if not os.path.exists('../Speech_US'):
            # It does not exist, so create it
            os.makedirs('../Speech_US')
        # Check whether the British library exists
        if not os.path.exists('../Speech_EN'):
            # It does not exist, so create it
            os.makedirs('../Speech_EN')
def word_input(self, word_and_type):
        '''
        For testing: interactively read words into a list of the form
        [(word, type), (word, type), (word, type)]
        '''
word = 'hello'
print('input word \nEnds with a #')
while word != '#':
word = input('word: ')
if word == '#':
break
type = input('type( US(0) or EN(1) or both(2) ): ')
if type == '1':
t = 1
elif type == '0':
t = 0
else:
t = 2
word_and_type.append((word, t))
def print_wordlist(self, word_and_type):
for cur in word_and_type:
print('word: ' + cur[0] + ' type: ' + str(cur[1]))
def down(self, w_t):
        '''
        Downloads the MP3 for a word.
        Checks whether the speech library already contains the MP3,
        and downloads it if not.
        '''
word = w_t[0].lower()
type = w_t[1]
dir_speech = self._get_dir_speech(type)
        tmp, filePath, fileName = self._get_mp3_file_path(word, type, dir_speech)
if tmp is False:
            cur_url = self._getURL(word, type)  # build the URL
            # Call the downloader and save into the target folder
            # print('%s.mp3 is missing; downloading from URL' % fileName)
            urllib.request.urlretrieve(cur_url, filename=filePath)
            # print('%s.mp3 download finished\n' % fileName)
        else:
            pass
            # print('%s.mp3 already exists; no download needed' % fileName)
        # Return the sound file path
return filePath
def _getURL(self, word, type):
        '''
        Private helper: builds the target pronunciation URL, e.g.
        http://dict.youdao.com/dictvoice?type=0&audio=
        '''
url = r'http://dict.youdao.com/dictvoice?type=' + str(
type) + r'&audio=' + word
return url
def _get_mp3_file_path(self, word, type, dir_speech):
        '''
        Gets the local MP3 file path for a word.
        Returns a tuple (exists, absolute file path, file name), where
        exists is True if the MP3 file is already present.
        '''
        word = word.lower()  # lowercase
        # print('word: '+self._word+' type: '+str(self._type)+'\n')
if type == 0:
fileName = word + '_US.mp3'
else:
fileName = word + '_EN.mp3'
filePath = os.path.join(dir_speech, fileName)
        # Check whether this MP3 file exists
        if os.path.exists(filePath):
            # The MP3 exists
            return (True, filePath, fileName)
        else:
            # The MP3 does not exist
            return (False, filePath, fileName)
    def _get_dir_speech(self, type):  # returns the absolute path of the speech-library directory
        if 0 == type:
            dir_speech = os.path.join(self._dirRoot + '/..', 'Speech_US')  # American library
        else:
            dir_speech = os.path.join(self._dirRoot + '/..', 'Speech_EN')  # British library
return dir_speech
def word_pronounce(self, w_t=('hello', 0)):
        '''
        Pronounces a word.
        If the pronunciation has already been downloaded, play it directly;
        otherwise download it first, then play it.
        The input parameter is a 2-tuple:
        first element: the word
        second element: pronunciation type (0: American, 1: British,
        2: ask again inside this function <for the case where both the
        American and the British files have been downloaded>)
        '''
        self._word = w_t[0]
        self._type = w_t[1]
        if w_t[1] == 2:
            print('US(0) or EN(1): ')
            self._type = int(input())  # cast so the comparison in _get_mp3_file_path works
dir_speech = self._get_dir_speech(self._type)
tmp = self._get_mp3_file_path(self._word, self._type, dir_speech)
if tmp[0] is False:
# print("该单词尚未下载\n")
# print("即将下载\n")
self.down(w_t)
self.word_pronounce(w_t)
else:
playsound(tmp[1])
def multi_thread_download(self, word_and_type, num=9):
        '''
        Multi-threaded batch download of word pronunciations.
        Input parameters:
        1. A list of 2-tuples of the form [(word, type), (word, type), ...]
        where the first element is the word and the second is the
        pronunciation type (0: American, 1: British, 2: download both
        the American and the British file).
        2. The thread-pool size, 9 by default (optional).
        A common rule of thumb for the pool size is 2N+1 (N = number of CPUs).
        '''
        # Multithreading approach adapted from https://www.jb51.net/article/170571.htm
        pool = ThreadPoolExecutor(num)  # thread-pool size
for cur_w_t in word_and_type:
            if cur_w_t[1] == 2:
                # type 2 expands into both variants; appending to the list
                # during iteration is intentional -- the loop will also visit
                # the appended (word, 0) and (word, 1) entries
new1_w_t = (cur_w_t[0], 0)
new2_w_t = (cur_w_t[0], 1)
word_and_type.append(new1_w_t)
word_and_type.append(new2_w_t)
continue
pool.submit(self.down, cur_w_t)
'''
if __name__ == "__main__":
word_and_type = []
ss = pronounciation()
    ss.word_input(word_and_type) # input helper, for testing
ss.multi_thread_download(word_and_type)
ss.word_pronounce(('Lebron', 0))
'''
|
nilq/baby-python
|
python
|
# **********************************************************************
# Copyright (C) 2020 Johns Hopkins University Applied Physics Laboratory
#
# All Rights Reserved.
# For any other permission, please contact the Legal Office at JHU/APL.
# **********************************************************************
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 29 23:33:28 2020
@author: abhi0
"""
import re
from typing import List


class Solution:
def plusOne(self, digits: List[int]) -> List[int]:
tempTilda=''
for i in digits:
tempTilda=tempTilda+str(i)
temp=re.split('',tempTilda)
temp=temp[1:len(temp)-1]
sumIp=1
sumOp=[]
carOp=[]
carFlag=1
for i in reversed(temp):
if sumIp==1 and carFlag==1:
tempPrime=int(i)+1
else:
tempPrime=int(i)
if tempPrime>9:
sumOp.append(0)
carOp.append(1)
carFlag=1
sumIp=1
else:
sumOp.append(tempPrime)
carOp.append(0)
carFlag=0
sumIp=0
        totSum=[]
        if carOp[len(carOp)-1]==1:
            # a leading carry means every digit overflowed; sumOp holds the
            # resulting digits in reversed order, so reverse it back
            totSum.append(carOp[len(carOp)-1])
            totSum.extend(sumOp[::-1])
        else:
            totSum.extend(sumOp[::-1])
        return totSum
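# Example trace (sketch): plusOne([1, 2, 9]) walks the digits right to left:
#   9 + 1 -> digit 0, carry 1; 2 + carry -> 3, carry 0; 1 unchanged
#   no carry remains at the front, so reversing sumOp gives [1, 3, 0]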
|
nilq/baby-python
|
python
|
from unyt import unyt_array, unyt_quantity
from astropy.units import Quantity
import logging
from more_itertools import always_iterable
import numpy as np
pyxsimLogger = logging.getLogger("pyxsim")
ufstring = "%(name)-3s : [%(levelname)-9s] %(asctime)s %(message)s"
cfstring = "%(name)-3s : [%(levelname)-18s] %(asctime)s %(message)s"
pyxsim_sh = logging.StreamHandler()
# create formatter and add it to the handlers
formatter = logging.Formatter(ufstring)
pyxsim_sh.setFormatter(formatter)
# add the handler to the logger
pyxsimLogger.addHandler(pyxsim_sh)
pyxsimLogger.setLevel('INFO')
pyxsimLogger.propagate = False
mylog = pyxsimLogger
def parse_value(value, default_units, ds=None):
if isinstance(value, Quantity):
value = unyt_quantity.from_astropy(value)
if ds is None:
quan = unyt_quantity
else:
quan = ds.quan
if isinstance(value, unyt_quantity):
return quan(value.v, value.units).in_units(default_units)
elif isinstance(value, tuple):
return quan(value[0], value[1]).in_units(default_units)
else:
return quan(value, default_units)
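# Usage sketch for parse_value (illustrative values):
#   parse_value(2.0, "keV")          -> unyt_quantity(2.0, "keV")
#   parse_value((5.0, "keV"), "erg") -> the same energy converted to erg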
def isunitful(a):
if isinstance(a, (Quantity, unyt_array)):
return True
elif isinstance(a, tuple):
try:
unyt_array(a[0], a[1])
return True
except:
pass
return False
def ensure_list(obj):
return list(always_iterable(obj))
def validate_parameters(first, second, skip=None):
if skip is None:
skip = []
keys1 = list(first.keys())
keys2 = list(second.keys())
keys1.sort()
keys2.sort()
if keys1 != keys2:
raise RuntimeError("The two inputs do not have the same parameters!")
for k1, k2 in zip(keys1, keys2):
if k1 not in skip:
            v1 = first[k1][()]
            v2 = second[k2][()]
if isinstance(v1, (str, bytes)) or isinstance(v2, (str, bytes)):
check_equal = v1 == v2
else:
check_equal = np.allclose(np.array(v1), np.array(v2), rtol=0.0, atol=1.0e-10)
if not check_equal:
raise RuntimeError(f"The values for the parameter '{k1}' in the two inputs"
f" are not identical ({v1} vs. {v2})!")
def merge_files(input_files, output_file, overwrite=False,
add_exposure_times=False):
r"""
Helper function for merging PhotonList or EventList HDF5 files.
Parameters
----------
input_files : list of strings
List of filenames that will be merged together.
output_file : string
Name of the merged file to be outputted.
overwrite : boolean, default False
If a the output file already exists, set this to True to
overwrite it.
add_exposure_times : boolean, default False
If set to True, exposure times will be added together. Otherwise,
the exposure times of all of the files must be the same.
Examples
--------
>>> from pyxsim import merge_files
>>> merge_files(["events_0.h5","events_1.h5","events_3.h5"], "events.h5",
... overwrite=True, add_exposure_times=True)
Notes
-----
Currently, to merge files it is mandated that all of the parameters have the
same values, with the exception of the exposure time parameter "exp_time". If
add_exposure_times=False, the maximum exposure time will be used.
"""
from collections import defaultdict
from pathlib import Path
import h5py
if Path(output_file).exists() and not overwrite:
raise IOError(f"Cannot overwrite existing file {output_file}. "
"If you want to do this, set overwrite=True.")
f_in = h5py.File(input_files[0], "r")
f_out = h5py.File(output_file, "w")
exp_time_key = ""
p_out = f_out.create_group("parameters")
for key, param in f_in["parameters"].items():
if key.endswith("exp_time"):
exp_time_key = key
else:
p_out[key] = param[()]
skip = [exp_time_key] if add_exposure_times else []
for fn in input_files[1:]:
f = h5py.File(fn, "r")
validate_parameters(f_in["parameters"], f["parameters"], skip=skip)
f.close()
f_in.close()
data = defaultdict(list)
tot_exp_time = 0.0
for i, fn in enumerate(input_files):
f = h5py.File(fn, "r")
if add_exposure_times:
tot_exp_time += f["/parameters"][exp_time_key][()]
else:
tot_exp_time = max(tot_exp_time, f["/parameters"][exp_time_key][()])
for key in f["/data"]:
data[key].append(f["/data"][key][:])
f.close()
p_out[exp_time_key] = tot_exp_time
d = f_out.create_group("data")
for k in data:
d.create_dataset(k, data=np.concatenate(data[k]))
f_out.close()
|
nilq/baby-python
|
python
|
import PIL
from PIL import Image, ImageOps, ImageEnhance
import numpy as np
import sys
import os, cv2
import csv
import pandas as pd
myDir = r"..\GujOCR\Output"  # raw string so the backslashes are taken literally
#Useful function
def createFileList(myDir, format='.png'):
fileList = []
print(myDir)
for root, dirs, files in os.walk(myDir, topdown=False):
for name in files:
if name.endswith(format):
fullName = os.path.join(root, name)
fileList.append(fullName)
return fileList
columnNames = list()
for i in range(784):
pixel = 'p'
pixel += str(i)
columnNames.append(pixel)
l = os.listdir(myDir)
print(l)
dic = {val : idx for idx, val in enumerate(l)}
print(dic)
train_data = pd.DataFrame(columns = columnNames)
train_data.to_csv("trainset28.csv",index = False)
label_count = list()
print(len(l))
for i in range(len(l)):
mydir = 'OUTPUT/' + l[i]
fileList = createFileList(mydir)
for file in fileList:
# print("hello")
img_file = Image.open(file) # imgfile.show()
width, height = img_file.size
format = img_file.format
mode = img_file.mode
label_count.append(dic[l[i]])
inverted_image = img_file.convert('RGB')
im_invert = ImageOps.invert(inverted_image)
size = (28, 28)
new_image = img_file.resize(size)
enhancer = ImageEnhance.Contrast(new_image)
new_image = enhancer.enhance(3)
img_grey = new_image.convert('L')
        value = np.asarray(img_grey.getdata(), dtype=int).reshape((img_grey.size[1], img_grey.size[0]))
value = value.flatten()
with open("trainset28.csv", 'a', newline='') as f:
writer = csv.writer(f)
writer.writerow(value)
read_data = pd.read_csv('trainset28.csv')
read_data['Label'] = label_count
print(read_data)
#Write back dataframe to csv
read_data.to_csv("training_label28.csv",index = False)
print(train_data)
|
nilq/baby-python
|
python
|
from __future__ import unicode_literals
import frappe
import re
def execute():
for srl in frappe.get_all('Salary Slip',['name']):
if srl.get("name"):
            substring = re.search(r"/(.*?)/", srl.get("name")).group(1)  # employee code between the first two slashes
emp = frappe.db.get_value('Employee',{'name':substring},'user_id')
if "Employee" in frappe.get_roles(emp) and "HR Manager" not in frappe.get_roles(emp) and len(frappe.get_all('User Permission',filters={'allow':"Salary Slip",'for_value':srl.get("name"),'user':emp}))==0:
print(emp,"***",substring)
permission=frappe.new_doc('User Permission')
permission.user= emp
permission.allow= 'Salary Slip'
permission.for_value= srl.get("name")
permission.apply_to_all_doctypes = 0
permission.applicable_for = 'Salary Slip'
permission.save()
#homzhub_customization.homzhub_customization.patches.set_salary_permission.execute
|
nilq/baby-python
|
python
|
# coding: utf-8
r"""timeout decorators for Windows and Linux
Beware that the Windows and the Linux decorator versions
do not raise the same exception if the timeout is exceeded
"""
import platform
# import errno
# import os
import signal
import multiprocessing
import multiprocessing.pool
from functools import wraps
# Python 2 compatibility.
try:
TimeoutError
except NameError:
TimeoutError = RuntimeError
def timeout(max_timeout):
r"""Use the right timeout based on platform.system()
Parameters
----------
max_timeout : int or float
The maximum time in seconds for the decorated function to complete
"""
if platform.system() == "Windows":
return timeout_windows(max_timeout)
elif platform.system() == "Linux":
return timeout_linux(max_timeout)
else:
raise NotImplementedError
def timeout_windows(max_timeout):
"""Timeout decorator, parameter in seconds.
Parameters
----------
max_timeout : int or float
The maximum time in seconds for the decorated function to complete
Raises
------
multiprocessing.TimeoutError
if the function call exceeds max_timeout
"""
def timeout_decorator(item):
"""Wrap the original function."""
@wraps(item)
def func_wrapper(*args, **kwargs):
"""Closure for function."""
pool = multiprocessing.pool.ThreadPool(processes=1)
async_result = pool.apply_async(item, args, kwargs)
# raises a TimeoutError if execution exceeds max_timeout
return async_result.get(max_timeout)
return func_wrapper
return timeout_decorator
# class TimeoutError(Exception):
# r"""Error for the Linux version of the timeout decorator"""
# pass
def timeout_linux(max_timeout):
"""Timeout decorator, parameter in seconds.
Parameters
----------
max_timeout : int or float
The maximum time in seconds for the decorated function to complete
Raises
------
TimeoutError
if the function call exceeds max_timeout
"""
def decorator(func):
def _handle_timeout(signum, frame):
raise TimeoutError
def wrapper(*args, **kwargs):
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(max_timeout)
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0)
return result
return wraps(func)(wrapper)
return decorator
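# Usage sketch (illustrative values; the decorated function and limit are
# not part of this module):
#
#   @timeout(2)
#   def slow():
#       import time
#       time.sleep(5)
#
#   slow()  # raises TimeoutError on Linux and
#           # multiprocessing.TimeoutError on Windows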
|
nilq/baby-python
|
python
|
import argparse
import calendar
import dotenv
import json
import libraries.api
import libraries.handle_file
import libraries.record
import logging
import logging.config
import os
import pandas as pd
import requests
import time
from csv import writer
from oauthlib.oauth2 import BackendApplicationClient, TokenExpiredError
from requests.auth import HTTPBasicAuth
from requests_oauthlib import OAuth2Session
from typing import Callable, Dict, Set, TextIO
dotenv_file = dotenv.find_dotenv()
dotenv.load_dotenv(dotenv_file)
logging.config.fileConfig('logging.conf', disable_existing_loggers=False)
logger = logging.getLogger(__name__)
class RecordsBuffer:
"""
A buffer of records. DO NOT INSTANTIATE THIS CLASS DIRECTLY.
Instead, instantiate one of its subclasses:
- AlmaRecordsBuffer: A buffer of records with MMS ID and OCLC number
- WorldCatRecordsBuffer: A buffer of records with OCLC number only
Attributes
----------
auth: HTTPBasicAuth
The HTTP Basic Auth object used when requesting an access token
oauth_session: OAuth2Session
The OAuth 2 Session object used to request an access token and make HTTP
requests to the WorldCat Metadata API (note that the OAuth2Session class
is a subclass of requests.Session)
Methods
-------
get_transaction_id()
Builds transaction_id to include with WorldCat Metadata API request
make_api_request(api_request, api_url)
Makes the specified API request to the WorldCat Metadata API
"""
def __init__(self) -> None:
"""Initializes a RecordsBuffer object by creating its OAuth2Session."""
logger.debug('Started RecordsBuffer constructor...')
self.contents = None
logger.debug(f'{type(self.contents)=}')
# Create OAuth2Session for WorldCat Metadata API
logger.debug('Creating OAuth2Session...')
self.auth = HTTPBasicAuth(os.environ['WORLDCAT_METADATA_API_KEY'],
os.environ['WORLDCAT_METADATA_API_SECRET'])
logger.debug(f'{type(self.auth)=}')
client = BackendApplicationClient(
client_id=os.environ['WORLDCAT_METADATA_API_KEY'],
scope=['WorldCatMetadataAPI refresh_token'])
token = {
'access_token': os.environ['WORLDCAT_METADATA_API_ACCESS_TOKEN'],
'expires_at': float(
os.environ['WORLDCAT_METADATA_API_ACCESS_TOKEN_EXPIRES_AT']),
'token_type': os.environ['WORLDCAT_METADATA_API_ACCESS_TOKEN_TYPE']
}
self.oauth_session = OAuth2Session(client=client, token=token)
logger.debug(f'{type(self.oauth_session)=}')
logger.debug('OAuth2Session created.')
logger.debug('Completed RecordsBuffer constructor.')
def __len__(self) -> int:
"""Returns the number of records in this records buffer.
Returns
-------
int
The number of records in this records buffer
Raises
------
TypeError
If the contents attribute is not defined (i.e. is None)
"""
return len(self.contents)
def get_transaction_id(self) -> str:
"""Builds transaction_id to include with WorldCat Metadata API request.
Returns
-------
str
The transaction_id
"""
transaction_id = ''
if ('OCLC_INSTITUTION_SYMBOL' in os.environ
or 'WORLDCAT_PRINCIPAL_ID' in os.environ):
# Add OCLC Institution Symbol, if present
transaction_id = os.getenv('OCLC_INSTITUTION_SYMBOL', '')
if transaction_id != '':
transaction_id += '_'
# Add timestamp and, if present, your WorldCat Principal ID
transaction_id += time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
if 'WORLDCAT_PRINCIPAL_ID' in os.environ:
transaction_id += f"_{os.getenv('WORLDCAT_PRINCIPAL_ID')}"
logger.debug(f'{transaction_id=}')
return transaction_id
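    # Example (sketch): with OCLC_INSTITUTION_SYMBOL=ABC and
    # WORLDCAT_PRINCIPAL_ID=principal-123 set in the environment,
    # get_transaction_id() yields something like
    # 'ABC_2021-09-30T22:43:07Z_principal-123'.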
def make_api_request(
self,
api_request: Callable[..., requests.models.Response],
api_url: str) -> requests.models.Response:
"""Makes the specified API request to the WorldCat Metadata API.
Parameters
----------
api_request: Callable[..., requests.models.Response]
The specific WorldCat Metadata API request to make
api_url: str
The specific WorldCat Metadata API URL to use
Returns
-------
requests.models.Response
The API response returned by the api_request function
"""
transaction_id = self.get_transaction_id()
if transaction_id != '':
api_url += f"&transactionID={transaction_id}"
headers = {"Accept": "application/json"}
response = None
# Make API request
try:
response = api_request(api_url, headers=headers)
except TokenExpiredError as e:
logger.debug(f'Access token {self.oauth_session.access_token} '
f'expired. Requesting new access token...')
datetime_format = '%Y-%m-%d %H:%M:%SZ'
# Confirm the epoch is January 1, 1970, 00:00:00 (UTC).
# See https://docs.python.org/3.8/library/time.html for an
# explanation of the term 'epoch'.
system_epoch = time.strftime(datetime_format, time.gmtime(0))
expected_epoch = '1970-01-01 00:00:00Z'
if system_epoch != expected_epoch:
logger.warning(f"The system's epoch ({system_epoch}) is not "
f"equal to the expected epoch ({expected_epoch}). There "
f"may therefore be issues in determining whether the "
f"WorldCat Metadata API's refresh token has expired.")
# Convert the WORLDCAT_METADATA_API_REFRESH_TOKEN_EXPIRES_AT value
# to a float representing seconds since the epoch.
# Note that the WORLDCAT_METADATA_API_REFRESH_TOKEN_EXPIRES_AT value
# is a string in ISO 8601 format, except that it substitutes the 'T'
# delimiter (which separates the date from the time) for a space, as
# in '2021-09-30 22:43:07Z'.
refresh_token_expires_at = 0.0
if 'WORLDCAT_METADATA_API_REFRESH_TOKEN_EXPIRES_AT' in os.environ:
logger.debug(f'WORLDCAT_METADATA_API_REFRESH_TOKEN_EXPIRES_AT '
f'variable exists in .env file, so using this value: '
f'{os.getenv("WORLDCAT_METADATA_API_REFRESH_TOKEN_EXPIRES_AT")}'
f' (UTC), which will be converted to seconds since the '
f'epoch')
refresh_token_expires_at = calendar.timegm(
time.strptime(
os.getenv(
'WORLDCAT_METADATA_API_REFRESH_TOKEN_EXPIRES_AT'),
datetime_format))
refresh_token_expires_in = refresh_token_expires_at - time.time()
logger.debug(f'{refresh_token_expires_at=} seconds since the epoch')
logger.debug(f'Current time: {time.time()} seconds since the epoch,'
f' which is {time.strftime(datetime_format, time.gmtime())} '
f'(UTC). So the Refresh Token (if one exists) expires in '
f'{refresh_token_expires_in} seconds.')
# Obtain a new Access Token
token = None
if ('WORLDCAT_METADATA_API_REFRESH_TOKEN' in os.environ
and refresh_token_expires_in > 25):
# Use Refresh Token to request new Access Token
token = self.oauth_session.refresh_token(
os.environ['OCLC_AUTHORIZATION_SERVER_TOKEN_URL'],
refresh_token=os.getenv(
'WORLDCAT_METADATA_API_REFRESH_TOKEN'),
auth=self.auth)
else:
# Request Refresh Token and Access Token
token = self.oauth_session.fetch_token(
os.environ['OCLC_AUTHORIZATION_SERVER_TOKEN_URL'],
auth=self.auth)
logger.debug(f"Refresh token granted ({token['refresh_token']})"
f", which expires at {token['refresh_token_expires_at']}")
# Set Refresh Token environment variables and update .env file
libraries.handle_file.set_env_var(
'WORLDCAT_METADATA_API_REFRESH_TOKEN',
token['refresh_token'])
libraries.handle_file.set_env_var(
'WORLDCAT_METADATA_API_REFRESH_TOKEN_EXPIRES_AT',
token['refresh_token_expires_at'])
logger.debug(f'{token=}')
logger.debug(f'New access token granted: '
f'{self.oauth_session.access_token}')
# Set environment variables based on new Access Token info and
# update .env file accordingly
libraries.handle_file.set_env_var(
'WORLDCAT_METADATA_API_ACCESS_TOKEN',
token['access_token'])
libraries.handle_file.set_env_var(
'WORLDCAT_METADATA_API_ACCESS_TOKEN_TYPE',
token['token_type'])
logger.debug(f"{token['expires_at']=}")
libraries.handle_file.set_env_var(
'WORLDCAT_METADATA_API_ACCESS_TOKEN_EXPIRES_AT',
str(token['expires_at']))
response = api_request(api_url, headers=headers)
libraries.api.log_response_and_raise_for_status(response)
return response
class AlmaRecordsBuffer(RecordsBuffer):
"""
A buffer of Alma records, each with an MMS ID and OCLC number.
Attributes
----------
oclc_num_dict: Dict[str, str]
A dictionary containing each record's original OCLC number (key) and its
MMS ID (value)
records_with_current_oclc_num: TextIO
The CSV file object where records with a current OCLC number are added
records_with_current_oclc_num_writer: writer
The CSV writer object for the records_with_current_oclc_num file object
records_with_old_oclc_num: TextIO
The CSV file object where records with an old OCLC number are added
records_with_old_oclc_num_writer: writer
The CSV writer object for the records_with_old_oclc_num file object
records_with_errors: TextIO
The CSV file object where records are added if an error is encountered
records_with_errors_writer: writer
The CSV writer object for the records_with_errors file object
Methods
-------
add(orig_oclc_num, mms_id)
Adds the given record to this buffer (i.e. to oclc_num_dict)
process_records(results)
Checks each record in oclc_num_dict for the current OCLC number
remove_all_records()
Removes all records from this buffer (i.e. clears oclc_num_dict)
"""
def __init__(self,
records_with_current_oclc_num: TextIO,
records_with_old_oclc_num: TextIO,
records_with_errors: TextIO) -> None:
"""Instantiates an AlmaRecordsBuffer object.
Parameters
----------
records_with_current_oclc_num: TextIO
The CSV file object where records with a current OCLC number are
added
records_with_old_oclc_num: TextIO
The CSV file object where records with an old OCLC number are added
records_with_errors: TextIO
The CSV file object where records are added if an error is
encountered
"""
logger.debug('Started AlmaRecordsBuffer constructor...')
self.oclc_num_dict = {}
logger.debug(f'{type(self.oclc_num_dict)=}')
self.records_with_current_oclc_num = records_with_current_oclc_num
self.records_with_current_oclc_num_writer = \
writer(records_with_current_oclc_num)
self.records_with_old_oclc_num = records_with_old_oclc_num
self.records_with_old_oclc_num_writer = \
writer(records_with_old_oclc_num)
self.records_with_errors = records_with_errors
self.records_with_errors_writer = writer(records_with_errors)
# Create OAuth2Session for WorldCat Metadata API
super().__init__()
self.contents = self.oclc_num_dict
logger.debug(f'{type(self.contents)=}')
logger.debug('Completed AlmaRecordsBuffer constructor.\n')
def __str__(self) -> str:
"""Returns a string listing the contents of this records buffer.
        Specifically, this method lists the contents of the OCLC Number
dictionary.
Returns
-------
str
The contents of the OCLC Number dictionary
"""
return (f'Records buffer contents ({{OCLC Number: MMS ID}}): '
f'{self.oclc_num_dict}')
def add(self, orig_oclc_num: str, mms_id: str) -> None:
"""Adds the given record to this buffer (i.e. to oclc_num_dict).
Parameters
----------
orig_oclc_num: str
The record's original OCLC number
mms_id: str
The record's MMS ID
Raises
------
AssertionError
If the original OCLC number is already in the OCLC Number dictionary
"""
assert orig_oclc_num not in self.oclc_num_dict, (f'OCLC number '
f'{orig_oclc_num} already exists in records buffer with MMS ID '
f'{self.oclc_num_dict[orig_oclc_num]}')
self.oclc_num_dict[orig_oclc_num] = mms_id
logger.debug(f'Added {orig_oclc_num} to records buffer.')
def process_records(self, results: Dict[str, int]) -> None:
"""Checks each record in oclc_num_dict for the current OCLC number.
This is done by making a GET request to the WorldCat Metadata API:
https://worldcat.org/bib/checkcontrolnumbers?oclcNumbers={oclcNumbers}
Parameters
----------
results: Dict[str, int]
A dictionary containing the total number of records in the following
categories: records with the current OCLC number, records with an
old OCLC number, records with errors
Raises
------
json.decoder.JSONDecodeError
If there is an error decoding the API response
"""
logger.debug('Started processing records buffer...')
api_response_error_msg = ('Problem with Get Current OCLC Number API '
'response')
# Build URL for API request
url = (f"{os.environ['WORLDCAT_METADATA_SERVICE_URL']}"
f"/bib/checkcontrolnumbers"
f"?oclcNumbers={','.join(self.oclc_num_dict.keys())}")
try:
api_response = super().make_api_request(
self.oauth_session.get,
url
)
json_response = api_response.json()
logger.debug(f'Get Current OCLC Number API response:\n'
f'{json.dumps(json_response, indent=2)}')
for record_index, record in enumerate(json_response['entry'],
start=1):
found_requested_oclc_num = record['found']
is_current_oclc_num = not record['merged']
# Look up MMS ID based on OCLC number
mms_id = self.oclc_num_dict[record['requestedOclcNumber']]
logger.debug(f'Started processing record #{record_index} (OCLC '
f'number {record["requestedOclcNumber"]})...')
logger.debug(f'{is_current_oclc_num=}')
if not found_requested_oclc_num:
logger.exception(f'{api_response_error_msg}: OCLC number '
f'{record["requestedOclcNumber"]} not found')
results['num_records_with_errors'] += 1
# Add record to
# records_with_errors_when_getting_current_oclc_number.csv
if self.records_with_errors.tell() == 0:
# Write header row
self.records_with_errors_writer.writerow([
'MMS ID',
'OCLC Number',
'Error'
])
self.records_with_errors_writer.writerow([
mms_id,
record['requestedOclcNumber'],
f'{api_response_error_msg}: OCLC number not found'
])
elif is_current_oclc_num:
results['num_records_with_current_oclc_num'] += 1
# Add record to already_has_current_oclc_number.csv
if self.records_with_current_oclc_num.tell() == 0:
# Write header row
self.records_with_current_oclc_num_writer.writerow([
'MMS ID',
'Current OCLC Number'
])
self.records_with_current_oclc_num_writer.writerow([
mms_id,
record['currentOclcNumber']
])
else:
results['num_records_with_old_oclc_num'] += 1
# Add record to needs_current_oclc_number.csv
if self.records_with_old_oclc_num.tell() == 0:
# Write header row
self.records_with_old_oclc_num_writer.writerow([
'MMS ID',
'Current OCLC Number',
'Original OCLC Number'
])
self.records_with_old_oclc_num_writer.writerow([
mms_id,
record['currentOclcNumber'],
record['requestedOclcNumber']
])
logger.debug(f'Finished processing record #{record_index}.\n')
except json.decoder.JSONDecodeError:
# except (requests.exceptions.JSONDecodeError,
# json.decoder.JSONDecodeError):
logger.exception(f'{api_response_error_msg}: Error decoding JSON')
logger.exception(f'{api_response.text=}')
# Re-raise exception so that the script is halted (since future API
# requests may result in the same error)
raise
logger.debug('Finished processing records buffer.')
def remove_all_records(self) -> None:
"""Removes all records from this buffer (i.e. clears oclc_num_dict)."""
self.oclc_num_dict.clear()
logger.debug(f'Cleared records buffer.')
logger.debug(self.__str__() + '\n')
class WorldCatRecordsBuffer(RecordsBuffer):
"""
A buffer of WorldCat records, each with an OCLC number.
Attributes
----------
oclc_num_set: Set[str]
A set containing each record's OCLC number
records_with_holding_already_set: TextIO
The CSV file object where records whose holding is already set are added
(i.e. records that were not updated)
records_with_holding_already_set_writer: writer
The CSV writer object for the records_with_holding_already_set file
object
records_with_holding_successfully_set: TextIO
The CSV file object where records whose holding was successfully set are
added (i.e. records that were successfully updated)
records_with_holding_successfully_set_writer: writer
The CSV writer object for the records_with_holding_successfully_set file
object
records_with_errors: TextIO
The CSV file object where records are added if an error is encountered
records_with_errors_writer: writer
The CSV writer object for the records_with_errors file object
Methods
-------
add(oclc_num)
Adds the given record to this buffer (i.e. to oclc_num_set)
process_records(results)
Attempts to set the institution holding for each record in oclc_num_set
remove_all_records()
Removes all records from this buffer (i.e. clears oclc_num_set)
"""
def __init__(self,
records_with_holding_already_set: TextIO,
records_with_holding_successfully_set: TextIO,
records_with_errors: TextIO) -> None:
"""Instantiates a WorldCatRecordsBuffer object.
Parameters
----------
records_with_holding_already_set: TextIO
The CSV file object where records whose holding is already set are
added (i.e. records that were not updated)
records_with_holding_successfully_set: TextIO
The CSV file object where records whose holding was successfully set
are added (i.e. records that were successfully updated)
records_with_errors: TextIO
The CSV file object where records are added if an error is
encountered
"""
logger.debug('Started WorldCatRecordsBuffer constructor...')
self.oclc_num_set = set()
logger.debug(f'{type(self.oclc_num_set)=}')
self.records_with_holding_already_set = records_with_holding_already_set
self.records_with_holding_already_set_writer = \
writer(records_with_holding_already_set)
self.records_with_holding_successfully_set = \
records_with_holding_successfully_set
self.records_with_holding_successfully_set_writer = \
writer(records_with_holding_successfully_set)
self.records_with_errors = records_with_errors
self.records_with_errors_writer = writer(records_with_errors)
# Create OAuth2Session for WorldCat Metadata API
super().__init__()
self.contents = self.oclc_num_set
logger.debug(f'{type(self.contents)=}')
logger.debug('Completed WorldCatRecordsBuffer constructor.\n')
def __str__(self) -> str:
"""Returns a string listing the contents of this records buffer.
        Specifically, this method lists the contents of the OCLC Number set.
Returns
-------
str
The contents of the OCLC Number set
"""
return (f'Records buffer contents (OCLC Numbers): {self.oclc_num_set}')
def add(self, oclc_num: str) -> None:
"""Adds the given record to this buffer (i.e. to oclc_num_set).
Parameters
----------
oclc_num: str
The record's OCLC number
Raises
------
AssertionError
If the OCLC number is already in the OCLC Number set
"""
assert oclc_num not in self.oclc_num_set, (f'OCLC number {oclc_num} '
f'already exists in records buffer')
self.oclc_num_set.add(oclc_num)
logger.debug(f'Added {oclc_num} to records buffer.')
def process_records(self, results: Dict[str, int]) -> None:
"""Attempts to set the holding for each record in oclc_num_set.
This is done by making a POST request to the WorldCat Metadata API:
https://worldcat.org/ih/datalist?oclcNumbers={oclcNumbers}
Parameters
----------
results: Dict[str, int]
A dictionary containing the total number of records in the following
categories: records successfully set, records already set, records
with errors
Raises
------
json.decoder.JSONDecodeError
If there is an error decoding the API response
"""
logger.debug('Started processing records buffer...')
api_response_error_msg = ('Problem with Set Holding API response')
# Build URL for API request
url = (f"{os.environ['WORLDCAT_METADATA_SERVICE_URL']}"
f"/ih/datalist?oclcNumbers={','.join(self.oclc_num_set)}")
try:
api_response = super().make_api_request(
self.oauth_session.post,
url
)
json_response = api_response.json()
logger.debug(f'Set Holding API response:\n'
f'{json.dumps(json_response, indent=2)}')
for record_index, record in enumerate(json_response['entry'],
start=1):
is_current_oclc_num = (record['requestedOclcNumber']
== record['currentOclcNumber'])
new_oclc_num = ''
oclc_num_msg = ''
if not is_current_oclc_num:
new_oclc_num = record['currentOclcNumber']
oclc_num_msg = (f'OCLC number '
f'{record["requestedOclcNumber"]} has been updated to '
f'{new_oclc_num}. Consider updating Alma record.')
logger.warning(oclc_num_msg)
oclc_num_msg = f'Warning: {oclc_num_msg}'
logger.debug(f'Started processing record #{record_index} (OCLC '
f'number {record["requestedOclcNumber"]})...')
logger.debug(f'{is_current_oclc_num=}')
logger.debug(f'{record["httpStatusCode"]=}')
logger.debug(f'{record["errorDetail"]=}')
if record['httpStatusCode'] == 'HTTP 200 OK':
results['num_records_successfully_set'] += 1
# Add record to records_with_holding_successfully_set.csv
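                    # The CSV file is opened in append mode, so tell() == 0
                    # means the file is empty and still needs its header row
                    # (the same idiom is used for the other output files below)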
if self.records_with_holding_successfully_set.tell() == 0:
# Write header row
self.records_with_holding_successfully_set_writer.writerow([
'Requested OCLC Number',
'New OCLC Number (if applicable)',
'Warning'
])
self.records_with_holding_successfully_set_writer.writerow([
record['requestedOclcNumber'],
new_oclc_num,
oclc_num_msg
])
elif record['httpStatusCode'] == 'HTTP 409 Conflict':
results['num_records_already_set'] += 1
# Add record to records_with_holding_already_set.csv
if self.records_with_holding_already_set.tell() == 0:
# Write header row
self.records_with_holding_already_set_writer.writerow([
'Requested OCLC Number',
'New OCLC Number (if applicable)',
'Error'
])
self.records_with_holding_already_set_writer.writerow([
record['requestedOclcNumber'],
new_oclc_num,
(f"{api_response_error_msg}: {record['errorDetail']}. "
f"{oclc_num_msg}")
])
else:
logger.exception(f"{api_response_error_msg} for OCLC "
f"Number {record['requestedOclcNumber']}: "
f"{record['errorDetail']} ({record['httpStatusCode']})."
)
results['num_records_with_errors'] += 1
# Add record to records_with_errors_when_setting_holding.csv
if self.records_with_errors.tell() == 0:
# Write header row
self.records_with_errors_writer.writerow([
'Requested OCLC Number',
'New OCLC Number (if applicable)',
'Error'
])
self.records_with_errors_writer.writerow([
record['requestedOclcNumber'],
new_oclc_num,
(f"{api_response_error_msg}: {record['httpStatusCode']}"
f": {record['errorDetail']}. {oclc_num_msg}")
])
logger.debug(f'Finished processing record #{record_index}.\n')
        # requests.exceptions.JSONDecodeError could also be caught here, but
        # that exception only exists in requests >= 2.27
        except json.decoder.JSONDecodeError:
logger.exception(f'{api_response_error_msg}: Error decoding JSON')
logger.exception(f'{api_response.text=}')
# Re-raise exception so that the script is halted (since future API
# requests may result in the same error)
raise
logger.debug('Finished processing records buffer.')

    def remove_all_records(self) -> None:
"""Removes all records from this buffer (i.e. clears oclc_num_set)."""
self.oclc_num_set.clear()
        logger.debug('Cleared records buffer.')
logger.debug(self.__str__() + '\n')


def init_argparse() -> argparse.ArgumentParser:
"""Initializes and returns ArgumentParser object."""
parser = argparse.ArgumentParser(
        usage=('%(prog)s [-h] [-v] --input_file INPUT_FILE --operation '
               '{get_current_oclc_number,set_holding}'),
description=('For each row in the input file, perform the specified '
'operation.')
)
parser.add_argument(
'-v', '--version', action='version',
version=f'{parser.prog} version 1.0.0'
)
parser.add_argument(
'--input_file',
required=True,
type=str,
help=('the name and path of the file to be processed, which must be in '
'CSV format (e.g. '
'csv/master_list_records_with_potentially_old_oclc_num.csv)')
)
parser.add_argument(
'--operation',
required=True,
choices=['get_current_oclc_number', 'set_holding'],
help=('the operation to be performed on each row of the input file '
'(either get_current_oclc_number or set_holding)')
)
return parser
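

# Example invocation (the script filename and the environment variable values
# below are illustrative, not part of this module; the variable names are the
# ones this script actually reads):
#
#   WORLDCAT_METADATA_SERVICE_URL='https://worldcat.org' \
#   WORLDCAT_METADATA_API_MAX_RECORDS_PER_REQUEST=50 \
#   python process_worldcat_records.py \
#       --input_file csv/master_list_records_with_potentially_old_oclc_num.csv \
#       --operation set_holding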
def main() -> None:
"""Performs the specified operation on every record in the input file.
Gathers the maximum OCLC numbers possible before sending the appropriate
request to the WorldCat Metadata API.
Operations:
- get_current_oclc_number
For each row, check whether the given OCLC number is the current one:
-- If so, then add the record to csv/already_has_current_oclc_number.csv
-- If not, then add the record to csv/needs_current_oclc_number.csv
-- If an error is encountered, then add the record to
csv/records_with_errors_when_getting_current_oclc_number.csv
- set_holding
For each row, set holding for the given OCLC number
-- If holding is set successfully, then add the record to
csv/records_with_holding_successfully_set.csv
-- If holding was already set, then add the record to
csv/records_with_holding_already_set.csv
-- If an error is encountered, then add the record to
csv/records_with_errors_when_setting_holding.csv
"""
# Initialize parser and parse command-line args
parser = init_argparse()
args = parser.parse_args()
# Convert input file into pandas DataFrame
data = None
if args.input_file.endswith('.csv'):
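        # dtype='str' keeps identifiers like OCLC numbers as text (preserving
        # any leading zeros), and keep_default_na=False reads empty cells as
        # '' instead of NaN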
data = pd.read_csv(args.input_file, dtype='str', keep_default_na=False)
else:
        # No exception is active here, so use logger.error (not
        # logger.exception)
        logger.error(f'Invalid format for input file ({args.input_file}). '
                     f'Must be a CSV file (.csv)')
return
records_already_processed = set()
logger.debug(f'{records_already_processed=}\n')
logger.debug(f'{args.operation=}')
results = None
filename_for_records_to_update = None
filename_for_records_with_no_update_needed = None
filename_for_records_with_errors = None
if args.operation == 'get_current_oclc_number':
results = {
'num_records_with_current_oclc_num': 0,
'num_records_with_old_oclc_num': 0,
'num_records_with_errors': 0
}
filename_for_records_to_update = 'csv/needs_current_oclc_number.csv'
filename_for_records_with_no_update_needed = \
'csv/already_has_current_oclc_number.csv'
filename_for_records_with_errors = \
'csv/records_with_errors_when_getting_current_oclc_number.csv'
else:
results = {
'num_records_successfully_set': 0,
'num_records_already_set': 0,
'num_records_with_errors': 0
}
filename_for_records_to_update = \
'csv/records_with_holding_successfully_set.csv'
filename_for_records_with_no_update_needed = \
'csv/records_with_holding_already_set.csv'
filename_for_records_with_errors = \
'csv/records_with_errors_when_setting_holding.csv'
with open(filename_for_records_to_update, mode='a',
newline='') as records_to_update, \
open(filename_for_records_with_no_update_needed, mode='a',
newline='') as records_with_no_update_needed, \
open(filename_for_records_with_errors, mode='a',
newline='') as records_with_errors:
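        # All three output files are opened in append mode so that repeated
        # runs accumulate results; header rows are written only when a file is
        # still empty (checked via tell() == 0 before each first write)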
records_with_errors_writer = writer(records_with_errors)
records_buffer = None
if args.operation == 'get_current_oclc_number':
records_buffer = AlmaRecordsBuffer(
records_with_no_update_needed,
records_to_update,
records_with_errors
)
else:
records_buffer = WorldCatRecordsBuffer(
records_with_no_update_needed,
records_to_update,
records_with_errors
)
logger.debug(f'{type(records_buffer)=}')
logger.debug(records_buffer)
logger.debug(f'{type(records_buffer.contents)=}')
logger.debug(f'{len(records_buffer)=}\n')
        # Loop over each row in the DataFrame and perform the specified
        # operation
for index, row in data.iterrows():
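            # index is 0-based and the input file has a header row, so
            # index + 2 is the row number as it appears in a spreadsheet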
logger.debug(f'Started processing row {index + 2} of input file...')
error_occurred = False
error_msg = None
try:
mms_id = None
orig_oclc_num = None
if args.operation == 'get_current_oclc_number':
mms_id = row['MMS ID']
orig_oclc_num = \
row["Unique OCLC Number from Alma Record's 035 $a"]
mms_id = libraries.record.get_valid_record_identifier(
mms_id,
'MMS ID'
)
else:
orig_oclc_num = row['OCLC Number']
# Make sure OCLC Number is valid
orig_oclc_num = libraries.record.get_valid_record_identifier(
orig_oclc_num, 'OCLC number')
orig_oclc_num = \
libraries.record.remove_leading_zeros(orig_oclc_num)
if args.operation == 'get_current_oclc_number':
assert mms_id not in records_already_processed, (f'Record '
f'with MMS ID {mms_id} has already been processed.')
records_already_processed.add(mms_id)
else:
assert orig_oclc_num not in records_already_processed, (
f'Record with OCLC Number {orig_oclc_num} has already '
f'been processed.')
records_already_processed.add(orig_oclc_num)
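                # Accumulate OCLC numbers until the buffer reaches the API's
                # per-request maximum, then flush the whole batch in a single
                # request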
if len(records_buffer) < int(os.environ[
'WORLDCAT_METADATA_API_MAX_RECORDS_PER_REQUEST']):
if args.operation == 'get_current_oclc_number':
records_buffer.add(orig_oclc_num, mms_id)
else:
records_buffer.add(orig_oclc_num)
else:
# records_buffer has the maximum records possible per API
# request, so process these records
logger.debug('Records buffer is full.\n')
records_buffer.process_records(results)
# Now that its records have been processed, clear buffer
records_buffer.remove_all_records()
# Add current row's data to the empty buffer
if args.operation == 'get_current_oclc_number':
records_buffer.add(orig_oclc_num, mms_id)
else:
records_buffer.add(orig_oclc_num)
except AssertionError as assert_err:
if args.operation == 'get_current_oclc_number':
logger.exception(f"An assertion error occurred when "
f"processing MMS ID '{row['MMS ID']}' (at row "
f"{index + 2} of input file): {assert_err}")
else:
logger.exception(f"An assertion error occurred when "
f"processing OCLC Number '{row['OCLC Number']}' (at "
f"row {index + 2} of input file): {assert_err}")
error_msg = f"Assertion Error: {assert_err}"
error_occurred = True
finally:
if error_occurred:
results['num_records_with_errors'] += 1
# Add record to records_with_errors spreadsheet
if args.operation == 'get_current_oclc_number':
if records_with_errors.tell() == 0:
# Write header row
records_with_errors_writer.writerow([
'MMS ID',
'OCLC Number',
'Error'
])
records_with_errors_writer.writerow([
mms_id,
orig_oclc_num,
error_msg
])
else:
if records_with_errors.tell() == 0:
# Write header row
records_with_errors_writer.writerow([
'Requested OCLC Number',
'New OCLC Number (if applicable)',
'Error'
])
records_with_errors_writer.writerow([
orig_oclc_num,
'',
error_msg
])
logger.debug(f'Finished processing row {index + 2} of input '
f'file.\n')
# If records_buffer is not empty, process remaining records
if len(records_buffer) > 0:
records_buffer.process_records(results)
# logger.debug(f'{records_already_processed=}\n')
logger.debug(f'{len(records_already_processed)=}\n')
print(f'\nEnd of script. Processed {len(data.index)} rows from input file:')
if args.operation == 'get_current_oclc_number':
print(f'- {results["num_records_with_current_oclc_num"]} record(s) '
f'with current OCLC number\n'
f'- {results["num_records_with_old_oclc_num"]} record(s) with '
f'old OCLC number\n'
f'- {results["num_records_with_errors"]} record(s) with errors')
else:
print(f'- {results["num_records_successfully_set"]} record(s) updated, '
f'i.e. holding was successfully set\n'
f'- {results["num_records_already_set"]} record(s) not updated '
f'because holding was already set\n'
f'- {results["num_records_with_errors"]} record(s) with errors')
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|