hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
214af4d061b9762c5bb1ab02a0993d367bed4a37
| 7,114
|
py
|
Python
|
cogs/mod.py
|
cheddar-cheeze/kisak-bot
|
62c292fa510ee4008e350176b4b85b7067994602
|
[
"MIT"
] | 3
|
2018-09-04T02:12:24.000Z
|
2019-07-03T05:31:12.000Z
|
cogs/mod.py
|
cheddar-cheeze/kisak-bot
|
62c292fa510ee4008e350176b4b85b7067994602
|
[
"MIT"
] | 1
|
2018-05-24T23:44:08.000Z
|
2018-05-26T01:33:42.000Z
|
cogs/mod.py
|
cheddar-cheeze/kisak-bot
|
62c292fa510ee4008e350176b4b85b7067994602
|
[
"MIT"
] | 1
|
2020-05-11T09:02:48.000Z
|
2020-05-11T09:02:48.000Z
|
import discord
from discord.ext import commands
from cogs.constants import embed_color
class mod:
    """Moderation cog: kick, ban and mass-PM commands.

    Written against the discord.py 0.16 ("async") API — every action goes
    through ``self.bot`` coroutines (``say``, ``send_typing``,
    ``send_message``, ``kick``, ``ban``, ``delete_message``).
    """

    def __init__(self, bot):
        self.bot = bot

    @commands.command(pass_context=True, no_pm=True)
    async def kick(self, ctx, user: discord.Member=None, *reason):
        """Kick ``user`` with an optional reason.

        Posts a confirmation embed in the invoking channel, DMs an audit
        embed to the server owner and a courtesy embed to the kicked user
        (both best-effort), then performs the kick.
        """
        # BUG FIX: kicking requires the kick_members permission; the original
        # checked ban_members, so "Kick Members"-only moderators were refused.
        if ctx.message.author.server_permissions.kick_members:
            if user is None:
                embed = discord.Embed(title="Error!", description="Please specify a user to kick", color=embed_color)
                await self.bot.say(embed=embed)
            else:
                reason = ' '.join(reason)
                if reason == "":
                    reason = "None was specified"
                # Keep a plain User handle for the DM sent after the kick.
                g_user: discord.User = user
                await self.bot.send_typing(ctx.message.channel)
                # Public confirmation in the invoking channel.
                embed = discord.Embed(title=user.name + " was kicked!", description="", color=embed_color)
                embed.add_field(name="User id", value=user.id, inline=True)
                embed.add_field(name="Reason", value=reason, inline=False)
                embed.add_field(name="Server join date", value=user.joined_at, inline=False)
                embed.add_field(name="Discord join date", value=user.created_at.date(), inline=False)
                embed.set_thumbnail(url=user.avatar_url)
                await self.bot.say(embed=embed)
                # Audit DM for the server owner.
                odm = discord.Embed(title="A member was kicked from your server", description="", color=embed_color)
                odm.add_field(name="Server Name", value=ctx.message.server.name)
                odm.add_field(name="Server id", value=ctx.message.server.id, inline=True)
                odm.add_field(name="User name", value=user.name)
                odm.add_field(name="User id", value=user.id, inline=True)
                odm.add_field(name="Who kicked them", value=ctx.message.author)
                odm.add_field(name="Reason", value=reason, inline=True)
                odm.set_thumbnail(url=user.avatar_url)
                try:
                    await self.bot.send_message(ctx.message.server.owner, embed=odm)
                except discord.DiscordException:
                    # Owner unreachable (DMs closed etc.) — not fatal.
                    pass
                # Courtesy DM to the kicked user.
                dm = discord.Embed(title="You were kicked from a server", description="", color=embed_color)
                dm.set_thumbnail(url=ctx.message.server.icon_url)
                dm.add_field(name="Server name", value=ctx.message.server.name)
                dm.add_field(name="Server id", value=ctx.message.server.id, inline=True)
                dm.add_field(name="Reason", value=reason)
                try:
                    await self.bot.send_message(g_user, embed=dm)
                except discord.DiscordException:
                    pass
                await self.bot.kick(user)
        else:
            embed = discord.Embed(title="Command Error!", description=ctx.message.author.mention + ",you do not have the correct permissions to use this command", color=embed_color)
            await self.bot.say(embed=embed)

    @commands.command(pass_context=True, no_pm=True)
    async def ban(self, ctx, user: discord.Member=None, *reason):
        """Ban ``user`` with an optional reason.

        Mirrors :meth:`kick`: channel confirmation, owner audit DM and a
        DM to the banned user, then the actual ban.
        """
        if ctx.message.author.server_permissions.ban_members:
            if user is None:
                embed = discord.Embed(title="Error!", description="Please specify a user to ban", color=embed_color)
                await self.bot.say(embed=embed)
            else:
                reason = ' '.join(reason)
                if reason == "":
                    reason = "None was specified"
                # Keep a plain User handle for the DM sent after the ban.
                g_user: discord.User = user
                await self.bot.send_typing(ctx.message.channel)
                embed = discord.Embed(title=user.name + ",was banned!", description="", color=embed_color)
                embed.add_field(name="User id", value=user.id, inline=True)
                embed.add_field(name="Reason", value=reason, inline=False)
                embed.add_field(name="Server join date", value=user.joined_at, inline=False)
                embed.add_field(name="Discord join date", value=user.created_at.date(), inline=False)
                embed.set_thumbnail(url=user.avatar_url)
                await self.bot.say(embed=embed)
                # Audit DM for the server owner.
                odm = discord.Embed(title="A member was banned from your server", description="", color=embed_color)
                odm.add_field(name="Server Name", value=ctx.message.server.name)
                odm.add_field(name="Server id", value=ctx.message.server.id, inline=True)
                odm.add_field(name="User name", value=user.name)
                odm.add_field(name="User id", value=user.id, inline=True)
                odm.add_field(name="Who banned them", value=ctx.message.author)
                odm.add_field(name="Reason", value=reason, inline=True)
                odm.set_thumbnail(url=user.avatar_url)
                try:
                    await self.bot.send_message(ctx.message.server.owner, embed=odm)
                except discord.DiscordException:
                    pass
                # Joke-themed DM to the banned user.
                dm = discord.Embed(title="VAC banned from secure server", description="", color=embed_color)
                dm.add_field(name="Server name", value=ctx.message.server.name)
                dm.add_field(name="Server id", value=ctx.message.server.id, inline=True)
                dm.add_field(name="Reason", value=reason)
                dm.set_thumbnail(url="https://astolfo.life/kisak-assets/vac.png")
                try:
                    await self.bot.send_message(g_user, embed=dm)
                except discord.DiscordException:
                    pass
                await self.bot.ban(user)
        else:
            embed = discord.Embed(title="Command Error!", description=ctx.message.author.mention + ",you do not have the correct permissions to use this command", color=embed_color)
            await self.bot.say(embed=embed)

    @commands.command(pass_context=True, no_pm=True)
    async def masspm(self, ctx, *announcement):
        """DM ``announcement`` to every server member; server-owner only."""
        # BUG FIX: the original called ctx.message.delete(), which does not
        # exist in the 0.16 API used throughout this cog (and was not
        # awaited); deletion goes through the client coroutine instead.
        try:
            await self.bot.delete_message(ctx.message)
        except discord.DiscordException:
            # Missing manage_messages or already deleted — carry on.
            pass
        if ctx.message.author == ctx.message.server.owner:
            announcement = ' '.join(announcement)
            if announcement == "":
                embed = discord.Embed(title="Command Error!", description="You must provide a message to send", color=embed_color)
                await self.bot.say(embed=embed)
            else:
                for user in ctx.message.server.members:
                    try:
                        embed = discord.Embed(title="Mass pm from " + ctx.message.author.name + "#" + ctx.message.author.discriminator, description=announcement, color=embed_color)
                        embed.set_thumbnail(url=ctx.message.author.avatar_url)
                        await self.bot.send_message(user, embed=embed)
                    except discord.DiscordException:
                        # Bots and members with closed DMs are skipped.
                        pass
                embed = discord.Embed(title="Completed masspm!", color=embed_color)
                await self.bot.say(embed=embed)
        else:
            embed = discord.Embed(title="Command Error!", description=ctx.message.author.mention + ",you do not have the correct permissions to use this command", color=embed_color)
            await self.bot.say(embed=embed)
def setup(bot):
    """Extension entry point: register the moderation cog with *bot*."""
    cog = mod(bot)
    bot.add_cog(cog)
| 57.837398
| 181
| 0.59165
| 877
| 7,114
| 4.703535
| 0.124287
| 0.065455
| 0.075636
| 0.043636
| 0.838788
| 0.818909
| 0.802909
| 0.792
| 0.792
| 0.792
| 0
| 0
| 0.295193
| 7,114
| 122
| 182
| 58.311475
| 0.822696
| 0
| 0
| 0.700855
| 0
| 0
| 0.12117
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017094
| false
| 0.068376
| 0.025641
| 0
| 0.051282
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
215ff0556e9187b1bc00bf6f118b203033c4b464
| 13,285
|
py
|
Python
|
api/views/optimizer.py
|
maravger/central-controller
|
fa3b7bb20dd1ebd81538b6732c3748f825925a48
|
[
"MIT"
] | 1
|
2021-01-10T22:16:08.000Z
|
2021-01-10T22:16:08.000Z
|
api/views/optimizer.py
|
maravger/central-controller
|
fa3b7bb20dd1ebd81538b6732c3748f825925a48
|
[
"MIT"
] | 4
|
2018-09-05T15:14:49.000Z
|
2021-06-10T20:33:41.000Z
|
api/views/optimizer.py
|
maravger/central-controller
|
fa3b7bb20dd1ebd81538b6732c3748f825925a48
|
[
"MIT"
] | 1
|
2019-05-13T18:03:06.000Z
|
2019-05-13T18:03:06.000Z
|
from django.conf import settings
from lpsolve55 import *
# Optimizer tuning constants, loaded once at import time from Django
# settings (settings.GLOBAL_SETTINGS).
# NOTE(review): from usage below, U_PES_MAX/U_REQ_REF appear to be
# per-operator lookup tables indexed by configuration — confirm against
# the settings module.
U_PES_MIN = settings.GLOBAL_SETTINGS['U_PES_MIN']
U_PES_MAX = settings.GLOBAL_SETTINGS['U_PES_MAX']
U_REQ_MIN = settings.GLOBAL_SETTINGS['U_REQ_MIN']
U_REQ_MAX = settings.GLOBAL_SETTINGS['U_REQ_MAX']
X_ART_REF = settings.GLOBAL_SETTINGS['X_ART_REF']
U_PES_REF = settings.GLOBAL_SETTINGS['U_PES_REF']
U_REQ_REF = settings.GLOBAL_SETTINGS['U_REQ_REF']
K1 = settings.GLOBAL_SETTINGS['K1']
K2 = settings.GLOBAL_SETTINGS['K2']
# Cap used by permutate()/permutate_ntua() on the combined value of a pair.
MAX_TOTAL_CONT_PES = settings.GLOBAL_SETTINGS['MAX_TOTAL_CONT_PES']
# Number of hosts, derived from the configured host IP list.
HOSTS = len(settings.GLOBAL_SETTINGS['HOST_IPS'])
def permutate():
    """Enumerate feasible [i, j] index pairs for operators 0 and 1.

    A pair is feasible when the two U_PES_MAX entries it selects sum to
    at most MAX_TOTAL_CONT_PES.
    """
    cap = MAX_TOTAL_CONT_PES
    feasible = []
    for left in range(len(U_PES_MAX[0])):
        for right in range(len(U_PES_MAX[1])):
            if U_PES_MAX[0][left] + U_PES_MAX[1][right] <= cap:
                feasible.append([left, right])
    return feasible
def permutate_ntua():
    """Enumerate feasible [i, j] pairs for operators 0/1 plus 2/3.

    Same feasibility rule as permutate(); the second sweep covers the
    NTUA operator pair (rows 2 and 3 of U_PES_MAX) sampling every second
    index only.
    """
    cap = MAX_TOTAL_CONT_PES
    feasible = []
    # Edge operators (rows 0 and 1): dense index sweep.
    for left in range(len(U_PES_MAX[0])):
        for right in range(len(U_PES_MAX[1])):
            if U_PES_MAX[0][left] + U_PES_MAX[1][right] <= cap:
                feasible.append([left, right])
    # NTUA operators (rows 2 and 3): coarser sweep with step 2.
    for left in range(0, len(U_PES_MAX[2]), 2):
        for right in range(0, len(U_PES_MAX[3]), 2):
            if U_PES_MAX[2][left] + U_PES_MAX[3][right] <= cap:
                feasible.append([left, right])
    return feasible
def optimize(combinations, predictedWorkload):
    """Choose container configurations per host via two successive ILPs.

    Stage 1 minimises the number of placements opened while the selected
    combinations cover predictedWorkload[0] and [1]; stage 2 re-solves
    with the placement count pinned (EQ) to stage 1's optimum, minimising
    the summed combination indices.

    combinations      -- list of [i, j] index pairs (see permutate()).
    predictedWorkload -- two-element sequence of required capacities.
    Returns a list of combination pairs padded with [0, 0] up to HOSTS.

    NOTE(review): Python 2 module (`print len(...)` statements) driving
    lpsolve55 via `from lpsolve55 import *` (GE/LE/EQ/IMPORTANT come from
    that star import). The colno* lists are built but never passed to
    lpsolve — dead weight kept as-is. Indentation was reconstructed from
    syntax; statement grouping below is the most plausible reading.
    """
    n = len(combinations)
    ncol = n  # unused; kept as-is
    colno = []
    row = []
    # ---- stage 1: minimise the number of opened placements ----------
    lp = lpsolve('make_lp', 0, n)
    lpsolve('set_verbose', lp, IMPORTANT)
    for i in range(1, n + 1):
        lpsolve('set_col_name', lp, i, 'p' + str(i))
        lpsolve('set_int', lp, i, True)  # integer decision variables
    lpsolve('set_add_rowmode', lp, True)
    # first
    # Coverage of operator 0's predicted workload.
    for i in range(0, n):
        colno.append(i + 1)
        row.append(U_REQ_REF[0][combinations[i][0]])
    lpsolve('add_constraintex', lp, row, GE, predictedWorkload[0])
    # second
    # Coverage of operator 1's predicted workload.
    colno1 = []
    row1 = []
    for i in range(0, n):
        colno1.append(i + 1)
        row1.append(U_REQ_REF[1][combinations[i][1]])
    lpsolve('add_constraint', lp, row1, GE, predictedWorkload[1])
    # third
    # Non-negativity of the total count (sum >= 0).
    colno2 = []
    row2 = []
    colno2 = [0] * n
    row2 = [0] * n
    for i in range(0, n):
        colno2[i] = i + 1
        row2[i] = 1
    lpsolve('add_constraint', lp, row2, GE, 0)
    colno2 = [0] * n
    row2 = [0] * n
    # Upper bound: at most HOSTS placements (added twice, once via the
    # row3 buffer and once via row4 — redundant but harmless).
    colno3 = []
    row3 = []
    colno3 = [0] * n
    row3 = [0] * n
    for i in range(0, n):
        colno3[i] = i + 1
        row3[i] = 1
    lpsolve('add_constraint', lp, row3, LE, HOSTS)
    colno3 = [0] * n
    row3 = [0] * n
    colno4 = []
    row4 = []
    for i in range(0, n):
        colno4.append(i + 1)
        row4.append(1)
    lpsolve('add_constraint', lp, row4, LE, HOSTS)
    lpsolve('set_add_rowmode', lp, False)
    # Objective: minimise the total number of selected placements.
    colno5 = []
    row5 = []
    for i in range(0, n):
        colno5.append(i + 1)
        row5.append(1)
    lpsolve('set_obj_fn', lp, row5)
    lpsolve('set_minim', lp)
    lpsolve('write_lp', lp, 'a.lp')  # debug dump of the model
    #print (lpsolve('get_mat', lp, 1, 2))
    lpsolve('solve', lp)
    #print ("objective")
    print (lpsolve('get_objective', lp))
    # Optimal placement count; pins the stage-2 model below.
    HOST_OPEN = (lpsolve('get_objective', lp))
    #print ("variables")
    #print (lpsolve('get_variables', lp))
    #print ("constraints")
    #print (lpsolve('get_constraints', lp))
    temp = []
    final = []
    temp = (lpsolve('get_variables', lp))
    lpsolve('delete_lp', lp)
    print len(temp[0])
    # Expand each variable's integer value into that many copies of its
    # combination. (This stage-1 `final` is discarded — stage 2 rebuilds it.)
    for i in range(0, len(temp[0])):
        while (temp[0][i] > 0):
            temp[0][i] -= 1
            final.append(combinations[i])
    for i in range(len(final), HOSTS):
        final.append([0,0])
    # start of second lp
    # ---- stage 2: same feasibility, placement count fixed to HOST_OPEN,
    # minimising the summed combination indices. -----------------------
    colno = []
    row = []
    lp = lpsolve('make_lp', 0, n)
    lpsolve('set_verbose', lp, IMPORTANT)
    for i in range (1,n+1):
        lpsolve('set_col_name', lp, i, 'p'+str(i))
        lpsolve('set_int', lp, i, True)
    lpsolve('set_add_rowmode', lp, True)
    # first
    for i in range (0,n):
        colno.append(i+1)
        row.append(U_REQ_REF[0][combinations[i][0]])
    lpsolve('add_constraintex', lp, row, GE, predictedWorkload[0])
    #second
    colno1 = []
    row1 = []
    for i in range (0,n):
        colno1.append(i+1)
        row1.append(U_REQ_REF[1][combinations[i][1]])
    lpsolve('add_constraint', lp, row1, GE, predictedWorkload[1])
    #third
    colno2 = []
    row2 = []
    colno2 = [0] * n
    row2 = [0] * n
    for i in range (0,n):
        colno2[i] = i+1
        row2[i] = 1
    lpsolve('add_constraint', lp, row2, GE, 0)
    colno2 = [0] * n
    row2 = [0] * n
    colno3 = []
    row3 = []
    colno3 = [0] * n
    row3 = [0] * n
    for i in range (0,n):
        colno3[i] = i+1
        row3[i] = 1
    lpsolve('add_constraint', lp, row3, LE, HOST_OPEN)
    colno3 = [0] * n
    row3 = [0] * n
    colno4 = []
    row4 = []
    for i in range (0,n):
        colno4.append(i+1)
        row4.append(1)
    # Placement count must equal stage 1's optimum.
    lpsolve('add_constraint', lp, row4, EQ, HOST_OPEN)
    lpsolve('set_add_rowmode', lp, False)
    # Objective: minimise the summed configuration indices.
    colno5 = []
    row5 = []
    for i in range (0,n):
        colno5.append(i+1)
        row5.append(combinations[i][0]+int(combinations[i][1]))
    lpsolve('set_obj_fn', lp, row5)
    lpsolve('set_minim', lp)
    lpsolve('write_lp', lp, 'a.lp')
    #print (lpsolve('get_mat', lp, 1, 2))
    lpsolve('solve', lp)
    #print ("objective")
    print (lpsolve('get_objective', lp))
    #print ("variables")
    #print (lpsolve('get_variables', lp))
    #print ("constraints")
    #print (lpsolve('get_constraints', lp))
    temp = []
    final = []
    temp = (lpsolve('get_variables', lp))
    print len(temp[0])
    lpsolve('delete_lp', lp)
    # Expand stage-2 solution into the final assignment, padded to HOSTS.
    for i in range(0, len(temp[0])):
        while (temp[0][i] > 0):
            temp[0][i] -= 1
            final.append(combinations[i])
    for i in range(len(final), HOSTS):
        final.append([0,0])
    return final
    #return [[1,1],[1,1]]
def optimize_ntua(combinations, predictedWorkload):
    """Two-stage ILP placement for the edge + NTUA variant.

    Variables 0..24 are edge combinations, 25..33 NTUA combinations
    (hard-coded n=25, n1=9 — assumes permutate_ntua() produced exactly
    34 pairs; TODO confirm). Stage 1 minimises the number of selected
    placements subject to workload coverage and at most one placement per
    side; stage 2 pins the per-side counts (EQ HOSTS_EDGE / EQ HOST_NTUA)
    and minimises summed combination indices.

    Returns the selected combination pairs padded with [0, 0].

    NOTE(review): Python 2 code over lpsolve55's star-imported constants.
    HOSTS is repeatedly reassigned as a local scratch bound here (it does
    not touch the module-level HOSTS). Indentation was reconstructed from
    syntax; grouping below is the most plausible reading — in particular
    the per-iteration add_constraint calls inside the row3 loops, which
    add one cumulative-prefix constraint per variable.
    """
    n = 25
    n1 = 9
    ntotal = n + n1
    ncol=ntotal  # unused; kept as-is
    colno = []
    row = []
    # ---- stage 1 ----------------------------------------------------
    lp = lpsolve('make_lp', 0, ntotal)
    lpsolve('set_verbose', lp, IMPORTANT)
    for i in range (1,ntotal+1):
        lpsolve('set_col_name', lp, i, 'p'+str(i))
        lpsolve('set_int', lp, i, True)  # integer decision variables
    lpsolve('set_add_rowmode', lp, True)
    # first
    # Operator-0 workload coverage; NTUA variables use reference row 2.
    for i in range (0,ntotal):
        colno.append(i+1)
        if i <=24:
            row.append(U_REQ_REF[0][combinations[i][0]])
        else:
            row.append(U_REQ_REF[2][combinations[i][0]])
    lpsolve('add_constraintex', lp, row, GE, predictedWorkload[0])
    #second
    # Operator-1 workload coverage; NTUA variables use reference row 3.
    colno1 = []
    row1 = []
    for i in range (0,ntotal):
        colno1.append(i+1)
        if i <=24:
            row1.append(U_REQ_REF[1][combinations[i][1]])
        else:
            row1.append(U_REQ_REF[3][combinations[i][1]])
    lpsolve('add_constraint', lp, row1, GE, predictedWorkload[1])
    #third
    colno2 = []
    row2 = []
    colno2 = [0] * ntotal
    row2 = [0] * ntotal
    for i in range (0,ntotal):
        colno2[i] = i+1
        row2[i] = 1
    lpsolve('add_constraint', lp, row2, GE, 0)
    colno2 = [0] * ntotal
    row2 = [0] * ntotal
    colno3 = []
    row3 = []
    colno3 = [0] * ntotal
    row3 = [0] * ntotal
    HOSTS=1  # local scratch bound for the per-prefix constraints below
    for i in range (0,ntotal):
        #colno3[i] = i+1
        if i<=24:
            colno3[i] = i+1
            row3[i] = 1
            lpsolve('add_constraint', lp, row3, LE, HOSTS)
        else:
            colno3[i] = i+1
            row3[i] = 1
            lpsolve('add_constraint', lp, row3, LE, 1)
    colno3 = [0] * ntotal
    row3 = [0] * ntotal
    # At most one edge placement in total.
    colno4 = []
    row4 = []
    for i in range (0,ntotal):
        if i<=24:
            colno4.append(i+1)
            row4.append(1)
        else:
            colno4.append(i+1)
            row4.append(0)
    HOSTS = 1
    lpsolve('add_constraint', lp, row4, LE, HOSTS)
    # At most one NTUA placement in total.
    colno7 = []
    row7 = []
    for i in range (0,ntotal):
        if i>24:
            colno7.append(i+1)
            row7.append(1)
        else:
            colno7.append(i+1)
            row7.append(0)
    HOSTS =1
    lpsolve('add_constraint', lp, row7, LE, HOSTS)
    HOSTS=2  # padding target for the (discarded) stage-1 expansion below
    lpsolve('set_add_rowmode', lp, False)
    # Objective: minimise the number of selected placements.
    colno5 = []
    row5 = []
    for i in range (0,ntotal):
        colno5.append(i+1)
        row5.append(1)
    lpsolve('set_obj_fn', lp, row5)
    lpsolve('set_minim', lp)
    lpsolve('write_lp', lp, 'a.lp')  # debug dump of the model
    #print (lpsolve('get_mat', lp, 1, 2))
    lpsolve('solve', lp)
    #print ("objective")
    #print (lpsolve('get_objective', lp))
    HOST_OPEN = (lpsolve('get_objective', lp))
    #print ("variables")
    #print (lpsolve('get_variables', lp))
    # Count how many edge vs. NTUA placements stage 1 opened.
    a = []
    a = (lpsolve('get_variables', lp)[0])
    HOST_NTUA=0
    HOSTS_EDGE=0
    for i in range (0,ntotal):
        #print ((combinations[i], i) if a[i]>=1.0 else 'skata')
        if a[i] >= 1.0 and i < n:
            HOSTS_EDGE+=int(a[i])
        elif a[i]>=1.0 and i>=n:
            HOST_NTUA+=1
    #print HOSTS_EDGE , HOST_NTUA
    #print ("constraints")
    #print (lpsolve('get_constraints', lp))
    temp = []
    final = []
    temp = (lpsolve('get_variables', lp))
    #print len(temp[0])
    lpsolve('delete_lp', lp)
    # Stage-1 expansion (discarded — stage 2 rebuilds `final`).
    for i in range(0, len(temp[0])):
        while (temp[0][i] > 0):
            temp[0][i] -= 1
            final.append(combinations[i])
    for i in range(len(final), HOSTS):
        final.append([0,0])
    #print final
    #print ("---------------------------------------------")
    #print ("start of second lp")
    # ---- stage 2: per-side counts pinned, minimise summed indices ----
    n = 25
    n1 = 9
    ntotal = n + n1
    #print ntotal
    ncol=ntotal
    colno = []
    row = []
    lp = lpsolve('make_lp', 0, ntotal)
    lpsolve('set_verbose', lp, IMPORTANT)
    for i in range (1,ntotal+1):
        lpsolve('set_col_name', lp, i, 'p'+str(i))
        lpsolve('set_int', lp, i, True)
    lpsolve('set_add_rowmode', lp, True)
    # first
    for i in range (0,ntotal):
        colno.append(i+1)
        if i <=24:
            row.append(U_REQ_REF[0][combinations[i][0]])
        else:
            row.append(U_REQ_REF[2][combinations[i][0]])
    lpsolve('add_constraintex', lp, row, GE, predictedWorkload[0])
    #second
    colno1 = []
    row1 = []
    for i in range (0,ntotal):
        colno1.append(i+1)
        if i <=24:
            row1.append(U_REQ_REF[1][combinations[i][1]])
        else:
            row1.append(U_REQ_REF[3][combinations[i][1]])
    lpsolve('add_constraint', lp, row1, GE, predictedWorkload[1])
    #third
    colno2 = []
    row2 = []
    colno2 = [0] * ntotal
    row2 = [0] * ntotal
    for i in range (0,ntotal):
        colno2[i] = i+1
        row2[i] = 1
    lpsolve('add_constraint', lp, row2, GE, 0)
    colno2 = [0] * ntotal
    row2 = [0] * ntotal
    colno3 = []
    row3 = []
    colno3 = [0] * ntotal
    row3 = [0] * ntotal
    HOSTS=1
    for i in range (0,ntotal):
        #colno3[i] = i+1
        if i<=24:
            colno3[i] = i+1
            row3[i] = 1
            lpsolve('add_constraint', lp, row3, LE, HOSTS)
        else:
            colno3[i] = i+1
            row3[i] = 1
            lpsolve('add_constraint', lp, row3, LE, 1)
    colno3 = [0] * ntotal
    row3 = [0] * ntotal
    # Edge placement count pinned to stage 1's result.
    colno4 = []
    row4 = []
    for i in range (0,ntotal):
        if i<=24:
            colno4.append(i+1)
            row4.append(1)
        else:
            colno4.append(i+1)
            row4.append(0)
    lpsolve('add_constraint', lp, row4, EQ, HOSTS_EDGE)
    # NTUA placement count pinned to stage 1's result.
    colno7 = []
    row7 = []
    for i in range (0,ntotal):
        if i>24:
            colno7.append(i+1)
            row7.append(1)
        else:
            colno7.append(i+1)
            row7.append(0)
    lpsolve('add_constraint', lp, row7, EQ, HOST_NTUA)
    lpsolve('set_add_rowmode', lp, False)
    # Objective: minimise the summed configuration indices.
    colno5 = []
    row5 = []
    for i in range (0,ntotal):
        colno5.append(i+1)
        row5.append(combinations[i][0]+int(combinations[i][1]))
    lpsolve('set_obj_fn', lp, row5)
    lpsolve('set_minim', lp)
    lpsolve('write_lp', lp, 'a.lp')
    #print (lpsolve('get_mat', lp, 1, 2))
    lpsolve('solve', lp)
    #print ("objective")
    #print (lpsolve('get_objective', lp))
    HOST_OPEN = (lpsolve('get_objective', lp))
    #print ("variables")
    print (lpsolve('get_variables', lp))
    a = []
    a = (lpsolve('get_variables', lp)[0])
    HOSTS=2
    temp = []
    final = []
    print a
    # Expand the stage-2 solution; pad the edge portion to 1 entry at the
    # edge/NTUA boundary and the whole result to 2 at the end.
    # NOTE(review): grouping of the `if i == n-1` padding is reconstructed —
    # placed at loop level, since inside the elif (i >= n) it would be dead.
    for i in range (0,ntotal):
        if a[i] >= 1.0 and i < n:
            while (a[i]>0):
                a[i] -= 1
                final.append(combinations[i])
        elif a[i]>=1.0 and i>=n:
            while (a[i]>0):
                a[i] -= 1
                final.append(combinations[i])
        if i == n-1:
            for j in range (len(final), 1):
                final.append([0,0])
    for j in range (len(final), 2):
        final.append([0,0])
    return final
| 23.347979
| 81
| 0.522469
| 1,836
| 13,285
| 3.660675
| 0.061002
| 0.01845
| 0.036602
| 0.067103
| 0.901354
| 0.867728
| 0.846154
| 0.839012
| 0.826068
| 0.816843
| 0
| 0.051375
| 0.31577
| 13,285
| 568
| 82
| 23.389085
| 0.688009
| 0.076026
| 0
| 0.907692
| 0
| 0
| 0.083667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.015385
| null | null | 0.015385
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0d087cc846ad6ae8a284c8d2fedc07248b9dc733
| 22,128
|
py
|
Python
|
proma/projects/tests/test_views.py
|
erickgnavar/Proma
|
159051f4247700166f063075b3819ae426f6d337
|
[
"MIT"
] | 3
|
2018-01-22T08:50:38.000Z
|
2021-07-16T04:08:28.000Z
|
proma/projects/tests/test_views.py
|
erickgnavar/Proma
|
159051f4247700166f063075b3819ae426f6d337
|
[
"MIT"
] | 13
|
2019-05-27T03:08:29.000Z
|
2020-01-03T03:36:04.000Z
|
proma/projects/tests/test_views.py
|
erickgnavar/Proma
|
159051f4247700166f063075b3819ae426f6d337
|
[
"MIT"
] | 1
|
2019-10-03T17:52:29.000Z
|
2019-10-03T17:52:29.000Z
|
from django.contrib.messages.storage.fallback import FallbackStorage
from django.test import RequestFactory, TestCase
from django.urls import resolve, reverse
from mixer.backend.django import mixer
from proma.enums import Currency
from .. import forms, views
from ..models import Project, Timesheet
class ProjectCreateViewTestCase(TestCase):
    """ProjectCreateView: URL routing, GET form, valid and invalid POST."""

    def setUp(self):
        self.factory = RequestFactory()
        self.view = views.ProjectCreateView.as_view()
        self.user = mixer.blend("users.User")

    def test_match_expected_view(self):
        match = resolve("/projects/create/")
        self.assertEqual(match.func.__name__, self.view.__name__)

    def test_load_sucessful(self):
        req = self.factory.get("/")
        req.user = self.user
        resp = self.view(req)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("form", resp.context_data)

    def test_create_project(self):
        client = mixer.blend("clients.Client")
        payload = {
            "name": "test",
            "client": client.id,
            "start_date": "2018-01-01",
            "payment_type": Project.FLAT_RATE,
            "currency": Currency.USD.name,
            "rate": 20,
        }
        req = self.factory.post("/", data=payload)
        req.user = self.user
        resp = self.view(req)
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(reverse("projects:project-list"), resp["location"])

    def test_create_project_missing_fields(self):
        req = self.factory.post("/", data={"name": "test"})
        req.user = self.user
        resp = self.view(req)
        self.assertEqual(resp.status_code, 200)
        self.assertTrue(len(resp.context_data["form"].errors) > 0)
class ProjectUpdateViewTestCase(TestCase):
    """ProjectUpdateView: routing, GET form, valid and invalid POST."""

    def setUp(self):
        self.factory = RequestFactory()
        self.view = views.ProjectUpdateView.as_view()
        self.user = mixer.blend("users.User")
        self.project = mixer.blend("projects.Project")

    def test_match_expected_view(self):
        match = resolve("/projects/1/update/")
        self.assertEqual(match.func.__name__, self.view.__name__)

    def test_load_sucessful(self):
        req = self.factory.get("/")
        req.user = self.user
        resp = self.view(req, id=self.project.id)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("form", resp.context_data)

    def test_update_project(self):
        payload = {
            "name": "test",
            "client": self.project.client.id,
            "start_date": "2018-01-01",
            "payment_type": Project.DAILY_RATE,
            "currency": Currency.USD.name,
            "rate": 20,
        }
        req = self.factory.post("/", data=payload)
        req.user = self.user
        resp = self.view(req, id=self.project.id)
        self.assertEqual(resp.status_code, 302)
        expected = reverse(
            "projects:project-detail", kwargs={"id": self.project.id}
        )
        self.project.refresh_from_db()
        self.assertEqual(self.project.payment_type, Project.DAILY_RATE)
        self.assertEqual(expected, resp["location"])

    def test_update_project_missing_fields(self):
        req = self.factory.post("/", data={"name": "test"})
        req.user = self.user
        resp = self.view(req, id=self.project.id)
        self.assertEqual(resp.status_code, 200)
        self.assertTrue(len(resp.context_data["form"].errors) > 0)
class ProjectListViewTestCase(TestCase):
    """ProjectListView: routing and listing of seeded projects."""

    def setUp(self):
        self.factory = RequestFactory()
        self.view = views.ProjectListView.as_view()
        self.user = mixer.blend("users.User")

    def test_match_expected_view(self):
        match = resolve("/projects/")
        self.assertEqual(match.func.__name__, self.view.__name__)

    def test_load_sucessful(self):
        mixer.cycle(5).blend("projects.Project")
        req = self.factory.get("/")
        req.user = self.user
        resp = self.view(req)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("projects", resp.context_data)
        self.assertIn("filter", resp.context_data)
        self.assertEqual(resp.context_data["projects"].count(), 5)
class ProjectDetailViewTestCase(TestCase):
    """ProjectDetailView: routing and detail rendering."""

    def setUp(self):
        self.factory = RequestFactory()
        self.view = views.ProjectDetailView.as_view()
        self.user = mixer.blend("users.User")
        self.project = mixer.blend("projects.Project")

    def test_match_expected_view(self):
        match = resolve("/projects/1/")
        self.assertEqual(match.func.__name__, self.view.__name__)

    def test_load_sucessful(self):
        req = self.factory.get("/")
        req.user = self.user
        resp = self.view(req, id=self.project.id)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("project", resp.context_data)
class ExpenseCreateViewTestCase(TestCase):
    """ExpenseCreateView: routing, GET form, valid and invalid POST."""

    def setUp(self):
        self.factory = RequestFactory()
        self.view = views.ExpenseCreateView.as_view()
        self.user = mixer.blend("users.User")

    def test_match_expected_view(self):
        match = resolve("/expenses/create/")
        self.assertEqual(match.func.__name__, self.view.__name__)

    def test_load_sucessful(self):
        req = self.factory.get("/")
        req.user = self.user
        resp = self.view(req)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("form", resp.context_data)

    def test_create_expense(self):
        project = mixer.blend("projects.Project")
        payload = {
            "name": "test",
            "project": project.id,
            "date": "2018-01-01",
            "amount": 10.00,
        }
        req = self.factory.post("/", data=payload)
        req.user = self.user
        resp = self.view(req)
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(reverse("projects:expense-list"), resp["location"])

    def test_create_expense_missing_fields(self):
        req = self.factory.post("/", data={"name": "test"})
        req.user = self.user
        resp = self.view(req)
        self.assertEqual(resp.status_code, 200)
        self.assertTrue(len(resp.context_data["form"].errors) > 0)
class ExpenseUpdateViewTestCase(TestCase):
    """ExpenseUpdateView: routing, GET form, valid and invalid POST."""

    def setUp(self):
        self.factory = RequestFactory()
        self.view = views.ExpenseUpdateView.as_view()
        self.user = mixer.blend("users.User")
        self.expense = mixer.blend("projects.Expense")

    def test_match_expected_view(self):
        match = resolve("/expenses/1/update/")
        self.assertEqual(match.func.__name__, self.view.__name__)

    def test_load_sucessful(self):
        req = self.factory.get("/")
        req.user = self.user
        resp = self.view(req, id=self.expense.id)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("form", resp.context_data)

    def test_update_expense(self):
        payload = {
            "name": "another name",
            "project": self.expense.project.id,
            "date": "2018-01-01",
            "amount": 20,
        }
        req = self.factory.post("/", data=payload)
        req.user = self.user
        resp = self.view(req, id=self.expense.id)
        self.assertEqual(resp.status_code, 302)
        expected = reverse(
            "projects:expense-detail", kwargs={"id": self.expense.id}
        )
        self.expense.refresh_from_db()
        self.assertEqual(self.expense.name, "another name")
        self.assertEqual(expected, resp["location"])

    def test_update_expense_missing_fields(self):
        req = self.factory.post("/", data={"name": "test"})
        req.user = self.user
        resp = self.view(req, id=self.expense.id)
        self.assertEqual(resp.status_code, 200)
        self.assertTrue(len(resp.context_data["form"].errors) > 0)
class ExpenseListViewTestCase(TestCase):
    """ExpenseListView: routing and listing of seeded expenses."""

    def setUp(self):
        self.factory = RequestFactory()
        self.view = views.ExpenseListView.as_view()
        self.user = mixer.blend("users.User")

    def test_match_expected_view(self):
        match = resolve("/expenses/")
        self.assertEqual(match.func.__name__, self.view.__name__)

    def test_load_sucessful(self):
        mixer.cycle(5).blend("projects.Expense")
        req = self.factory.get("/")
        req.user = self.user
        resp = self.view(req)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("expenses", resp.context_data)
        self.assertIn("filter", resp.context_data)
        self.assertEqual(resp.context_data["expenses"].count(), 5)
class ExpenseDetailViewTestCase(TestCase):
    """ExpenseDetailView: routing and detail rendering."""

    def setUp(self):
        self.factory = RequestFactory()
        self.view = views.ExpenseDetailView.as_view()
        self.user = mixer.blend("users.User")
        self.expense = mixer.blend("projects.Expense")

    def test_match_expected_view(self):
        match = resolve("/expenses/1/")
        self.assertEqual(match.func.__name__, self.view.__name__)

    def test_load_sucessful(self):
        req = self.factory.get("/")
        req.user = self.user
        resp = self.view(req, id=self.expense.id)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("expense", resp.context_data)
class ProjectCreateInvoiceViewTestCase(TestCase):
    """Tests for creating invoices of each pricing type from a project."""

    def setUp(self):
        self.factory = RequestFactory()
        self.user = mixer.blend("users.User")
        self.project = mixer.blend("projects.Project")
        self.view = views.ProjectCreateInvoiceView.as_view()

    def _assert_form_class(self, invoice_type, form_class):
        # Shared GET helper: the rendered form must match the invoice type.
        req = self.factory.get("/")
        req.user = self.user
        resp = self.view(req, id=self.project.id, type=invoice_type)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("form", resp.context_data)
        self.assertIsInstance(resp.context_data["form"], form_class)

    def _assert_invoice_created(self, invoice_type, payload):
        # Shared POST helper: a valid payload creates an invoice and
        # redirects to its detail page.
        req = self.factory.post("/", payload)
        req.user = self.user
        req.session = {}
        req._messages = FallbackStorage(req)
        self.assertIsNone(self.project.invoices.last())
        resp = self.view(req, id=self.project.id, type=invoice_type)
        self.assertEqual(resp.status_code, 302)
        invoice = self.project.invoices.last()
        self.assertIsNotNone(invoice)
        self.assertEqual(
            resp["location"],
            reverse("invoices:invoice-detail", kwargs={"id": invoice.id}),
        )

    def test_match_expected_view(self):
        """Every invoice-type URL resolves to the same view."""
        for invoice_type in ("flat", "rate", "percentage"):
            match = resolve("/projects/1/create-invoice/%s/" % invoice_type)
            self.assertEqual(match.func.__name__, self.view.__name__)

    def test_load_flat_form(self):
        """The flat type renders the flat invoice form."""
        self._assert_form_class("flat", forms.CreateInvoiceFlatForm)

    def test_load_rate_form(self):
        """The rate type renders the rate invoice form."""
        self._assert_form_class("rate", forms.CreateInvoiceRateForm)

    def test_load_percentage_form(self):
        """The percentage type renders the percentage invoice form."""
        self._assert_form_class(
            "percentage", forms.CreateInvoicePercentageForm
        )

    def test_create_invoice_flat(self):
        """A valid flat payload creates an invoice."""
        self._assert_invoice_created(
            "flat", {"description": "test", "amount": 10}
        )

    def test_create_invoice_rate(self):
        """A valid rate payload creates an invoice."""
        self._assert_invoice_created(
            "rate", {"description": "test", "rate": 10, "units": 10}
        )

    def test_create_invoice_percentage(self):
        """A valid percentage payload creates an invoice."""
        self._assert_invoice_created(
            "percentage", {"description": "test", "percentage": 10}
        )
class TimesheetCreateViewTestCase(TestCase):
    """Tests for the timesheet creation view."""

    def setUp(self):
        self.factory = RequestFactory()
        self.user = mixer.blend("users.User")
        self.view = views.TimesheetCreateView.as_view()

    def test_match_expected_view(self):
        """The create URL resolves to the timesheet create view."""
        match = resolve("/timesheets/create/")
        self.assertEqual(match.func.__name__, self.view.__name__)

    def test_load_sucessful(self):
        """A GET renders the empty creation form."""
        req = self.factory.get("/")
        req.user = self.user
        resp = self.view(req)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("form", resp.context_data)

    def test_create_timesheet(self):
        """A valid payload creates a timesheet and redirects to the list."""
        project = mixer.blend("projects.Project")
        payload = {
            "label": "test",
            "project": project.id,
            "date_start": "2018-01-01",
            "date_end": "2018-01-01",
        }
        req = self.factory.post("/", data=payload)
        req.user = self.user
        resp = self.view(req)
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(reverse("projects:timesheet-list"), resp["location"])

    def test_create_timesheet_missing_fields(self):
        """An incomplete payload re-renders the form with errors."""
        req = self.factory.post("/", data={"label": "test"})
        req.user = self.user
        resp = self.view(req)
        self.assertEqual(resp.status_code, 200)
        self.assertGreater(len(resp.context_data["form"].errors), 0)
class TimesheetUpdateViewTestCase(TestCase):
    """Tests for the timesheet update view."""

    def setUp(self):
        self.factory = RequestFactory()
        self.user = mixer.blend("users.User")
        self.timesheet = mixer.blend(
            "projects.Timesheet", project=mixer.blend("projects.Project")
        )
        self.view = views.TimesheetUpdateView.as_view()

    def test_match_expected_view(self):
        """The update URL resolves to the timesheet update view."""
        match = resolve("/timesheets/1/update/")
        self.assertEqual(match.func.__name__, self.view.__name__)

    def test_load_sucessful(self):
        """A GET renders the pre-filled update form."""
        req = self.factory.get("/")
        req.user = self.user
        resp = self.view(req, id=self.timesheet.id)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("form", resp.context_data)

    def test_update_timesheet(self):
        """A valid payload saves the change and redirects to the detail."""
        payload = {
            "label": "another label",
            "date_start": "2018-01-01",
            "date_end": "2018-01-01",
            "project": self.timesheet.project.id,
        }
        req = self.factory.post("/", data=payload)
        req.user = self.user
        resp = self.view(req, id=self.timesheet.id)
        self.assertEqual(resp.status_code, 302)
        self.timesheet.refresh_from_db()
        self.assertEqual(self.timesheet.label, "another label")
        expected = reverse(
            "projects:timesheet-detail", kwargs={"id": self.timesheet.id}
        )
        self.assertEqual(expected, resp["location"])

    def test_update_timesheet_missing_fields(self):
        """An incomplete payload re-renders the form with errors."""
        req = self.factory.post("/", data={"label": "test"})
        req.user = self.user
        resp = self.view(req, id=self.timesheet.id)
        self.assertEqual(resp.status_code, 200)
        self.assertGreater(len(resp.context_data["form"].errors), 0)
class TimesheetListViewTestCase(TestCase):
    """Tests for the timesheet list view and its assign-project form."""

    def setUp(self):
        self.factory = RequestFactory()
        self.user = mixer.blend("users.User")
        self.view = views.TimesheetListView.as_view()

    def _post(self, payload):
        # Build a POST request with message storage wired up.
        req = self.factory.post("/", payload)
        req.session = {}
        req._messages = FallbackStorage(req)
        req.user = self.user
        return req

    def test_match_expected_view(self):
        """The /timesheets/ URL resolves to the list view."""
        match = resolve("/timesheets/")
        self.assertEqual(match.func.__name__, self.view.__name__)

    def test_load_sucessful(self):
        """A GET renders the list with every timesheet and a filter."""
        mixer.cycle(5).blend("projects.Timesheet")
        req = self.factory.get("/")
        req.user = self.user
        resp = self.view(req)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("timesheets", resp.context_data)
        self.assertIn("filter", resp.context_data)
        self.assertEqual(resp.context_data["timesheets"].count(), 5)

    def test_valid_assign_project_form(self):
        """A valid assignment attaches every timesheet to the project."""
        mixer.cycle(5).blend("projects.Timesheet", project=None)
        ids = Timesheet.objects.values_list("id", flat=True)
        project = mixer.blend("projects.Project")
        req = self._post(
            {"project": project.id, "timesheets": ",".join(map(str, ids))}
        )
        self.assertEqual(Timesheet.objects.filter(project=None).count(), 5)
        resp = self.view(req)
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(resp["location"], reverse("projects:timesheet-list"))
        # All five timesheets were assigned to the project.
        self.assertEqual(Timesheet.objects.filter(project=None).count(), 0)

    def test_invalid_assign_project_form(self):
        """An invalid assignment leaves every timesheet untouched."""
        mixer.cycle(5).blend("projects.Timesheet", project=None)
        ids = Timesheet.objects.values_list("id", flat=True)
        req = self._post(
            {"project": None, "timesheets": ",".join(map(str, ids))}
        )
        self.assertEqual(Timesheet.objects.filter(project=None).count(), 5)
        resp = self.view(req)
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(resp["location"], reverse("projects:timesheet-list"))
        # Nothing was assigned because the form was invalid.
        self.assertEqual(Timesheet.objects.filter(project=None).count(), 5)
class TimesheetDetailViewTestCase(TestCase):
    """Tests for the timesheet detail view."""

    def setUp(self):
        self.factory = RequestFactory()
        self.user = mixer.blend("users.User")
        self.timesheet = mixer.blend("projects.Timesheet")
        self.view = views.TimesheetDetailView.as_view()

    def test_match_expected_view(self):
        """The timesheet detail URL resolves to the detail view."""
        match = resolve("/timesheets/1/")
        self.assertEqual(match.func.__name__, self.view.__name__)

    def test_load_sucessful(self):
        """A GET for an existing timesheet renders its detail page."""
        req = self.factory.get("/")
        req.user = self.user
        resp = self.view(req, id=self.timesheet.id)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("timesheet", resp.context_data)
class TimesheetClockInViewTestCase(TestCase):
    """Tests for the clock-in redirect view."""

    def setUp(self):
        self.factory = RequestFactory()
        self.user = mixer.blend("users.User")
        self.view = views.TimesheetClockInView.as_view()

    def test_match_expected_view(self):
        """The clock-in URL resolves to the clock-in view."""
        match = resolve("/timesheets/clock-in/")
        self.assertEqual(match.func.__name__, self.view.__name__)

    def test_redirect_to_referer(self):
        """After clocking in, the user is sent back to the referring page."""
        req = self.factory.get("/")
        req.user = self.user
        req.META["HTTP_REFERER"] = "redirect"
        resp = self.view(req)
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(resp["location"], "redirect")

    def test_redirect_to_home_when_there_is_not_a_referer(self):
        """Without a referer header, the redirect falls back to home."""
        req = self.factory.get("/")
        req.user = self.user
        resp = self.view(req)
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(resp["location"], reverse("home"))
class TimesheetClockOutViewTestCase(TestCase):
    """Tests for the clock-out redirect view."""

    def setUp(self):
        self.factory = RequestFactory()
        self.user = mixer.blend("users.User")
        # An active timesheet must exist for clock-out to act on.
        mixer.blend("projects.Timesheet", is_active=True)
        self.view = views.TimesheetClockOutView.as_view()

    def test_match_expected_view(self):
        """The clock-out URL resolves to the clock-out view."""
        match = resolve("/timesheets/clock-out/")
        self.assertEqual(match.func.__name__, self.view.__name__)

    def test_redirect_to_referer(self):
        """After clocking out, the user is sent back to the referring page."""
        req = self.factory.get("/")
        req.user = self.user
        req.META["HTTP_REFERER"] = "redirect"
        resp = self.view(req)
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(resp["location"], "redirect")

    def test_redirect_to_home_when_there_is_not_a_referer(self):
        """Without a referer header, the redirect falls back to home."""
        req = self.factory.get("/")
        req.user = self.user
        resp = self.view(req)
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(resp["location"], reverse("home"))
| 38.350087
| 83
| 0.643709
| 2,461
| 22,128
| 5.617229
| 0.067452
| 0.084635
| 0.079861
| 0.049479
| 0.869864
| 0.854528
| 0.838831
| 0.784433
| 0.784433
| 0.772425
| 0
| 0.012692
| 0.227314
| 22,128
| 576
| 84
| 38.416667
| 0.795824
| 0
| 0
| 0.711968
| 0
| 0
| 0.087627
| 0.018845
| 0
| 0
| 0
| 0
| 0.225152
| 1
| 0.133874
| false
| 0
| 0.014199
| 0
| 0.178499
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0d2c092b7856f52c402bc4c9dd1945c6e4de5db5
| 52,514
|
py
|
Python
|
tests/unit/test_sentence.py
|
dantiston/pyconll
|
046feef710e702c3347c5a37da9862ffcce220bb
|
[
"MIT"
] | null | null | null |
tests/unit/test_sentence.py
|
dantiston/pyconll
|
046feef710e702c3347c5a37da9862ffcce220bb
|
[
"MIT"
] | null | null | null |
tests/unit/test_sentence.py
|
dantiston/pyconll
|
046feef710e702c3347c5a37da9862ffcce220bb
|
[
"MIT"
] | null | null | null |
import pytest
from pyconll.unit.sentence import Sentence
from tests.tree.util import assert_tree_structure
from tests.unit.util import assert_token_members
def test_simple_sentence_construction():
    """
    Check that a small sentence parses with correct metadata and tokens.
    """
    raw = '\n'.join([
        '# sent_id = fr-ud-dev_00003',
        '# text = Mais comment faire ?',
        '1 Mais mais CCONJ _ _ 3 cc _ _',
        '2 comment comment ADV _ _ 3 advmod _ _',
        '3 faire faire VERB _ VerbForm=Inf 0 root _ _',
        '4 ? ? PUNCT _ _ 3 punct _ _',
    ]) + '\n'
    sent = Sentence(raw)

    assert sent.id == 'fr-ud-dev_00003'
    assert sent.text == 'Mais comment faire ?'
    assert len(sent) == 4

    assert_token_members(sent['1'], '1', 'Mais', 'mais', 'CCONJ', None, {},
                         '3', 'cc', {}, {})
    assert_token_members(sent['2'], '2', 'comment', 'comment', 'ADV', None,
                         {}, '3', 'advmod', {}, {})
    assert_token_members(sent['3'], '3', 'faire', 'faire', 'VERB', None,
                         {'VerbForm': {'Inf'}}, '0', 'root', {}, {})
    assert_token_members(sent['4'], '4', '?', '?', 'PUNCT', None, {}, '3',
                         'punct', {}, {})
def test_cannot_assign_tokens():
    """
    Verify that assigning to a token id on a Sentence raises TypeError.
    """
    raw = '\n'.join([
        '# sent_id = fr-ud-dev_00003',
        '# text = Mais comment faire ?',
        '1 Mais mais CCONJ _ _ 3 cc _ _',
        '2 comment comment ADV _ _ 3 advmod _ _',
        '3 faire faire VERB _ VerbForm=Inf 0 root _ _',
        '4 ? ? PUNCT _ _ 3 punct _ _',
    ]) + '\n'
    sent = Sentence(raw)

    with pytest.raises(TypeError):
        sent['1'] = sent['2']
def test_metadata_parsing():
    """
    Verify every comment line is exposed through the metadata accessors.
    """
    raw = '\n'.join([
        '# sent_id = fr-ud-dev_00003',
        '# newdoc id = test id',
        '# text = Mais comment faire ?',
        '# text_en = But how is it done ?',
        '# translit = tat yathānuśrūyate.',
        '1 Mais mais CCONJ _ _ 3 cc _ _',
        '2 comment comment ADV _ _ 3 advmod _ _',
        '3 faire faire VERB _ VerbForm=Inf 0 root _ _',
        '4 ? ? PUNCT _ _ 3 punct _ _',
    ]) + '\n'
    sent = Sentence(raw)

    assert sent.meta_value('sent_id') == 'fr-ud-dev_00003'
    assert sent.meta_value('newdoc id') == 'test id'
    assert sent.meta_value('text') == 'Mais comment faire ?'
    assert sent.meta_value('text_en') == 'But how is it done ?'
    assert sent.meta_value('translit') == 'tat yathānuśrūyate.'

    assert sent.meta_present('text') is True
    assert sent.meta_present('translit') is True
    assert sent.meta_present('fake') is False
def test_singleton_parsing():
    """
    Verify that a key-only comment (no value) is still reported present.
    """
    raw = '\n'.join([
        '# sent_id = fr-ud-dev_00003',
        '# newdoc',
        '# text = Mais comment faire ?',
        '# text_en = But how is it done ?',
        '# translit = tat yathānuśrūyate.',
        '1 Mais mais CCONJ _ _ 3 cc _ _',
        '2 comment comment ADV _ _ 3 advmod _ _',
        '3 faire faire VERB _ VerbForm=Inf 0 root _ _',
        '4 ? ? PUNCT _ _ 3 punct _ _',
    ]) + '\n'
    sent = Sentence(raw)

    assert sent.meta_value('sent_id') == 'fr-ud-dev_00003'
    assert sent.meta_present('newdoc') is True
    assert sent.meta_value('text') == 'Mais comment faire ?'
    assert sent.meta_value('text_en') == 'But how is it done ?'
    assert sent.meta_value('translit') == 'tat yathānuśrūyate.'
def test_metadata_error():
    """
    Requesting the value of a comment that does not exist raises KeyError.
    """
    raw = '\n'.join([
        '# sent_id = fr-ud-dev_00003',
        '# newdoc',
        '# text = Mais comment faire ?',
        '# text_en = But how is it done ?',
        '# translit = tat yathānuśrūyate.',
        '1 Mais mais CCONJ _ _ 3 cc _ _',
        '2 comment comment ADV _ _ 3 advmod _ _',
        '3 faire faire VERB _ VerbForm=Inf 0 root _ _',
        '4 ? ? PUNCT _ _ 3 punct _ _',
    ]) + '\n'
    sent = Sentence(raw)

    with pytest.raises(KeyError):
        sent.meta_value('newpar')
def test_id_updating():
    """
    Assigning a new sentence id must be reflected in the sent_id metadata.
    """
    raw = '\n'.join([
        '# sent_id = fr-ud-dev_00003',
        '# newdoc id = test id',
        '# text = Mais comment faire ?',
        '# text_en = But how is it done ?',
        '# translit = tat yathānuśrūyate.',
        '1 Mais mais CCONJ _ _ 3 cc _ _',
        '2 comment comment ADV _ _ 3 advmod _ _',
        '3 faire faire VERB _ VerbForm=Inf 0 root _ _',
        '4 ? ? PUNCT _ _ 3 punct _ _',
    ]) + '\n'
    sent = Sentence(raw)

    sent.id = 'fr-ud-train_00123'
    assert sent.meta_value('sent_id') == 'fr-ud-train_00123'
def test_iter():
    """
    Iterating a Sentence yields its tokens in document order.
    """
    raw = '\n'.join([
        '# sent_id = fr-ud-dev_00003',
        '# newdoc id = test id',
        '# text = Mais comment faire ?',
        '# text_en = But how is it done ?',
        '# translit = tat yathānuśrūyate.',
        '1 Mais mais CCONJ _ _ 3 cc _ _',
        '2 comment comment ADV _ _ 3 advmod _ _',
        '3 faire faire VERB _ VerbForm=Inf 0 root _ _',
        '4 ? ? PUNCT _ _ 3 punct _ _',
    ]) + '\n'
    sent = Sentence(raw)

    ids = []
    word_forms = []
    for tok in sent:
        ids.append(tok.id)
        word_forms.append(tok.form)

    assert ids == ['1', '2', '3', '4']
    assert word_forms == ['Mais', 'comment', 'faire', '?']
def test_str_indexing():
    """
    A token can be looked up by its string id.
    """
    conll = '\n'.join([
        '# sent_id = fr-ud-dev_00002',
        '# text = Les études durent six ans mais leur contenu diffère donc selon les Facultés.',
        '1 Les le DET _ Definite=Def|Gender=Fem|Number=Plur|PronType=Art 2 det _ _',
        '2 études étude NOUN _ Gender=Fem|Number=Plur 3 nsubj _ _',
        '3 durent durer VERB _ Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin 0 root _ _',
        '4 six six NUM _ _ 5 nummod _ _',
        '5 ans an NOUN _ Gender=Masc|Number=Plur 3 obj _ _',
        '6 mais mais CCONJ _ _ 9 cc _ _',
        '7 leur son DET _ Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs 8 det _ _',
        '8 contenu contenu NOUN _ Gender=Masc|Number=Sing 9 nsubj _ _',
        '9 diffère différer VERB _ Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 conj _ _',
        '10 donc donc ADV _ _ 9 advmod _ _',
        '11 selon selon ADP _ _ 13 case _ _',
        '12 les le DET _ Definite=Def|Number=Plur|PronType=Art 13 det _ _',
        '13 Facultés Facultés PROPN _ _ 9 obl _ SpaceAfter=No',
        '14 . . PUNCT _ _ 3 punct _ _',
    ])
    sent = Sentence(conll)

    tok = sent['8']
    assert_token_members(tok, '8', 'contenu', 'contenu', 'NOUN', None,
                         {'Gender': {'Masc'}, 'Number': {'Sing'}},
                         '9', 'nsubj', {}, {})
def test_int_indexing():
    """
    A token can be looked up by its integer position within the sentence.
    """
    conll = '\n'.join([
        '# sent_id = fr-ud-dev_00002',
        '# text = Les études durent six ans mais leur contenu diffère donc selon les Facultés.',
        '1 Les le DET _ Definite=Def|Gender=Fem|Number=Plur|PronType=Art 2 det _ _',
        '2 études étude NOUN _ Gender=Fem|Number=Plur 3 nsubj _ _',
        '3 durent durer VERB _ Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin 0 root _ _',
        '4 six six NUM _ _ 5 nummod _ _',
        '5 ans an NOUN _ Gender=Masc|Number=Plur 3 obj _ _',
        '6 mais mais CCONJ _ _ 9 cc _ _',
        '7 leur son DET _ Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs 8 det _ _',
        '8 contenu contenu NOUN _ Gender=Masc|Number=Sing 9 nsubj _ _',
        '9 diffère différer VERB _ Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 conj _ _',
        '10 donc donc ADV _ _ 9 advmod _ _',
        '11 selon selon ADP _ _ 13 case _ _',
        '12 les le DET _ Definite=Def|Number=Plur|PronType=Art 13 det _ _',
        '13 Facultés Facultés PROPN _ _ 9 obl _ SpaceAfter=No',
        '14 . . PUNCT _ _ 3 punct _ _',
    ])
    sent = Sentence(conll)

    # Position 7 is the eighth token, id '8'.
    tok = sent[7]
    assert_token_members(tok, '8', 'contenu', 'contenu', 'NOUN', None,
                         {'Gender': {'Masc'}, 'Number': {'Sing'}},
                         '9', 'nsubj', {}, {})
def test_int_slice_indexing():
    """
    Tokens can be sliced with integer bounds.
    """
    conll = '\n'.join([
        '# sent_id = fr-ud-dev_00002',
        '# text = Les études durent six ans mais leur contenu diffère donc selon les Facultés.',
        '1 Les le DET _ Definite=Def|Gender=Fem|Number=Plur|PronType=Art 2 det _ _',
        '2 études étude NOUN _ Gender=Fem|Number=Plur 3 nsubj _ _',
        '3 durent durer VERB _ Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin 0 root _ _',
        '4 six six NUM _ _ 5 nummod _ _',
        '5 ans an NOUN _ Gender=Masc|Number=Plur 3 obj _ _',
        '6 mais mais CCONJ _ _ 9 cc _ _',
        '7 leur son DET _ Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs 8 det _ _',
        '8 contenu contenu NOUN _ Gender=Masc|Number=Sing 9 nsubj _ _',
        '9 diffère différer VERB _ Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 conj _ _',
        '10 donc donc ADV _ _ 9 advmod _ _',
        '11 selon selon ADP _ _ 13 case _ _',
        '12 les le DET _ Definite=Def|Number=Plur|PronType=Art 13 det _ _',
        '13 Facultés Facultés PROPN _ _ 9 obl _ SpaceAfter=No',
        '14 . . PUNCT _ _ 3 punct _ _',
    ])
    sent = Sentence(conll)

    # Positions 7..9 are the tokens with ids '8', '9' and '10'.
    toks = sent[7:10]
    assert_token_members(toks[0], '8', 'contenu', 'contenu', 'NOUN', None,
                         {'Gender': {'Masc'}, 'Number': {'Sing'}},
                         '9', 'nsubj', {}, {})
    assert_token_members(toks[1], '9', 'diffère', 'différer', 'VERB', None, {
        'Mood': {'Ind'},
        'Number': {'Sing'},
        'Person': {'3'},
        'Tense': {'Pres'},
        'VerbForm': {'Fin'}
    }, '3', 'conj', {}, {})
    assert_token_members(toks[2], '10', 'donc', 'donc', 'ADV', None, {},
                         '9', 'advmod', {}, {})
def test_int_slice_indexing_step():
    """
    Tokens can be sliced with integer bounds and a step size.
    """
    conll = '\n'.join([
        '# sent_id = fr-ud-dev_00002',
        '# text = Les études durent six ans mais leur contenu diffère donc selon les Facultés.',
        '1 Les le DET _ Definite=Def|Gender=Fem|Number=Plur|PronType=Art 2 det _ _',
        '2 études étude NOUN _ Gender=Fem|Number=Plur 3 nsubj _ _',
        '3 durent durer VERB _ Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin 0 root _ _',
        '4 six six NUM _ _ 5 nummod _ _',
        '5 ans an NOUN _ Gender=Masc|Number=Plur 3 obj _ _',
        '6 mais mais CCONJ _ _ 9 cc _ _',
        '7 leur son DET _ Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs 8 det _ _',
        '8 contenu contenu NOUN _ Gender=Masc|Number=Sing 9 nsubj _ _',
        '9 diffère différer VERB _ Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 conj _ _',
        '10 donc donc ADV _ _ 9 advmod _ _',
        '11 selon selon ADP _ _ 13 case _ _',
        '12 les le DET _ Definite=Def|Number=Plur|PronType=Art 13 det _ _',
        '13 Facultés Facultés PROPN _ _ 9 obl _ SpaceAfter=No',
        '14 . . PUNCT _ _ 3 punct _ _',
    ])
    sent = Sentence(conll)

    # Every other token from positions 0..4: ids '1', '3' and '5'.
    toks = sent[0:5:2]
    assert_token_members(toks[0], '1', 'Les', 'le', 'DET', None, {
        'Definite': {'Def'},
        'Gender': {'Fem'},
        'Number': {'Plur'},
        'PronType': {'Art'}
    }, '2', 'det', {}, {})
    assert_token_members(toks[1], '3', 'durent', 'durer', 'VERB', None, {
        'Mood': {'Ind'},
        'Number': {'Plur'},
        'Person': {'3'},
        'Tense': {'Pres'},
        'VerbForm': {'Fin'}
    }, '0', 'root', {}, {})
    assert_token_members(toks[2], '5', 'ans', 'an', 'NOUN', None, {
        'Gender': {'Masc'},
        'Number': {'Plur'},
    }, '3', 'obj', {}, {})
def test_str_slice_indexing_step():
    """
    Tokens can be sliced with string id bounds and a step size.
    """
    conll = '\n'.join([
        '# sent_id = fr-ud-dev_00002',
        '# text = Les études durent six ans mais leur contenu diffère donc selon les Facultés.',
        '1 Les le DET _ Definite=Def|Gender=Fem|Number=Plur|PronType=Art 2 det _ _',
        '2 études étude NOUN _ Gender=Fem|Number=Plur 3 nsubj _ _',
        '3 durent durer VERB _ Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin 0 root _ _',
        '4 six six NUM _ _ 5 nummod _ _',
        '5 ans an NOUN _ Gender=Masc|Number=Plur 3 obj _ _',
        '6 mais mais CCONJ _ _ 9 cc _ _',
        '7 leur son DET _ Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs 8 det _ _',
        '8 contenu contenu NOUN _ Gender=Masc|Number=Sing 9 nsubj _ _',
        '9 diffère différer VERB _ Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 conj _ _',
        '10 donc donc ADV _ _ 9 advmod _ _',
        '11 selon selon ADP _ _ 13 case _ _',
        '12 les le DET _ Definite=Def|Number=Plur|PronType=Art 13 det _ _',
        '13 Facultés Facultés PROPN _ _ 9 obl _ SpaceAfter=No',
        '14 . . PUNCT _ _ 3 punct _ _',
    ])
    sent = Sentence(conll)

    # Every other token between ids '1' and '6': ids '1', '3' and '5'.
    toks = sent['1':'6':2]
    assert_token_members(toks[0], '1', 'Les', 'le', 'DET', None, {
        'Definite': {'Def'},
        'Gender': {'Fem'},
        'Number': {'Plur'},
        'PronType': {'Art'}
    }, '2', 'det', {}, {})
    assert_token_members(toks[1], '3', 'durent', 'durer', 'VERB', None, {
        'Mood': {'Ind'},
        'Number': {'Plur'},
        'Person': {'3'},
        'Tense': {'Pres'},
        'VerbForm': {'Fin'}
    }, '0', 'root', {}, {})
    assert_token_members(toks[2], '5', 'ans', 'an', 'NOUN', None, {
        'Gender': {'Masc'},
        'Number': {'Plur'},
    }, '3', 'obj', {}, {})
def test_str_slice_indexing():
    """
    Tokens can be sliced with string id bounds.
    """
    conll = '\n'.join([
        '# sent_id = fr-ud-dev_00002',
        '# text = Les études durent six ans mais leur contenu diffère donc selon les Facultés.',
        '1 Les le DET _ Definite=Def|Gender=Fem|Number=Plur|PronType=Art 2 det _ _',
        '2 études étude NOUN _ Gender=Fem|Number=Plur 3 nsubj _ _',
        '3 durent durer VERB _ Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin 0 root _ _',
        '4 six six NUM _ _ 5 nummod _ _',
        '5 ans an NOUN _ Gender=Masc|Number=Plur 3 obj _ _',
        '6 mais mais CCONJ _ _ 9 cc _ _',
        '7 leur son DET _ Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs 8 det _ _',
        '8 contenu contenu NOUN _ Gender=Masc|Number=Sing 9 nsubj _ _',
        '9 diffère différer VERB _ Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 conj _ _',
        '10 donc donc ADV _ _ 9 advmod _ _',
        '11 selon selon ADP _ _ 13 case _ _',
        '12 les le DET _ Definite=Def|Number=Plur|PronType=Art 13 det _ _',
        '13 Facultés Facultés PROPN _ _ 9 obl _ SpaceAfter=No',
        '14 . . PUNCT _ _ 3 punct _ _',
    ])
    sent = Sentence(conll)

    # Ids '8' up to (excluding) '11'.
    toks = sent['8':'11']
    assert_token_members(toks[0], '8', 'contenu', 'contenu', 'NOUN', None,
                         {'Gender': {'Masc'}, 'Number': {'Sing'}},
                         '9', 'nsubj', {}, {})
    assert_token_members(toks[1], '9', 'diffère', 'différer', 'VERB', None, {
        'Mood': {'Ind'},
        'Number': {'Sing'},
        'Person': {'3'},
        'Tense': {'Pres'},
        'VerbForm': {'Fin'}
    }, '3', 'conj', {}, {})
    assert_token_members(toks[2], '10', 'donc', 'donc', 'ADV', None, {},
                         '9', 'advmod', {}, {})
def test_int_slice_indexing_missing_value_start():
    """
    Slicing with an omitted start bound begins at the first token.
    """
    conll = '\n'.join([
        '# sent_id = fr-ud-dev_00002',
        '# text = Les études durent six ans mais leur contenu diffère donc selon les Facultés.',
        '1 Les le DET _ Definite=Def|Gender=Fem|Number=Plur|PronType=Art 2 det _ _',
        '2 études étude NOUN _ Gender=Fem|Number=Plur 3 nsubj _ _',
        '3 durent durer VERB _ Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin 0 root _ _',
        '4 six six NUM _ _ 5 nummod _ _',
        '5 ans an NOUN _ Gender=Masc|Number=Plur 3 obj _ _',
        '6 mais mais CCONJ _ _ 9 cc _ _',
        '7 leur son DET _ Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs 8 det _ _',
        '8 contenu contenu NOUN _ Gender=Masc|Number=Sing 9 nsubj _ _',
        '9 diffère différer VERB _ Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 conj _ _',
        '10 donc donc ADV _ _ 9 advmod _ _',
        '11 selon selon ADP _ _ 13 case _ _',
        '12 les le DET _ Definite=Def|Number=Plur|PronType=Art 13 det _ _',
        '13 Facultés Facultés PROPN _ _ 9 obl _ SpaceAfter=No',
        '14 . . PUNCT _ _ 3 punct _ _',
    ])
    sent = Sentence(conll)

    # The first three tokens: ids '1', '2' and '3'.
    toks = sent[:3]
    assert_token_members(toks[0], '1', 'Les', 'le', 'DET', None, {
        'Definite': {'Def'},
        'Gender': {'Fem'},
        'Number': {'Plur'},
        'PronType': {'Art'}
    }, '2', 'det', {}, {})
    assert_token_members(toks[1], '2', 'études', 'étude', 'NOUN', None,
                         {'Gender': {'Fem'}, 'Number': {'Plur'}},
                         '3', 'nsubj', {}, {})
    assert_token_members(toks[2], '3', 'durent', 'durer', 'VERB', None, {
        'Mood': {'Ind'},
        'Number': {'Plur'},
        'Person': {'3'},
        'Tense': {'Pres'},
        'VerbForm': {'Fin'}
    }, '0', 'root', {}, {})
def test_int_slice_indexing_missing_value_stop():
    """
    Slicing with an omitted stop bound runs through the last token.
    """
    conll = '\n'.join([
        '# sent_id = fr-ud-dev_00002',
        '# text = Les études durent six ans mais leur contenu diffère donc selon les Facultés.',
        '1 Les le DET _ Definite=Def|Gender=Fem|Number=Plur|PronType=Art 2 det _ _',
        '2 études étude NOUN _ Gender=Fem|Number=Plur 3 nsubj _ _',
        '3 durent durer VERB _ Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin 0 root _ _',
        '4 six six NUM _ _ 5 nummod _ _',
        '5 ans an NOUN _ Gender=Masc|Number=Plur 3 obj _ _',
        '6 mais mais CCONJ _ _ 9 cc _ _',
        '7 leur son DET _ Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs 8 det _ _',
        '8 contenu contenu NOUN _ Gender=Masc|Number=Sing 9 nsubj _ _',
        '9 diffère différer VERB _ Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 conj _ _',
        '10 donc donc ADV _ _ 9 advmod _ _',
        '11 selon selon ADP _ _ 13 case _ _',
        '12 les le DET _ Definite=Def|Number=Plur|PronType=Art 13 det _ _',
        '13 Facultés Facultés PROPN _ _ 9 obl _ SpaceAfter=No',
        '14 . . PUNCT _ _ 3 punct _ _',
    ])
    sent = Sentence(conll)

    # Positions 10 onward: ids '11' through '14'.
    toks = sent[10:]
    assert_token_members(toks[0], '11', 'selon', 'selon', 'ADP', None, {},
                         '13', 'case', {}, {})
    assert_token_members(toks[1], '12', 'les', 'le', 'DET', None, {
        'Definite': {'Def'},
        'Number': {'Plur'},
        'PronType': {'Art'}
    }, '13', 'det', {}, {})
    assert_token_members(toks[2], '13', 'Facultés', 'Facultés', 'PROPN',
                         None, {}, '9', 'obl', {},
                         {'SpaceAfter': {'No'}})
    assert_token_members(toks[3], '14', '.', '.', 'PUNCT', None, {},
                         '3', 'punct', {}, {})
def test_proper_slice_type():
    """
    Indexing with anything other than an int, str, or slice raises.
    """
    conll = '\n'.join([
        '# sent_id = fr-ud-dev_00002',
        '# text = Les études durent six ans mais leur contenu diffère donc selon les Facultés.',
        '1 Les le DET _ Definite=Def|Gender=Fem|Number=Plur|PronType=Art 2 det _ _',
        '2 études étude NOUN _ Gender=Fem|Number=Plur 3 nsubj _ _',
        '3 durent durer VERB _ Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin 0 root _ _',
        '4 six six NUM _ _ 5 nummod _ _',
        '5 ans an NOUN _ Gender=Masc|Number=Plur 3 obj _ _',
        '6 mais mais CCONJ _ _ 9 cc _ _',
        '7 leur son DET _ Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs 8 det _ _',
        '8 contenu contenu NOUN _ Gender=Masc|Number=Sing 9 nsubj _ _',
        '9 diffère différer VERB _ Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 conj _ _',
        '10 donc donc ADV _ _ 9 advmod _ _',
        '11 selon selon ADP _ _ 13 case _ _',
        '12 les le DET _ Definite=Def|Number=Plur|PronType=Art 13 det _ _',
        '13 Facultés Facultés PROPN _ _ 9 obl _ SpaceAfter=No',
        '14 . . PUNCT _ _ 3 punct _ _',
    ])
    sent = Sentence(conll)

    # A float is not a valid index type.
    with pytest.raises(ValueError):
        sent[7.8]
def test_len_basic():
    """
    len() of a sentence reports the number of tokens.
    """
    conll = '\n'.join([
        '# sent_id = fr-ud-dev_00002',
        '# text = Les études durent six ans mais leur contenu diffère donc selon les Facultés.',
        '1 Les le DET _ Definite=Def|Gender=Fem|Number=Plur|PronType=Art 2 det _ _',
        '2 études étude NOUN _ Gender=Fem|Number=Plur 3 nsubj _ _',
        '3 durent durer VERB _ Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin 0 root _ _',
        '4 six six NUM _ _ 5 nummod _ _',
        '5 ans an NOUN _ Gender=Masc|Number=Plur 3 obj _ _',
        '6 mais mais CCONJ _ _ 9 cc _ _',
        '7 leur son DET _ Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs 8 det _ _',
        '8 contenu contenu NOUN _ Gender=Masc|Number=Sing 9 nsubj _ _',
        '9 diffère différer VERB _ Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 conj _ _',
        '10 donc donc ADV _ _ 9 advmod _ _',
        '11 selon selon ADP _ _ 13 case _ _',
        '12 les le DET _ Definite=Def|Number=Plur|PronType=Art 13 det _ _',
        '13 Facultés Facultés PROPN _ _ 9 obl _ SpaceAfter=No',
        '14 . . PUNCT _ _ 3 punct _ _',
    ])
    sent = Sentence(conll)

    assert len(sent) == 14
def test_len_empty():
    """
    An empty source string parses to a sentence with no tokens.
    """
    sent = Sentence('')
    assert len(sent) == 0
def test_text_readonly():
    """
    The text comment is parsed correctly and cannot be reassigned.
    """
    conll = '\n'.join([
        '# sent_id = fr-ud-dev_00002',
        '# text = Les études durent six ans mais leur contenu diffère donc selon les Facultés.',
        '1 Les le DET _ Definite=Def|Gender=Fem|Number=Plur|PronType=Art 2 det _ _',
        '2 études étude NOUN _ Gender=Fem|Number=Plur 3 nsubj _ _',
        '3 durent durer VERB _ Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin 0 root _ _',
        '4 six six NUM _ _ 5 nummod _ _',
        '5 ans an NOUN _ Gender=Masc|Number=Plur 3 obj _ _',
        '6 mais mais CCONJ _ _ 9 cc _ _',
        '7 leur son DET _ Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs 8 det _ _',
        '8 contenu contenu NOUN _ Gender=Masc|Number=Sing 9 nsubj _ _',
        '9 diffère différer VERB _ Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 conj _ _',
        '10 donc donc ADV _ _ 9 advmod _ _',
        '11 selon selon ADP _ _ 13 case _ _',
        '12 les le DET _ Definite=Def|Number=Plur|PronType=Art 13 det _ _',
        '13 Facultés Facultés PROPN _ _ 9 obl _ SpaceAfter=No',
        '14 . . PUNCT _ _ 3 punct _ _',
    ])
    sent = Sentence(conll)

    with pytest.raises(AttributeError):
        sent.text = 'error causing text'
    assert sent.text == ('Les études durent six ans mais leur contenu '
                         'diffère donc selon les Facultés.')
def test_output():
"""
Test if the sentence output is properly produced.
"""
source = (
'# sent_id = fr-ud-dev_00002\n'
'# text = Les études durent six ans mais leur contenu diffère donc selon les Facultés.\n'
'1 Les le DET _ Definite=Def|Gender=Fem|Number=Plur|PronType=Art 2 det _ _\n'
'2 études étude NOUN _ Gender=Fem|Number=Plur 3 nsubj _ _\n'
'3 durent durer VERB _ Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin 0 root _ _\n'
'4 six six NUM _ _ 5 nummod _ _\n'
'5 ans an NOUN _ Gender=Masc|Number=Plur 3 obj _ _\n'
'6 mais mais CCONJ _ _ 9 cc _ _\n'
'7 leur son DET _ Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs 8 det _ _\n'
'8 contenu contenu NOUN _ Gender=Masc|Number=Sing 9 nsubj _ _\n'
'9 diffère différer VERB _ Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 conj _ _\n'
'10 donc donc ADV _ _ 9 advmod _ _\n'
'11 selon selon ADP _ _ 13 case _ _\n'
'12 les le DET _ Definite=Def|Number=Plur|PronType=Art 13 det _ _\n'
'13 Facultés Facultés PROPN _ _ 9 obl _ SpaceAfter=No\n'
'14 . . PUNCT _ _ 3 punct _ _')
sentence = Sentence(source)
assert sentence.conll() == source
def test_modified_output():
"""
Test if the sentence is properly outputted after changing the annotation.
"""
source = (
'# sent_id = fr-ud-dev_00002\n'
'# text = Les études durent six ans mais leur contenu diffère donc selon les Facultés.\n'
'1 Les le DET _ Definite=Def|Gender=Fem|Number=Plur|PronType=Art 2 det _ _\n'
'2 études étude NOUN _ Gender=Fem|Number=Plur 3 nsubj _ _\n'
'3 durent durer VERB _ Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin 0 root _ _\n'
'4 six six NUM _ _ 5 nummod _ _\n'
'5 ans an NOUN _ Gender=Masc|Number=Plur 3 obj _ _\n'
'6 mais mais CCONJ _ _ 9 cc _ _\n'
'7 leur son DET _ Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs 8 det _ _\n'
'8 contenu contenu NOUN _ Gender=Masc|Number=Sing 9 nsubj _ _\n'
'9 diffère différer VERB _ Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 conj _ _\n'
'10 donc donc ADV _ _ 9 advmod _ _\n'
'11 selon selon ADP _ _ 13 case _ _\n'
'12 les le DET _ Definite=Def|Number=Plur|PronType=Art 13 det _ _\n'
'13 Facultés Facultés PROPN _ _ 9 obl _ SpaceAfter=No\n'
'14 . . PUNCT _ _ 3 punct _ _')
sentence = Sentence(source)
sentence.id = 'fr-ud-dev_00231'
sentence['13'].lemma = 'facultés'
sentence['13'].upos = 'NOUN'
sentence['13'].feats['Number'] = set()
sentence['13'].feats['Number'].add('Fem')
output = (
'# sent_id = fr-ud-dev_00231\n'
'# text = Les études durent six ans mais leur contenu diffère donc selon les Facultés.\n'
'1 Les le DET _ Definite=Def|Gender=Fem|Number=Plur|PronType=Art 2 det _ _\n'
'2 études étude NOUN _ Gender=Fem|Number=Plur 3 nsubj _ _\n'
'3 durent durer VERB _ Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin 0 root _ _\n'
'4 six six NUM _ _ 5 nummod _ _\n'
'5 ans an NOUN _ Gender=Masc|Number=Plur 3 obj _ _\n'
'6 mais mais CCONJ _ _ 9 cc _ _\n'
'7 leur son DET _ Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs 8 det _ _\n'
'8 contenu contenu NOUN _ Gender=Masc|Number=Sing 9 nsubj _ _\n'
'9 diffère différer VERB _ Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 conj _ _\n'
'10 donc donc ADV _ _ 9 advmod _ _\n'
'11 selon selon ADP _ _ 13 case _ _\n'
'12 les le DET _ Definite=Def|Number=Plur|PronType=Art 13 det _ _\n'
'13 Facultés facultés NOUN _ Number=Fem 9 obl _ SpaceAfter=No\n'
'14 . . PUNCT _ _ 3 punct _ _')
assert sentence.conll() == output
def test_change_comments():
"""
Test that comment values (other than text or id) can be changed through the
meta api.
"""
source = (
'# sent_id = fr-ud-dev_00002\n'
'# text = Les études durent six ans mais leur contenu diffère donc selon les Facultés.\n'
'1 Les le DET _ Definite=Def|Gender=Fem|Number=Plur|PronType=Art 2 det _ _\n'
'2 études étude NOUN _ Gender=Fem|Number=Plur 3 nsubj _ _\n'
'3 durent durer VERB _ Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin 0 root _ _\n'
'4 six six NUM _ _ 5 nummod _ _\n'
'5 ans an NOUN _ Gender=Masc|Number=Plur 3 obj _ _\n'
'6 mais mais CCONJ _ _ 9 cc _ _\n'
'7 leur son DET _ Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs 8 det _ _\n'
'8 contenu contenu NOUN _ Gender=Masc|Number=Sing 9 nsubj _ _\n'
'9 diffère différer VERB _ Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 conj _ _\n'
'10 donc donc ADV _ _ 9 advmod _ _\n'
'11 selon selon ADP _ _ 13 case _ _\n'
'12 les le DET _ Definite=Def|Number=Plur|PronType=Art 13 det _ _\n'
'13 Facultés Facultés PROPN _ _ 9 obl _ SpaceAfter=No\n'
'14 . . PUNCT _ _ 3 punct _ _')
expected = (
'# newpar id = xyz-1\n'
'# sent_id = fr-ud-dev_00002\n'
'# text = Les études durent six ans mais leur contenu diffère donc selon les Facultés.\n'
'1 Les le DET _ Definite=Def|Gender=Fem|Number=Plur|PronType=Art 2 det _ _\n'
'2 études étude NOUN _ Gender=Fem|Number=Plur 3 nsubj _ _\n'
'3 durent durer VERB _ Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin 0 root _ _\n'
'4 six six NUM _ _ 5 nummod _ _\n'
'5 ans an NOUN _ Gender=Masc|Number=Plur 3 obj _ _\n'
'6 mais mais CCONJ _ _ 9 cc _ _\n'
'7 leur son DET _ Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs 8 det _ _\n'
'8 contenu contenu NOUN _ Gender=Masc|Number=Sing 9 nsubj _ _\n'
'9 diffère différer VERB _ Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 conj _ _\n'
'10 donc donc ADV _ _ 9 advmod _ _\n'
'11 selon selon ADP _ _ 13 case _ _\n'
'12 les le DET _ Definite=Def|Number=Plur|PronType=Art 13 det _ _\n'
'13 Facultés Facultés PROPN _ _ 9 obl _ SpaceAfter=No\n'
'14 . . PUNCT _ _ 3 punct _ _')
sentence = Sentence(source)
sentence.set_meta('newpar id', 'xyz-1')
assert sentence.conll() == expected
def test_add_comments():
"""
Test that comment values (other than text) can be added through the meta
api.
"""
source = (
'# sent_id = fr-ud-dev_00002\n'
'# text = Les études durent six ans mais leur contenu diffère donc selon les Facultés.\n'
'1 Les le DET _ Definite=Def|Gender=Fem|Number=Plur|PronType=Art 2 det _ _\n'
'2 études étude NOUN _ Gender=Fem|Number=Plur 3 nsubj _ _\n'
'3 durent durer VERB _ Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin 0 root _ _\n'
'4 six six NUM _ _ 5 nummod _ _\n'
'5 ans an NOUN _ Gender=Masc|Number=Plur 3 obj _ _\n'
'6 mais mais CCONJ _ _ 9 cc _ _\n'
'7 leur son DET _ Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs 8 det _ _\n'
'8 contenu contenu NOUN _ Gender=Masc|Number=Sing 9 nsubj _ _\n'
'9 diffère différer VERB _ Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 conj _ _\n'
'10 donc donc ADV _ _ 9 advmod _ _\n'
'11 selon selon ADP _ _ 13 case _ _\n'
'12 les le DET _ Definite=Def|Number=Plur|PronType=Art 13 det _ _\n'
'13 Facultés Facultés PROPN _ _ 9 obl _ SpaceAfter=No\n'
'14 . . PUNCT _ _ 3 punct _ _')
expected = (
'# sent_id = fr-ud-dev_00002\n'
'# text = Les études durent six ans mais leur contenu diffère donc selon les Facultés.\n'
'# x-coord = 2\n'
'1 Les le DET _ Definite=Def|Gender=Fem|Number=Plur|PronType=Art 2 det _ _\n'
'2 études étude NOUN _ Gender=Fem|Number=Plur 3 nsubj _ _\n'
'3 durent durer VERB _ Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin 0 root _ _\n'
'4 six six NUM _ _ 5 nummod _ _\n'
'5 ans an NOUN _ Gender=Masc|Number=Plur 3 obj _ _\n'
'6 mais mais CCONJ _ _ 9 cc _ _\n'
'7 leur son DET _ Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs 8 det _ _\n'
'8 contenu contenu NOUN _ Gender=Masc|Number=Sing 9 nsubj _ _\n'
'9 diffère différer VERB _ Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 conj _ _\n'
'10 donc donc ADV _ _ 9 advmod _ _\n'
'11 selon selon ADP _ _ 13 case _ _\n'
'12 les le DET _ Definite=Def|Number=Plur|PronType=Art 13 det _ _\n'
'13 Facultés Facultés PROPN _ _ 9 obl _ SpaceAfter=No\n'
'14 . . PUNCT _ _ 3 punct _ _')
sentence = Sentence(source)
sentence.set_meta('x-coord', '2')
assert sentence.conll() == expected
def test_remove_comments():
"""
Test that comments can be removed from the sentence (other than text), and
removing non-existent comments throws a KeyError.
"""
source = (
'# sent_id = fr-ud-dev_00002\n'
'# text = Les études durent six ans mais leur contenu diffère donc selon les Facultés.\n'
'# x-coord = 2\n'
'1 Les le DET _ Definite=Def|Gender=Fem|Number=Plur|PronType=Art 2 det _ _\n'
'2 études étude NOUN _ Gender=Fem|Number=Plur 3 nsubj _ _\n'
'3 durent durer VERB _ Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin 0 root _ _\n'
'4 six six NUM _ _ 5 nummod _ _\n'
'5 ans an NOUN _ Gender=Masc|Number=Plur 3 obj _ _\n'
'6 mais mais CCONJ _ _ 9 cc _ _\n'
'7 leur son DET _ Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs 8 det _ _\n'
'8 contenu contenu NOUN _ Gender=Masc|Number=Sing 9 nsubj _ _\n'
'9 diffère différer VERB _ Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 conj _ _\n'
'10 donc donc ADV _ _ 9 advmod _ _\n'
'11 selon selon ADP _ _ 13 case _ _\n'
'12 les le DET _ Definite=Def|Number=Plur|PronType=Art 13 det _ _\n'
'13 Facultés Facultés PROPN _ _ 9 obl _ SpaceAfter=No\n'
'14 . . PUNCT _ _ 3 punct _ _')
expected = (
'# sent_id = fr-ud-dev_00002\n'
'# text = Les études durent six ans mais leur contenu diffère donc selon les Facultés.\n'
'1 Les le DET _ Definite=Def|Gender=Fem|Number=Plur|PronType=Art 2 det _ _\n'
'2 études étude NOUN _ Gender=Fem|Number=Plur 3 nsubj _ _\n'
'3 durent durer VERB _ Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin 0 root _ _\n'
'4 six six NUM _ _ 5 nummod _ _\n'
'5 ans an NOUN _ Gender=Masc|Number=Plur 3 obj _ _\n'
'6 mais mais CCONJ _ _ 9 cc _ _\n'
'7 leur son DET _ Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs 8 det _ _\n'
'8 contenu contenu NOUN _ Gender=Masc|Number=Sing 9 nsubj _ _\n'
'9 diffère différer VERB _ Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 conj _ _\n'
'10 donc donc ADV _ _ 9 advmod _ _\n'
'11 selon selon ADP _ _ 13 case _ _\n'
'12 les le DET _ Definite=Def|Number=Plur|PronType=Art 13 det _ _\n'
'13 Facultés Facultés PROPN _ _ 9 obl _ SpaceAfter=No\n'
'14 . . PUNCT _ _ 3 punct _ _')
sentence = Sentence(source)
sentence.remove_meta('x-coord')
assert sentence.conll() == expected
with pytest.raises(ValueError):
sentence.remove_meta('text')
with pytest.raises(KeyError):
sentence.remove_meta('x-coord')
def test_singleton_comment():
"""
Test that a singleton comment is properly output.
"""
source = (
'# sent_id = fr-ud-dev_00002\n'
'# text = Les études durent six ans mais leur contenu diffère donc selon les Facultés.\n'
'1 Les le DET _ Definite=Def|Gender=Fem|Number=Plur|PronType=Art 2 det _ _\n'
'2 études étude NOUN _ Gender=Fem|Number=Plur 3 nsubj _ _\n'
'3 durent durer VERB _ Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin 0 root _ _\n'
'4 six six NUM _ _ 5 nummod _ _\n'
'5 ans an NOUN _ Gender=Masc|Number=Plur 3 obj _ _\n'
'6 mais mais CCONJ _ _ 9 cc _ _\n'
'7 leur son DET _ Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs 8 det _ _\n'
'8 contenu contenu NOUN _ Gender=Masc|Number=Sing 9 nsubj _ _\n'
'9 diffère différer VERB _ Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 conj _ _\n'
'10 donc donc ADV _ _ 9 advmod _ _\n'
'11 selon selon ADP _ _ 13 case _ _\n'
'12 les le DET _ Definite=Def|Number=Plur|PronType=Art 13 det _ _\n'
'13 Facultés Facultés PROPN _ _ 9 obl _ SpaceAfter=No\n'
'14 . . PUNCT _ _ 3 punct _ _')
expected = (
'# foreign\n'
'# sent_id = fr-ud-dev_00002\n'
'# text = Les études durent six ans mais leur contenu diffère donc selon les Facultés.\n'
'1 Les le DET _ Definite=Def|Gender=Fem|Number=Plur|PronType=Art 2 det _ _\n'
'2 études étude NOUN _ Gender=Fem|Number=Plur 3 nsubj _ _\n'
'3 durent durer VERB _ Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin 0 root _ _\n'
'4 six six NUM _ _ 5 nummod _ _\n'
'5 ans an NOUN _ Gender=Masc|Number=Plur 3 obj _ _\n'
'6 mais mais CCONJ _ _ 9 cc _ _\n'
'7 leur son DET _ Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs 8 det _ _\n'
'8 contenu contenu NOUN _ Gender=Masc|Number=Sing 9 nsubj _ _\n'
'9 diffère différer VERB _ Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 conj _ _\n'
'10 donc donc ADV _ _ 9 advmod _ _\n'
'11 selon selon ADP _ _ 13 case _ _\n'
'12 les le DET _ Definite=Def|Number=Plur|PronType=Art 13 det _ _\n'
'13 Facultés Facultés PROPN _ _ 9 obl _ SpaceAfter=No\n'
'14 . . PUNCT _ _ 3 punct _ _')
sentence = Sentence(source)
sentence.set_meta('foreign')
assert sentence.conll() == expected
def test_invalid_comment_modification():
"""
Test that an error is thrown when the text is attempted to be changed
through the set_meta function.
"""
source = (
'# sent_id = fr-ud-dev_00002\n'
'# text = Les études durent six ans mais leur contenu diffère donc selon les Facultés.\n'
'1 Les le DET _ Definite=Def|Gender=Fem|Number=Plur|PronType=Art 2 det _ _\n'
'2 études étude NOUN _ Gender=Fem|Number=Plur 3 nsubj _ _\n'
'3 durent durer VERB _ Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin 0 root _ _\n'
'4 six six NUM _ _ 5 nummod _ _\n'
'5 ans an NOUN _ Gender=Masc|Number=Plur 3 obj _ _\n'
'6 mais mais CCONJ _ _ 9 cc _ _\n'
'7 leur son DET _ Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs 8 det _ _\n'
'8 contenu contenu NOUN _ Gender=Masc|Number=Sing 9 nsubj _ _\n'
'9 diffère différer VERB _ Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 conj _ _\n'
'10 donc donc ADV _ _ 9 advmod _ _\n'
'11 selon selon ADP _ _ 13 case _ _\n'
'12 les le DET _ Definite=Def|Number=Plur|PronType=Art 13 det _ _\n'
'13 Facultés Facultés PROPN _ _ 9 obl _ SpaceAfter=No\n'
'14 . . PUNCT _ _ 3 punct _ _')
sentence = Sentence(source)
with pytest.raises(ValueError):
sentence.set_meta('text', 'Qualcosa differente alla frase')
def test_no_id():
"""
Test that a sentence can be properly constructed with no id.
"""
source = (
'# newpar id\n'
'# text = Les études durent six ans mais leur contenu diffère donc selon les Facultés.\n'
'1 Les le DET _ Definite=Def|Gender=Fem|Number=Plur|PronType=Art 2 det _ _\n'
'2 études étude NOUN _ Gender=Fem|Number=Plur 3 nsubj _ _\n'
'3 durent durer VERB _ Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin 0 root _ _\n'
'4 six six NUM _ _ 5 nummod _ _\n'
'5 ans an NOUN _ Gender=Masc|Number=Plur 3 obj _ _\n'
'6 mais mais CCONJ _ _ 9 cc _ _\n'
'7 leur son DET _ Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs 8 det _ _\n'
'8 contenu contenu NOUN _ Gender=Masc|Number=Sing 9 nsubj _ _\n'
'9 diffère différer VERB _ Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 conj _ _\n'
'10 donc donc ADV _ _ 9 advmod _ _\n'
'11 selon selon ADP _ _ 13 case _ _\n'
'12 les le DET _ Definite=Def|Number=Plur|PronType=Art 13 det _ _\n'
'13 Facultés Facultés PROPN _ _ 9 obl _ SpaceAfter=No\n'
'14 . . PUNCT _ _ 3 punct _ _')
sentence = Sentence(source)
assert sentence.id is None
def test_no_id_singleton():
"""
Test that a sentence can be properly constructed with no id.
"""
source = (
'# newpar id\n'
'# sent_id =\n'
'# text = Les études durent six ans mais leur contenu diffère donc selon les Facultés.\n'
'1 Les le DET _ Definite=Def|Gender=Fem|Number=Plur|PronType=Art 2 det _ _\n'
'2 études étude NOUN _ Gender=Fem|Number=Plur 3 nsubj _ _\n'
'3 durent durer VERB _ Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin 0 root _ _\n'
'4 six six NUM _ _ 5 nummod _ _\n'
'5 ans an NOUN _ Gender=Masc|Number=Plur 3 obj _ _\n'
'6 mais mais CCONJ _ _ 9 cc _ _\n'
'7 leur son DET _ Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs 8 det _ _\n'
'8 contenu contenu NOUN _ Gender=Masc|Number=Sing 9 nsubj _ _\n'
'9 diffère différer VERB _ Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 conj _ _\n'
'10 donc donc ADV _ _ 9 advmod _ _\n'
'11 selon selon ADP _ _ 13 case _ _\n'
'12 les le DET _ Definite=Def|Number=Plur|PronType=Art 13 det _ _\n'
'13 Facultés Facultés PROPN _ _ 9 obl _ SpaceAfter=No\n'
'14 . . PUNCT _ _ 3 punct _ _')
sentence = Sentence(source)
assert sentence.id is None
def test_no_text():
"""
Test that a sentence can be properly constructed with no text field.
"""
source = (
'# newpar id\n'
'# sent_id =\n'
'1 Les le DET _ Definite=Def|Gender=Fem|Number=Plur|PronType=Art 2 det _ _\n'
'2 études étude NOUN _ Gender=Fem|Number=Plur 3 nsubj _ _\n'
'3 durent durer VERB _ Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin 0 root _ _\n'
'4 six six NUM _ _ 5 nummod _ _\n'
'5 ans an NOUN _ Gender=Masc|Number=Plur 3 obj _ _\n'
'6 mais mais CCONJ _ _ 9 cc _ _\n'
'7 leur son DET _ Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs 8 det _ _\n'
'8 contenu contenu NOUN _ Gender=Masc|Number=Sing 9 nsubj _ _\n'
'9 diffère différer VERB _ Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 conj _ _\n'
'10 donc donc ADV _ _ 9 advmod _ _\n'
'11 selon selon ADP _ _ 13 case _ _\n'
'12 les le DET _ Definite=Def|Number=Plur|PronType=Art 13 det _ _\n'
'13 Facultés Facultés PROPN _ _ 9 obl _ SpaceAfter=No\n'
'14 . . PUNCT _ _ 3 punct _ _')
sentence = Sentence(source)
assert sentence.text is None
def test_no_text_singleton():
"""
Test that a sentence can be properly constructed with no text field.
"""
source = (
'# newpar id\n'
'# sent_id =\n'
'# text =\n'
'1 Les le DET _ Definite=Def|Gender=Fem|Number=Plur|PronType=Art 2 det _ _\n'
'2 études étude NOUN _ Gender=Fem|Number=Plur 3 nsubj _ _\n'
'3 durent durer VERB _ Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin 0 root _ _\n'
'4 six six NUM _ _ 5 nummod _ _\n'
'5 ans an NOUN _ Gender=Masc|Number=Plur 3 obj _ _\n'
'6 mais mais CCONJ _ _ 9 cc _ _\n'
'7 leur son DET _ Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs 8 det _ _\n'
'8 contenu contenu NOUN _ Gender=Masc|Number=Sing 9 nsubj _ _\n'
'9 diffère différer VERB _ Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 conj _ _\n'
'10 donc donc ADV _ _ 9 advmod _ _\n'
'11 selon selon ADP _ _ 13 case _ _\n'
'12 les le DET _ Definite=Def|Number=Plur|PronType=Art 13 det _ _\n'
'13 Facultés Facultés PROPN _ _ 9 obl _ SpaceAfter=No\n'
'14 . . PUNCT _ _ 3 punct _ _')
sentence = Sentence(source)
assert sentence.text is None
def test_invalid_sentence_by_token():
"""
Test that an invalid token results in an invalid sentence.
"""
source = (
'# newpar id\n'
'# sent_id =\n'
'# text =\n'
'1 Les le DET _ Definite=Def|Gender=Fem|Number=Plur|PronType=Art 2 det _ _\n'
'2 études étude NOUN _ Gender=Fem|Number=Plur 3 nsubj _ _\n'
'3 durent durer VERB _ Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin 0 root _ _\n'
'4 six six NUM _ _ 5 nummod _ _\n'
'5 ans an NOUN _ Gender=Masc|Number=Plur 3 obj _ _\n'
'6 mais mais CCONJ _ _ 9 cc _ _\n'
'7 leur son DET _ Gender|Number=Sing|Poss=Yes|PronType=Prs 8 det _ _\n'
'8 contenu contenu NOUN _ Gender=Masc|Number=Sing 9 nsubj _ _\n'
'9 diffère différer VERB _ Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 conj _ _\n'
'10 donc donc ADV _ _ 9 advmod _ _\n'
'11 selon selon ADP _ _ 13 case _ _\n'
'12 les le DET _ Definite=Def|Number=Plur|PronType=Art 13 det _ _\n'
'13 Facultés Facultés PROPN _ _ 9 obl _ SpaceAfter=No\n'
'14 . . PUNCT _ _ 3 punct _ _')
with pytest.raises(ValueError):
sentence = Sentence(source)
def test_to_tree_standard_sentence():
"""
Test that a normal sentence can be parsed properly.
"""
source = ('# sent_id = fr-ud-dev_00003\n'
'# text = Mais comment faire ?\n'
'1 Mais mais CCONJ _ _ 3 cc _ _\n'
'2 comment comment ADV _ _ 3 advmod _ _\n'
'3 faire faire VERB _ VerbForm=Inf 0 root _ _\n'
'4 ? ? PUNCT _ _ 3 punct _ _\n')
sentence = Sentence(source)
st = sentence.to_tree()
assert_tree_structure(
st, {
(): sentence[2],
(0, ): sentence[0],
(1, ): sentence[1],
(2, ): sentence[3]
})
def test_to_tree_token_with_no_head():
"""
Test that a sentence with a token with no head results in error.
"""
source = ('# sent_id = fr-ud-dev_00003\n'
'# text = Mais comment faire ?\n'
'1 Mais mais CCONJ _ _ _ cc _ _\n'
'2 comment comment ADV _ _ 3 advmod _ _\n'
'3 faire faire VERB _ VerbForm=Inf 0 root _ _\n'
'4 ? ? PUNCT _ _ 3 punct _ _\n')
sentence = Sentence(source)
with pytest.raises(ValueError):
st = sentence.to_tree()
def test_to_tree_no_root_token():
"""
Test that a sentence with no root token results in error.
"""
source = ('# sent_id = fr-ud-dev_00003\n'
'# text = Mais comment faire ?\n'
'1 Mais mais CCONJ _ _ _ cc _ _\n'
'2 comment comment ADV _ _ 3 advmod _ _\n'
'3 faire faire VERB _ VerbForm=Inf 1 root _ _\n'
'4 ? ? PUNCT _ _ 3 punct _ _\n')
sentence = Sentence(source)
with pytest.raises(ValueError):
st = sentence.to_tree()
def test_to_tree_multiword_present():
"""
Test that a normal sentence can be parsed properly.
"""
source = ('# sent_id = fr-ud-dev_00003\n'
'# text = Mais comment faire ?\n'
'1 Mais mais CCONJ _ _ 5 cc _ _\n'
'2 comment comment ADV _ _ 5 advmod _ _\n'
'3-4 du _ _ _ _ _ _ _ _\n'
'3 de de ADP _ _ 4 nmod _ _\n'
'4 le le DET _ _ 5 det _ _\n'
'5 faire faire VERB _ VerbForm=Inf 0 root _ _\n'
'6 ? ? PUNCT _ _ 5 punct _ _\n')
sentence = Sentence(source)
st = sentence.to_tree()
assert_tree_structure(
st, {
(): sentence[5],
(0, ): sentence[0],
(1, ): sentence[1],
(2, ): sentence[4],
(3, ): sentence[6],
(2, 0): sentence[3]
})
def test_to_tree_multi_level():
"""
Test a sentence with several levels of dependencies deep is properly parsed.
"""
source = (
'# sent_id = fr-ud-dev_00002\n'
'# text = Les études durent six ans mais leur contenu diffère donc selon les Facultés.\n'
'1 Les le DET _ Definite=Def|Gender=Fem|Number=Plur|PronType=Art 2 det _ _\n'
'2 études étude NOUN _ Gender=Fem|Number=Plur 3 nsubj _ _\n'
'3 durent durer VERB _ Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin 0 root _ _\n'
'4 six six NUM _ _ 5 nummod _ _\n'
'5 ans an NOUN _ Gender=Masc|Number=Plur 3 obj _ _\n'
'6 mais mais CCONJ _ _ 9 cc _ _\n'
'7 leur son DET _ Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs 8 det _ _\n'
'8 contenu contenu NOUN _ Gender=Masc|Number=Sing 9 nsubj _ _\n'
'9 diffère différer VERB _ Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 conj _ _\n'
'10 donc donc ADV _ _ 9 advmod _ _\n'
'11 selon selon ADP _ _ 13 case _ _\n'
'12 les le DET _ Definite=Def|Number=Plur|PronType=Art 13 det _ _\n'
'13 Facultés Facultés PROPN _ _ 9 obl _ SpaceAfter=No\n'
'14 . . PUNCT _ _ 3 punct _ _')
sentence = Sentence(source)
st = sentence.to_tree()
assert_tree_structure(
st, {
(): sentence[2],
(0, ): sentence[1],
(1, ): sentence[4],
(2, ): sentence[8],
(3, ): sentence[13],
(0, 0): sentence[0],
(1, 0): sentence[3],
(2, 0): sentence[5],
(2, 1): sentence[7],
(2, 2): sentence[9],
(2, 3): sentence[12],
(2, 1, 0): sentence[6],
(2, 3, 0): sentence[10],
(2, 3, 1): sentence[11]
})
def test_tree_empty_sentence():
"""
Test that an empty sentence throws an error on Tree creation.
"""
source = ''
sentence = Sentence(source)
with pytest.raises(ValueError):
st = sentence.to_tree()
def test_tree_no_extra_nodes():
"""
Test that there are the right amount of nodes in the tree.
"""
source = (
'# sent_id = fr-ud-dev_00002\n'
'# text = Les études durent six ans mais leur contenu diffère donc selon les Facultés.\n'
'1 Les le DET _ Definite=Def|Gender=Fem|Number=Plur|PronType=Art 2 det _ _\n'
'2 études étude NOUN _ Gender=Fem|Number=Plur 3 nsubj _ _\n'
'3 durent durer VERB _ Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin 0 root _ _\n'
'4 six six NUM _ _ 5 nummod _ _\n'
'5 ans an NOUN _ Gender=Masc|Number=Plur 3 obj _ _\n'
'6 mais mais CCONJ _ _ 9 cc _ _\n'
'7 leur son DET _ Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs 8 det _ _\n'
'8 contenu contenu NOUN _ Gender=Masc|Number=Sing 9 nsubj _ _\n'
'9 diffère différer VERB _ Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 conj _ _\n'
'10 donc donc ADV _ _ 9 advmod _ _\n'
'11 selon selon ADP _ _ 13 case _ _\n'
'12 les le DET _ Definite=Def|Number=Plur|PronType=Art 13 det _ _\n'
'13 Facultés Facultés PROPN _ _ 9 obl _ SpaceAfter=No\n'
'14 . . PUNCT _ _ 3 punct _ _')
sentence = Sentence(source)
st = sentence.to_tree()
count = 0
nodes = [st]
while len(nodes) > 0:
count += 1
node = nodes.pop()
for child in node:
nodes.append(child)
assert len(sentence) == count
| 44.768968
| 106
| 0.593061
| 7,519
| 52,514
| 3.886288
| 0.035909
| 0.051333
| 0.048732
| 0.032853
| 0.899867
| 0.88563
| 0.868177
| 0.862359
| 0.855481
| 0.854317
| 0
| 0.042945
| 0.288323
| 52,514
| 1,172
| 107
| 44.807167
| 0.738929
| 0.045531
| 0
| 0.838043
| 0
| 0.163043
| 0.629129
| 0.184022
| 0
| 0
| 0
| 0
| 0.068478
| 1
| 0.041304
| false
| 0
| 0.004348
| 0
| 0.045652
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0d46afdd344ef8e5277bd89f22d9c6bb6fbee826
| 158
|
py
|
Python
|
dresses/admin.py
|
IvanDeveloperPro/happy_dress
|
9dd8ad3a5b5290afb708955d474a266abceff00b
|
[
"MIT"
] | null | null | null |
dresses/admin.py
|
IvanDeveloperPro/happy_dress
|
9dd8ad3a5b5290afb708955d474a266abceff00b
|
[
"MIT"
] | null | null | null |
dresses/admin.py
|
IvanDeveloperPro/happy_dress
|
9dd8ad3a5b5290afb708955d474a266abceff00b
|
[
"MIT"
] | null | null | null |
"""Admin registrations for the dress-shop models."""
from django.contrib import admin

from .models import Basket, Dress, Order

# Expose each model in the Django admin site; registration order matches
# the original one-call-per-model version (Dress, Order, Basket).
for _model in (Dress, Order, Basket):
    admin.site.register(_model)
| 19.75
| 40
| 0.803797
| 23
| 158
| 5.521739
| 0.478261
| 0.212598
| 0.401575
| 0.346457
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094937
| 158
| 7
| 41
| 22.571429
| 0.888112
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
b4d6ccae18b1a7f4d1f3b5f363fb17dd87c3e0da
| 232
|
py
|
Python
|
DQM/DTMonitorModule/python/dtDataIntegrityTask_EvF_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
DQM/DTMonitorModule/python/dtDataIntegrityTask_EvF_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
DQM/DTMonitorModule/python/dtDataIntegrityTask_EvF_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
# CMSSW configuration fragment for the DT data-integrity DQM task.
# NOTE(review): 'cms' is unused in this visible snippet; presumably it is
# used further down the file — confirm before removing.
import FWCore.ParameterSet.Config as cms
# Pulls the dtDataIntegrityTask configuration names into this module.
from DQM.DTMonitorModule.dtDataIntegrityTask_cfi import *
import EventFilter.DTRawToDigi.dturosunpacker_cfi
# Fresh clone of the uROS unpacker module so local modifications do not
# affect the shared default configuration.
dtunpacker = EventFilter.DTRawToDigi.dturosunpacker_cfi.dturosunpacker.clone()
| 25.777778
| 78
| 0.866379
| 24
| 232
| 8.25
| 0.666667
| 0.222222
| 0.363636
| 0.393939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073276
| 232
| 8
| 79
| 29
| 0.92093
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b4db3535c06f5d9c84651578adb2ddbc9e2e475b
| 2,947
|
py
|
Python
|
tests/test_schemas.py
|
chaostoolkit/chaosplatform-scheduling
|
0d98023e3fbcb53aaa7714cce36900f8b99529f4
|
[
"Apache-2.0"
] | null | null | null |
tests/test_schemas.py
|
chaostoolkit/chaosplatform-scheduling
|
0d98023e3fbcb53aaa7714cce36900f8b99529f4
|
[
"Apache-2.0"
] | 2
|
2020-06-03T12:51:35.000Z
|
2020-06-03T12:52:45.000Z
|
tests/test_schemas.py
|
chaostoolkit/chaosplatform-scheduling
|
0d98023e3fbcb53aaa7714cce36900f8b99529f4
|
[
"Apache-2.0"
] | null | null | null |
from collections import OrderedDict
from typing import Dict
from uuid import UUID
from chaosplt_scheduling.schemas import upload_scheduling_schema
def test_scheduling_request(scheduling: Dict[str, str]):
    """A minimal payload keeps the identity fields and no optional ones."""
    loaded = upload_scheduling_schema.load(scheduling)
    for field in ("org", "workspace", "token", "experiment"):
        assert str(loaded[field]) == scheduling[field]
    for absent in ("repeat", "interval", "cron", "settings",
                   "configuration", "secrets"):
        assert absent not in loaded
def test_cron_scheduling_request(scheduling: Dict[str, str]):
    """A cron expression is carried through; other optionals stay absent."""
    scheduling["cron"] = "* 8 * * *"
    loaded = upload_scheduling_schema.load(scheduling)
    for field in ("org", "workspace", "token", "experiment"):
        assert str(loaded[field]) == scheduling[field]
    assert loaded["cron"] == "* 8 * * *"
    for absent in ("repeat", "interval", "settings",
                   "configuration", "secrets"):
        assert absent not in loaded
def test_cron_scheduling_with_repeat_request(scheduling: Dict[str, str]):
    """Cron plus a repeat count both survive loading; the rest stays absent."""
    scheduling["cron"] = "* 8 * * *"
    scheduling["repeat"] = 5
    loaded = upload_scheduling_schema.load(scheduling)
    for field in ("org", "workspace", "token", "experiment"):
        assert str(loaded[field]) == scheduling[field]
    assert loaded["cron"] == "* 8 * * *"
    assert loaded["repeat"] == 5
    for absent in ("interval", "settings", "configuration", "secrets"):
        assert absent not in loaded
def test_interval_scheduling_request(scheduling: Dict[str, str]):
    """An interval is carried through; other optionals stay absent."""
    scheduling["interval"] = 10
    loaded = upload_scheduling_schema.load(scheduling)
    for field in ("org", "workspace", "token", "experiment"):
        assert str(loaded[field]) == scheduling[field]
    assert loaded["interval"] == 10
    for absent in ("repeat", "cron", "settings",
                   "configuration", "secrets"):
        assert absent not in loaded
def test_interval_with_repeat_scheduling_request(scheduling: Dict[str, str]):
    """Interval plus a repeat count both survive loading; the rest stays absent."""
    scheduling["interval"] = 10
    scheduling["repeat"] = 4
    loaded = upload_scheduling_schema.load(scheduling)
    for field in ("org", "workspace", "token", "experiment"):
        assert str(loaded[field]) == scheduling[field]
    assert loaded["interval"] == 10
    assert loaded["repeat"] == 4
    for absent in ("cron", "settings", "configuration", "secrets"):
        assert absent not in loaded
| 35.939024
| 77
| 0.661351
| 380
| 2,947
| 5.047368
| 0.094737
| 0.062565
| 0.075078
| 0.118874
| 0.893639
| 0.893639
| 0.87122
| 0.87122
| 0.827424
| 0.767987
| 0
| 0.006714
| 0.191381
| 2,947
| 81
| 78
| 36.382716
| 0.798154
| 0
| 0
| 0.814286
| 0
| 0
| 0.194096
| 0
| 0
| 0
| 0
| 0
| 0.714286
| 1
| 0.071429
| false
| 0
| 0.057143
| 0
| 0.128571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b4dddabf41106cb123d6c39787696aa4b5f04157
| 9,990
|
py
|
Python
|
apicomponents/metrics.py
|
tonnyhideyori/dependencytrack-pywrap
|
58d4a8ac8862bbdb7007ab483f38f5871ab55c48
|
[
"MIT"
] | null | null | null |
apicomponents/metrics.py
|
tonnyhideyori/dependencytrack-pywrap
|
58d4a8ac8862bbdb7007ab483f38f5871ab55c48
|
[
"MIT"
] | 5
|
2021-11-18T20:35:12.000Z
|
2021-11-25T19:03:16.000Z
|
apicomponents/metrics.py
|
tonnyhideyori/dependencytrack-pywrap
|
58d4a8ac8862bbdb7007ab483f38f5871ab55c48
|
[
"MIT"
] | 2
|
2021-11-15T19:58:15.000Z
|
2021-11-23T12:55:04.000Z
|
class DependencyTrackMetrics(object):
    """Wrapper methods for the Dependency-Track ``/v1/metrics`` REST API.

    The host class must provide:
      - ``self.session``: a requests-compatible session used for HTTP GETs.
      - ``self.apicall``: base URL of the Dependency-Track API server.

    On non-success status codes the methods return a descriptive string
    containing the status code (kept for backward compatibility with
    existing callers) rather than raising.
    """

    # Metrics
    def _collect_pages(self, path, pageSize):
        """Fetch *path* page by page; return ``(last_response, items)``.

        Pagination stops as soon as a page comes back with fewer than
        *pageSize* entries, which signals the final page.
        """
        items = []
        pageNumber = 1
        response = self.session.get(self.apicall + path, params={
            "pageSize": pageSize, "pageNumber": pageNumber})
        # BUGFIX: the original appended response.json()[metric-1] while
        # looping over range(len(...)), which rotated every page by one
        # element (the last item was emitted first). extend keeps order.
        items.extend(response.json())
        while len(response.json()) == pageSize:
            pageNumber += 1
            response = self.session.get(self.apicall + path, params={
                "pageSize": pageSize, "pageNumber": pageNumber})
            items.extend(response.json())
        return response, items

    def get_all_metrics(self, pageSize=100):
        """
        Returns the sum of all vulnerabilities in the database by year and month
        pageSize: number of entries to request per page (default 100).
        """
        response, metrics_list = self._collect_pages(
            "/v1/metrics/vulnerability", pageSize)
        if response.status_code == 200:
            return metrics_list
        else:
            return (f"Unauthorized, {response.status_code}")

    def get_metrics_portolio_bydate(self, date):
        """
        Returns historical metrics for the entire portfolio from a specific date.
        date: The start date to retrieve metric. Date format must be YYYYMMDD
        """
        # NOTE: the 'portolio' typo in the method name is preserved so
        # existing callers keep working.
        response = self.session.get(
            self.apicall + f"/v1/metrics/portfolio/since/{date}")
        if response.status_code == 200:
            return response.json()
        else:
            return (f"Unauthorized, {response.status_code}")

    def get_metrics_project_bydate(self, uuid, date):
        """
        Returns historical metrics for a specific project from a specific date
        date: The start date to retrieve metric. Date format must be YYYYMMDD.
        uuid: The UUID of the project to retrieve metrics for.
        """
        response = self.session.get(
            self.apicall + f"/v1/metrics/project/{uuid}/since/{date}")
        if response.status_code == 200:
            return response.json()
        elif response.status_code == 401:
            return (f"Unauthorized , {response.status_code}")
        elif response.status_code == 403:
            return (f"Access to the specified project is forbidden, {response.status_code}")
        elif response.status_code == 404:
            return (f"Project not found, {response.status_code}")
        else:
            return (response.status_code)

    def get_current_metrics_portfolio(self):
        """
        Returns current metrics for entire portfolio
        """
        response = self.session.get(
            self.apicall + "/v1/metrics/portfolio/current")
        if response.status_code == 200:
            return response.json()
        else:
            return (f"Unauthorized , {response.status_code}")

    def get_metrics_dayNumber(self, days):
        """
        Returns X days of historical metrics for the entire portfolio(int32)
        days: The number of days back to retrieve metrics for.
        """
        response = self.session.get(
            self.apicall + f"/v1/metrics/portfolio/{days}/days")
        if response.status_code == 200:
            return response.json()
        else:
            return (f"Unauthorized , {response.status_code}")

    def get_metrics_refresh_portfolio(self):
        """
        Requests a refresh of the portfolio metrics
        """
        response = self.session.get(
            self.apicall + "/v1/metrics/portfolio/refresh")
        if response.status_code == 200:
            return (f"successful operation , {response.status_code}")
        else:
            return (f"Unauthorized , {response.status_code}")

    def get_metrics_specific_project(self, uuid):
        """
        returns current metrics for a specific project.
        uuid: The UUID of the project to retrieve metrics for
        """
        response = self.session.get(
            self.apicall + f"/v1/metrics/project/{uuid}/current")
        if response.status_code == 200:
            return response.json()
        else:
            return (f"Unauthorized , {response.status_code}")

    def get_metrics_specific_project_days(self, uuid, days):
        """
        Returns X days of historical metrics for a specific project
        uuid: The UUID of the project to retrieve metrics for.
        days: The number of days back to retrieve metrics for.
        """
        response = self.session.get(
            self.apicall + f"/v1/metrics/project/{uuid}/days/{days}")
        if response.status_code == 200:
            return response.json()
        elif response.status_code == 401:
            return (f"Unauthorized , {response.status_code}")
        elif response.status_code == 403:
            return (f"Access to the specified project is forbidden, {response.status_code}")
        elif response.status_code == 404:
            return (f"Project not found, {response.status_code}")
        else:
            return (response.status_code)

    def get_metrics_refresh_project(self, uuid):
        """
        requests a refresh of a specific project metrics.
        uuid: The UUID of the project to retrieve metrics for.
        """
        response = self.session.get(
            self.apicall + f"/v1/metrics/project/{uuid}/refresh")
        if response.status_code == 200:
            return (f"successful operation , {response.status_code}")
        elif response.status_code == 401:
            return (f"Unauthorized , {response.status_code}")
        elif response.status_code == 403:
            return (f"Access to the specified project is forbidden , {response.status_code}")
        elif response.status_code == 404:
            return (f"Project not found, {response.status_code}")
        else:
            return (response.status_code)

    def get_current_metrics_component(self, uuid):
        """
        Returns current metrics for a specific component
        uuid: The UUID of the component to retrieve metrics for.
        """
        response = self.session.get(
            self.apicall + f"/v1/metrics/component/{uuid}/current")
        if response.status_code == 200:
            return response.json()
        elif response.status_code == 401:
            return (f"Unauthorized , {response.status_code}")
        elif response.status_code == 403:
            return (f"Access to the specified project is forbidden , {response.status_code}")
        elif response.status_code == 404:
            return (f"Project not found, {response.status_code}")
        else:
            return (response.status_code)

    def get_metrics_component_bydate(self, uuid, date, pageSize=100):
        """
        Returns historical metrics for a specific component from a specific date
        Args:
            uuid (string): The UUID of the component to retrieve metrics for.
            date (string): The start date to retrieve metrics for.(Date format must be YYYYMMDD)
            pageSize (int): number of entries to request per page (default 100).
        """
        response, metrics_list = self._collect_pages(
            f"/v1/metrics/component/{uuid}/since/{date}", pageSize)
        if response.status_code == 200:
            return metrics_list
        else:
            return (f"Unauthorized , {response.status_code}")

    def get_metrics_component_bydays(self, uuid, days, pageSize=100):
        """
        Returns X days of historical metrics for a specific component
        Args:
            uuid (string): The UUID of the component to retrieve metrics for.
            days (int32): The number of days back to retrieve metrics for.
            pageSize (int): number of entries to request per page (default 100).
        """
        # BUGFIX: the original requested /since/{days}, which is the
        # by-date endpoint; the days-back endpoint is /days/{days},
        # mirroring get_metrics_specific_project_days above.
        response, metrics_list = self._collect_pages(
            f"/v1/metrics/component/{uuid}/days/{days}", pageSize)
        if response.status_code == 200:
            return metrics_list
        else:
            return (f"Unauthorized, {response.status_code}")

    def get_metrics_component_refresh(self, uuid):
        """[Requests a refresh of a specific components metrics]
        Args:
            uuid ([string]): [The UUID of the component to retrieve metrics for.]
        Returns:
            [string]: [status code]
        """
        response = self.session.get(
            self.apicall + f"/v1/metrics/component/{uuid}/refresh")
        if response.status_code == 200:
            return (f"successful operation , {response.status_code}")
        elif response.status_code == 401:
            return (f"Unauthorized , {response.status_code}")
        elif response.status_code == 403:
            return (f"Access to the specified project is forbidden, {response.status_code}")
        elif response.status_code == 404:
            return (f"Project not found, {response.status_code}")
        else:
            return (response.status_code)
| 44.008811
| 109
| 0.5999
| 1,130
| 9,990
| 5.20708
| 0.080531
| 0.101971
| 0.180489
| 0.059823
| 0.920972
| 0.905337
| 0.879334
| 0.875425
| 0.845683
| 0.824099
| 0
| 0.018611
| 0.295395
| 9,990
| 227
| 110
| 44.008811
| 0.817304
| 0.173774
| 0
| 0.842105
| 0
| 0
| 0.233726
| 0.144541
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085526
| false
| 0
| 0
| 0
| 0.361842
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
2ea5e0654f400b2a564780898939035e29e03590
| 17,295
|
py
|
Python
|
test/test_system.py
|
MSchauperl/forcebalance
|
df2d8fd344e7442a3e304c6f0c8e31732d9241b1
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_system.py
|
MSchauperl/forcebalance
|
df2d8fd344e7442a3e304c6f0c8e31732d9241b1
|
[
"BSD-3-Clause"
] | 1
|
2019-06-25T22:04:36.000Z
|
2019-08-21T21:19:39.000Z
|
test/test_system.py
|
MSchauperl/forcebalance
|
df2d8fd344e7442a3e304c6f0c8e31732d9241b1
|
[
"BSD-3-Clause"
] | 1
|
2019-06-24T20:05:30.000Z
|
2019-06-24T20:05:30.000Z
|
from __future__ import absolute_import
from builtins import str
import unittest
import os, sys
import tarfile
from __init__ import ForceBalanceTestCase
from forcebalance.nifty import printcool_dictionary
from forcebalance.parser import parse_inputs
from forcebalance.forcefield import FF
from forcebalance.objective import Objective
from forcebalance.optimizer import Optimizer, Counter
from collections import OrderedDict
from numpy import array
from numpy import absolute
# Reference values for the system tests below; each array holds the
# parameter values a converged optimization is expected to produce.
# Update them (per the in-test messages) when results legitimately change.
# expected results (mvals) taken from previous runs. Update this if it changes and seems reasonable (updated 10/24/13)
#EXPECTED_WATER_RESULTS = array([3.3192e-02, 4.3287e-02, 5.5072e-03, -4.5933e-02, 1.5499e-02, -3.7655e-01, 2.4720e-03, 1.1914e-02, 1.5066e-01])
EXPECTED_WATER_RESULTS = array([4.2370e-02, 3.1217e-02, 5.6925e-03, -4.8114e-02, 1.6735e-02, -4.1722e-01, 6.2716e-03, 4.6306e-03, 2.5960e-01])

# expected results (mvals) taken from previous runs. Update this if it changes and seems reasonable (updated 01/24/14)
EXPECTED_BROMINE_RESULTS = array([-0.305718, -0.12497])

# expected result (pvals) taken from ethanol GB parameter optimization. Update this if it changes and seems reasonable (updated 09/05/14)
EXPECTED_ETHANOL_RESULTS = array([1.2286e-01, 8.3624e-01, 1.0014e-01, 8.4533e-01, 1.8740e-01, 6.8820e-01, 1.4606e-01, 8.3518e-01])

# fail test if we take more than this many iterations to converge. Update this as necessary
ITERATIONS_TO_CONVERGE = 5

# expected results taken from previous runs. Update this if it changes and seems reasonable (updated 07/23/14)
EXPECTED_LIPID_RESULTS = array([-6.7553e-03, -2.4070e-02])
class TestWaterTutorial(ForceBalanceTestCase):
    """System test: run the 001_water_tutorial study end to end and
    compare the optimized parameters against EXPECTED_WATER_RESULTS."""

    def setUp(self):
        # NOTE(review): super(ForceBalanceTestCase, self) skips
        # ForceBalanceTestCase's own setUp and dispatches to its base
        # class; the conventional form is super(TestWaterTutorial, self).
        # Confirm whether this is intentional before changing it.
        super(ForceBalanceTestCase,self).setUp()
        # Run from inside the study directory; unpack the bundled targets.
        os.chdir('studies/001_water_tutorial')
        targets = tarfile.open('targets.tar.bz2','r')
        targets.extractall()
        targets.close()

    def tearDown(self):
        # Remove run artifacts so a repeated run starts from a clean tree.
        os.system('rm -rf results *.bak *.tmp')
        super(ForceBalanceTestCase,self).tearDown()

    def runTest(self):
        """Check water tutorial study runs without errors"""
        self.logger.debug("\nSetting input file to 'very_simple.in'\n")
        input_file='very_simple.in'

        ## The general options and target options that come from parsing the input file
        self.logger.debug("Parsing inputs...\n")
        options, tgt_opts = parse_inputs(input_file)
        self.logger.debug("options:\n%s\n\ntgt_opts:\n%s\n\n" % (str(options), str(tgt_opts)))
        self.assertEqual(dict,type(options), msg="\nParser gave incorrect type for options")
        self.assertEqual(list,type(tgt_opts), msg="\nParser gave incorrect type for tgt_opts")
        for target in tgt_opts:
            self.assertEqual(dict, type(target), msg="\nParser gave incorrect type for target dict")

        ## The force field component of the project
        forcefield = FF(options)
        self.assertEqual(FF, type(forcefield), msg="\nExpected forcebalance forcefield object")

        ## The objective function
        objective = Objective(options, tgt_opts, forcefield)
        self.assertEqual(Objective, type(objective), msg="\nExpected forcebalance objective object")

        ## The optimizer component of the project
        self.logger.debug("Creating optimizer: ")
        optimizer = Optimizer(options, objective, forcefield)
        self.assertEqual(Optimizer, type(optimizer), msg="\nExpected forcebalance optimizer object")
        self.logger.debug(str(optimizer) + "\n")

        ## Actually run the optimizer.
        self.logger.debug("Done setting up! Running optimizer...\n")
        result = optimizer.Run()
        self.logger.debug("\nOptimizer finished. Final results:\n")
        self.logger.debug(str(result) + '\n')

        # Compare against the stored reference values (module constants above).
        self.assertNdArrayEqual(EXPECTED_WATER_RESULTS,result,delta=0.001,
            msg="\nCalculation results have changed from previously calculated values.\n"
                "If this seems reasonable, update EXPECTED_WATER_RESULTS in test_system.py with these values")

        # Fail if calculation takes longer than previously to converge
        self.assertGreaterEqual(ITERATIONS_TO_CONVERGE, Counter(), msg="\nCalculation took longer than expected to converge (%d iterations vs previous of %d)" %\
            (ITERATIONS_TO_CONVERGE, Counter()))
class TestVoelzStudy(ForceBalanceTestCase):
    """System test: run the 009_voelz_nspe study end to end.  This study
    only checks that the pipeline completes; no result values are pinned."""

    def setUp(self):
        # NOTE(review): super(ForceBalanceTestCase, self) skips the
        # immediate base's setUp; confirm intentional (see TestWaterTutorial).
        super(ForceBalanceTestCase,self).setUp()
        os.chdir('studies/009_voelz_nspe')

    def tearDown(self):
        # Remove run artifacts so a repeated run starts from a clean tree.
        os.system('rm -rf results *.bak *.tmp')
        super(ForceBalanceTestCase,self).tearDown()

    def runTest(self):
        """Check voelz study runs without errors"""
        self.logger.debug("\nSetting input file to 'options.in'\n")
        input_file='options.in'

        ## The general options and target options that come from parsing the input file
        self.logger.debug("Parsing inputs...\n")
        options, tgt_opts = parse_inputs(input_file)
        self.logger.debug("options:\n%s\n\ntgt_opts:\n%s\n\n" % (str(options), str(tgt_opts)))
        self.assertEqual(dict,type(options), msg="\nParser gave incorrect type for options")
        self.assertEqual(list,type(tgt_opts), msg="\nParser gave incorrect type for tgt_opts")
        for target in tgt_opts:
            self.assertEqual(dict, type(target), msg="\nParser gave incorrect type for target dict")

        ## The force field component of the project
        self.logger.debug("Creating forcefield using loaded options: ")
        forcefield = FF(options)
        self.logger.debug(str(forcefield) + "\n")
        self.assertEqual(FF, type(forcefield), msg="\nExpected forcebalance forcefield object")

        ## The objective function
        self.logger.debug("Creating object using loaded options and forcefield: ")
        objective = Objective(options, tgt_opts, forcefield)
        self.logger.debug(str(objective) + "\n")
        self.assertEqual(Objective, type(objective), msg="\nExpected forcebalance objective object")

        ## The optimizer component of the project
        self.logger.debug("Creating optimizer: ")
        optimizer = Optimizer(options, objective, forcefield)
        self.logger.debug(str(optimizer) + "\n")
        self.assertEqual(Optimizer, type(optimizer), msg="\nExpected forcebalance optimizer object")

        ## Actually run the optimizer.
        self.logger.debug("Done setting up! Running optimizer...\n")
        result = optimizer.Run()
        self.logger.debug("\nOptimizer finished. Final results:\n")
        self.logger.debug(str(result) + '\n')
class TestBromineStudy(ForceBalanceTestCase):
    """System test: optimize the 003_liquid_bromine study and compare the
    result against EXPECTED_BROMINE_RESULTS."""

    def setUp(self):
        # NOTE(review): super(ForceBalanceTestCase, self) skips the
        # immediate base's setUp; confirm intentional (see TestWaterTutorial).
        super(ForceBalanceTestCase,self).setUp()
        os.chdir('studies/003_liquid_bromine')

    def tearDown(self):
        # Remove run artifacts so a repeated run starts from a clean tree.
        os.system('rm -rf results *.bak *.tmp')
        super(ForceBalanceTestCase,self).tearDown()

    def runTest(self):
        """Check liquid bromine study converges to expected results"""
        # NOTE(review): the debug message says 'options.in' but the file
        # actually parsed is 'optimize.in' -- message appears stale.
        self.logger.debug("\nSetting input file to 'options.in'\n")
        input_file='optimize.in'

        ## The general options and target options that come from parsing the input file
        self.logger.debug("Parsing inputs...\n")
        options, tgt_opts = parse_inputs(input_file)
        self.logger.debug("options:\n%s\n\ntgt_opts:\n%s\n\n" % (str(options), str(tgt_opts)))
        self.assertEqual(dict,type(options), msg="\nParser gave incorrect type for options")
        self.assertEqual(list,type(tgt_opts), msg="\nParser gave incorrect type for tgt_opts")
        for target in tgt_opts:
            self.assertEqual(dict, type(target), msg="\nParser gave incorrect type for target dict")

        ## The force field component of the project
        self.logger.debug("Creating forcefield using loaded options: ")
        forcefield = FF(options)
        self.logger.debug(str(forcefield) + "\n")
        self.assertEqual(FF, type(forcefield), msg="\nExpected forcebalance forcefield object")

        ## The objective function
        self.logger.debug("Creating object using loaded options and forcefield: ")
        objective = Objective(options, tgt_opts, forcefield)
        self.logger.debug(str(objective) + "\n")
        self.assertEqual(Objective, type(objective), msg="\nExpected forcebalance objective object")

        ## The optimizer component of the project
        self.logger.debug("Creating optimizer: ")
        optimizer = Optimizer(options, objective, forcefield)
        self.logger.debug(str(optimizer) + "\n")
        self.assertEqual(Optimizer, type(optimizer), msg="\nExpected forcebalance optimizer object")

        ## Actually run the optimizer.
        self.logger.debug("Done setting up! Running optimizer...\n")
        result = optimizer.Run()
        self.logger.debug("\nOptimizer finished. Final results:\n")
        self.logger.debug(str(result) + '\n')

        # Compare against the stored reference values (module constants above).
        self.assertNdArrayEqual(EXPECTED_BROMINE_RESULTS,result,delta=0.02,
            msg="\nCalculation results have changed from previously calculated values.\n"
                "If this seems reasonable, update EXPECTED_BROMINE_RESULTS in test_system.py with these values")
class TestThermoBromineStudy(ForceBalanceTestCase):
    """System test: optimize 004_thermo_liquid_bromine (Thermo target) and
    compare the result against EXPECTED_BROMINE_RESULTS."""

    def setUp(self):
        # NOTE(review): super(ForceBalanceTestCase, self) skips the
        # immediate base's setUp; confirm intentional (see TestWaterTutorial).
        super(ForceBalanceTestCase,self).setUp()
        os.chdir('studies/004_thermo_liquid_bromine')

    def tearDown(self):
        # Remove run artifacts so a repeated run starts from a clean tree.
        os.system('rm -rf results *.bak *.tmp')
        super(ForceBalanceTestCase,self).tearDown()

    def runTest(self):
        """Check liquid bromine study (Thermo target) converges to expected results"""
        self.logger.debug("\nSetting input file to 'optimize.in'\n")
        input_file='optimize.in'

        ## The general options and target options that come from parsing the input file
        self.logger.debug("Parsing inputs...\n")
        options, tgt_opts = parse_inputs(input_file)
        self.logger.debug("options:\n%s\n\ntgt_opts:\n%s\n\n" % (str(options), str(tgt_opts)))
        self.assertEqual(dict,type(options), msg="\nParser gave incorrect type for options")
        self.assertEqual(list,type(tgt_opts), msg="\nParser gave incorrect type for tgt_opts")
        for target in tgt_opts:
            self.assertEqual(dict, type(target), msg="\nParser gave incorrect type for target dict")

        ## The force field component of the project
        self.logger.debug("Creating forcefield using loaded options: ")
        forcefield = FF(options)
        self.logger.debug(str(forcefield) + "\n")
        self.assertEqual(FF, type(forcefield), msg="\nExpected forcebalance forcefield object")

        ## The objective function
        self.logger.debug("Creating object using loaded options and forcefield: ")
        objective = Objective(options, tgt_opts, forcefield)
        self.logger.debug(str(objective) + "\n")
        self.assertEqual(Objective, type(objective), msg="\nExpected forcebalance objective object")

        ## The optimizer component of the project
        self.logger.debug("Creating optimizer: ")
        optimizer = Optimizer(options, objective, forcefield)
        self.logger.debug(str(optimizer) + "\n")
        self.assertEqual(Optimizer, type(optimizer), msg="\nExpected forcebalance optimizer object")

        ## Actually run the optimizer.
        self.logger.debug("Done setting up! Running optimizer...\n")
        result = optimizer.Run()
        self.logger.debug("\nOptimizer finished. Final results:\n")
        self.logger.debug(str(result) + '\n')

        # Compare against the stored reference values (module constants above).
        self.assertNdArrayEqual(EXPECTED_BROMINE_RESULTS,result,delta=0.02,
            msg="\nCalculation results have changed from previously calculated values.\n"
                "If this seems reasonable, update EXPECTED_BROMINE_RESULTS in test_system.py with these values")
class TestLipidStudy(ForceBalanceTestCase):
    """System test: run the 010_lipid_study and compare |result| against
    |EXPECTED_LIPID_RESULTS| (sign-insensitive comparison via absolute)."""

    def setUp(self):
        # NOTE(review): super(ForceBalanceTestCase, self) skips the
        # immediate base's setUp; confirm intentional (see TestWaterTutorial).
        super(ForceBalanceTestCase,self).setUp()
        os.chdir('studies/010_lipid_study')

    def tearDown(self):
        # Remove run artifacts so a repeated run starts from a clean tree.
        os.system('rm -rf results *.bak *.tmp')
        super(ForceBalanceTestCase,self).tearDown()

    def runTest(self):
        """Check lipid tutorial study runs without errors"""
        # NOTE(review): the debug message says 'options.in' but the file
        # actually parsed is 'simple.in' -- message appears stale.
        self.logger.debug("\nSetting input file to 'options.in'\n")
        input_file='simple.in'

        ## The general options and target options that come from parsing the input file
        self.logger.debug("Parsing inputs...\n")
        options, tgt_opts = parse_inputs(input_file)
        self.logger.debug("options:\n%s\n\ntgt_opts:\n%s\n\n" % (str(options), str(tgt_opts)))
        self.assertEqual(dict,type(options), msg="\nParser gave incorrect type for options")
        self.assertEqual(list,type(tgt_opts), msg="\nParser gave incorrect type for tgt_opts")
        for target in tgt_opts:
            self.assertEqual(dict, type(target), msg="\nParser gave incorrect type for target dict")

        ## The force field component of the project
        forcefield = FF(options)
        self.assertEqual(FF, type(forcefield), msg="\nExpected forcebalance forcefield object")

        ## The objective function
        objective = Objective(options, tgt_opts, forcefield)
        self.assertEqual(Objective, type(objective), msg="\nExpected forcebalance objective object")

        ## The optimizer component of the project
        self.logger.debug("Creating optimizer: ")
        optimizer = Optimizer(options, objective, forcefield)
        self.assertEqual(Optimizer, type(optimizer), msg="\nExpected forcebalance optimizer object")
        self.logger.debug(str(optimizer) + "\n")

        ## Actually run the optimizer.
        self.logger.debug("Done setting up! Running optimizer...\n")
        result = optimizer.Run()
        self.logger.debug("\nOptimizer finished. Final results:\n")
        self.logger.debug(str(result) + '\n')

        # Magnitudes only are compared (absolute of both sides).
        self.assertNdArrayEqual(absolute(EXPECTED_LIPID_RESULTS),absolute(result),delta=0.010,
            msg="\nCalculation results have changed from previously calculated values.\n"
                "If this seems reasonable, update EXPECTED_LIPID_RESULTS in test_system.py with these values (%s)" % result)

        # Fail if calculation takes longer than previously to converge
        self.assertGreaterEqual(ITERATIONS_TO_CONVERGE, Counter(), msg="\nCalculation took longer than expected to converge (%d iterations vs previous of %d)" %\
            (ITERATIONS_TO_CONVERGE, Counter()))
class TestImplicitSolventHFEStudy(ForceBalanceTestCase):
    """System test: run 012_implicit_solvent_hfe (Hydration target) and
    compare the resulting physical parameter values (pvals) against
    EXPECTED_ETHANOL_RESULTS."""

    def setUp(self):
        # NOTE(review): super(ForceBalanceTestCase, self) skips the
        # immediate base's setUp; confirm intentional (see TestWaterTutorial).
        super(ForceBalanceTestCase,self).setUp()
        os.chdir('studies/012_implicit_solvent_hfe')

    def tearDown(self):
        # Remove run artifacts so a repeated run starts from a clean tree.
        os.system('rm -rf results *.bak *.tmp')
        super(ForceBalanceTestCase,self).tearDown()

    def runTest(self):
        """Check implicit hydration free energy study (Hydration target) converges to expected results"""
        self.logger.debug("\nSetting input file to 'optimize.in'\n")
        input_file='optimize.in'

        ## The general options and target options that come from parsing the input file
        self.logger.debug("Parsing inputs...\n")
        options, tgt_opts = parse_inputs(input_file)
        self.logger.debug("options:\n%s\n\ntgt_opts:\n%s\n\n" % (str(options), str(tgt_opts)))
        self.assertEqual(dict,type(options), msg="\nParser gave incorrect type for options")
        self.assertEqual(list,type(tgt_opts), msg="\nParser gave incorrect type for tgt_opts")
        for target in tgt_opts:
            self.assertEqual(dict, type(target), msg="\nParser gave incorrect type for target dict")

        ## The force field component of the project
        self.logger.debug("Creating forcefield using loaded options: ")
        forcefield = FF(options)
        self.logger.debug(str(forcefield) + "\n")
        self.assertEqual(FF, type(forcefield), msg="\nExpected forcebalance forcefield object")

        ## The objective function
        self.logger.debug("Creating object using loaded options and forcefield: ")
        objective = Objective(options, tgt_opts, forcefield)
        self.logger.debug(str(objective) + "\n")
        self.assertEqual(Objective, type(objective), msg="\nExpected forcebalance objective object")

        ## The optimizer component of the project
        self.logger.debug("Creating optimizer: ")
        optimizer = Optimizer(options, objective, forcefield)
        self.logger.debug(str(optimizer) + "\n")
        self.assertEqual(Optimizer, type(optimizer), msg="\nExpected forcebalance optimizer object")

        ## Actually run the optimizer.
        self.logger.debug("Done setting up! Running optimizer...\n")
        result = optimizer.Run()
        self.logger.debug("\nOptimizer finished. Final results:\n")
        self.logger.debug(str(result) + '\n')

        # The reference values are pvals, so map the optimizer's mvals
        # through forcefield.create_pvals before comparing.
        self.assertNdArrayEqual(EXPECTED_ETHANOL_RESULTS,forcefield.create_pvals(result),delta=0.02,
            msg="\nCalculation results have changed from previously calculated values.\n"
                "If this seems reasonable, update EXPECTED_ETHANOL_RESULTS in test_system.py with these values")
# Allow the system tests to be run directly as a script.
if __name__ == '__main__':
    unittest.main()
| 49.555874
| 161
| 0.679387
| 2,114
| 17,295
| 5.491485
| 0.110218
| 0.05513
| 0.082694
| 0.03101
| 0.873374
| 0.873374
| 0.873374
| 0.873374
| 0.864071
| 0.860109
| 0
| 0.01975
| 0.20954
| 17,295
| 348
| 162
| 49.698276
| 0.82942
| 0.140792
| 0
| 0.802575
| 0
| 0
| 0.299621
| 0.032245
| 0
| 0
| 0
| 0
| 0.184549
| 1
| 0.077253
| false
| 0
| 0.060086
| 0
| 0.16309
| 0.004292
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2eafff2dbd5bbfdb60e24672e7755799ba6c197c
| 103
|
py
|
Python
|
Session-1/Functions/S1F2.py
|
saianuragpeddu/python-assignemts
|
a6bb192f2c0ef8ea86531c1a98f1b76150fa474b
|
[
"MIT"
] | null | null | null |
Session-1/Functions/S1F2.py
|
saianuragpeddu/python-assignemts
|
a6bb192f2c0ef8ea86531c1a98f1b76150fa474b
|
[
"MIT"
] | null | null | null |
Session-1/Functions/S1F2.py
|
saianuragpeddu/python-assignemts
|
a6bb192f2c0ef8ea86531c1a98f1b76150fa474b
|
[
"MIT"
] | 1
|
2019-07-06T02:37:58.000Z
|
2019-07-06T02:37:58.000Z
|
def subtractNumber(x, y):
    """Return the difference of the two operands, x - y."""
    difference = x - y
    return difference


# Demonstrate the function with positive and negative operands.
print(subtractNumber(20,7))
print(subtractNumber(-20,-7))
| 17.166667
| 29
| 0.68932
| 15
| 103
| 4.733333
| 0.533333
| 0.056338
| 0.591549
| 0.619718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068182
| 0.145631
| 103
| 5
| 30
| 20.6
| 0.738636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0.25
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
|
0
| 7
|
2ecc4ae0a8178f68554dfb8829afd8bdf8f4a26b
| 150
|
py
|
Python
|
model/word_kafka_message.py
|
Scandinaf/ll_free
|
7d35dce5955f11e4af52400f961c76c9904c2f05
|
[
"Apache-2.0"
] | null | null | null |
model/word_kafka_message.py
|
Scandinaf/ll_free
|
7d35dce5955f11e4af52400f961c76c9904c2f05
|
[
"Apache-2.0"
] | null | null | null |
model/word_kafka_message.py
|
Scandinaf/ll_free
|
7d35dce5955f11e4af52400f961c76c9904c2f05
|
[
"Apache-2.0"
] | null | null | null |
class WordKafkaMessage:
    """Value holder that serialises a single word as a Kafka message body."""

    def __init__(self, word):
        # Keep the payload word verbatim; no validation is performed.
        self.word = word

    def build_kafka_message(self):
        """Return the message payload as a dict keyed by 'word'."""
        payload = {'word': self.word}
        return payload
| 25
| 35
| 0.64
| 18
| 150
| 5
| 0.555556
| 0.266667
| 0.266667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.253333
| 150
| 6
| 35
| 25
| 0.803571
| 0
| 0
| 0
| 0
| 0
| 0.02649
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
258b8c10658d3bc35caed2b44392b09823634b47
| 198
|
py
|
Python
|
qca_tools/__init__.py
|
joseppinilla/qca-tools
|
786591ba5d3542476e7d778d782f69ad165b8d7e
|
[
"MIT"
] | 1
|
2018-12-05T20:06:59.000Z
|
2018-12-05T20:06:59.000Z
|
qca_tools/__init__.py
|
joseppinilla/qca-tools
|
786591ba5d3542476e7d778d782f69ad165b8d7e
|
[
"MIT"
] | null | null | null |
qca_tools/__init__.py
|
joseppinilla/qca-tools
|
786591ba5d3542476e7d778d782f69ad165b8d7e
|
[
"MIT"
] | null | null | null |
from qca_tools import qca_network
from qca_tools.qca_network import *
from qca_tools import composite
from qca_tools.composite import *
from qca_tools import parse_qca
from qca_tools import auxil
| 22
| 35
| 0.848485
| 33
| 198
| 4.818182
| 0.242424
| 0.264151
| 0.45283
| 0.45283
| 0.301887
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131313
| 198
| 8
| 36
| 24.75
| 0.924419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
258d442a0a5e15257f564ff4ce8861bcab8035ca
| 30
|
py
|
Python
|
Chapter 01/Chap01_Example1.150.py
|
bpbpublications/Programming-Techniques-using-Python
|
49b785f37e95a3aad1d36cef51e219ac56e5e9f0
|
[
"MIT"
] | null | null | null |
Chapter 01/Chap01_Example1.150.py
|
bpbpublications/Programming-Techniques-using-Python
|
49b785f37e95a3aad1d36cef51e219ac56e5e9f0
|
[
"MIT"
] | null | null | null |
Chapter 01/Chap01_Example1.150.py
|
bpbpublications/Programming-Techniques-using-Python
|
49b785f37e95a3aad1d36cef51e219ac56e5e9f0
|
[
"MIT"
] | null | null | null |
# Sets are unordered and do not support indexing, so the subscript below
# raises TypeError: 'set' object is not subscriptable.  Given the book
# chapter context, this example presumably demonstrates that error on
# purpose -- do not "fix" it without checking the accompanying text.
s5 = {1,2,3,4}
print(s5[0])
| 10
| 15
| 0.466667
| 8
| 30
| 1.75
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.291667
| 0.2
| 30
| 2
| 16
| 15
| 0.291667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
25e6c8f5cc85c6d2c471e37f8989aadb8f738556
| 13,409
|
py
|
Python
|
tests/test_function_wrapper.py
|
ionelmc/wrapt
|
4abbac872ccf0c253374277ce7c72f188b8469b7
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_function_wrapper.py
|
ionelmc/wrapt
|
4abbac872ccf0c253374277ce7c72f188b8469b7
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_function_wrapper.py
|
ionelmc/wrapt
|
4abbac872ccf0c253374277ce7c72f188b8469b7
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import print_function
import unittest
import wrapt
import wrapt.wrappers
from wrapt import six
class TestClassInheritence(unittest.TestCase):
    """Wrapped callables must remain instances of wrapt's proxy types."""

    def test_function_type_inheritence(self):
        @wrapt.decorator
        def _decorator(wrapped, instance, args, kwargs):
            return wrapped(*args, **kwargs)

        @_decorator
        def _function(*args, **kwargs):
            return args, kwargs

        self.assertTrue(isinstance(_function, wrapt.FunctionWrapper))
        self.assertTrue(isinstance(_function, wrapt.ObjectProxy))

    def test_instancemethod_type_inheritence(self):
        @wrapt.decorator
        def _decorator(wrapped, instance, args, kwargs):
            return wrapped(*args, **kwargs)

        class Class(object):
            @_decorator
            def function(self, args, **kwargs):
                return args, kwargs

            # These assertions run inside the class body, where the bare
            # name 'function' is the class-body local (the not-yet-bound
            # wrapper) and 'self' resolves to the enclosing test method's
            # local through normal scoping rules.  (Indentation was
            # reconstructed; this is the only placement where both names
            # resolve -- confirm against upstream wrapt if in doubt.)
            self.assertTrue(isinstance(function, wrapt.FunctionWrapper))
            self.assertTrue(isinstance(function, wrapt.ObjectProxy))

        instance = Class()

        # Accessed through an instance the wrapper is rebound, so it is no
        # longer a FunctionWrapper -- but it is still an ObjectProxy.
        self.assertFalse(isinstance(instance.function, wrapt.FunctionWrapper))
        self.assertTrue(isinstance(instance.function, wrapt.ObjectProxy))

    def test_classmethod_type_inheritence(self):
        @wrapt.decorator
        def _decorator(wrapped, instance, args, kwargs):
            return wrapped(*args, **kwargs)

        class Class(object):
            @_decorator
            @classmethod
            def function(cls, *args, **kwargs):
                return args, kwargs

            # Class-body assertions; see note in
            # test_instancemethod_type_inheritence above.
            self.assertTrue(isinstance(function, wrapt.FunctionWrapper))
            self.assertTrue(isinstance(function, wrapt.ObjectProxy))

        instance = Class()

        self.assertFalse(isinstance(instance.function, wrapt.FunctionWrapper))
        self.assertTrue(isinstance(instance.function, wrapt.ObjectProxy))

    def test_staticmethod_type_inheritence(self):
        @wrapt.decorator
        def _decorator(wrapped, instance, args, kwargs):
            return wrapped(*args, **kwargs)

        class Class(object):
            @_decorator
            @staticmethod
            def function(*args, **kwargs):
                return args, kwargs

            # Class-body assertions; see note in
            # test_instancemethod_type_inheritence above.
            self.assertTrue(isinstance(function, wrapt.FunctionWrapper))
            self.assertTrue(isinstance(function, wrapt.ObjectProxy))

        instance = Class()

        self.assertFalse(isinstance(instance.function, wrapt.FunctionWrapper))
        self.assertTrue(isinstance(instance.function, wrapt.ObjectProxy))
class TestAttributeAccess(unittest.TestCase):
    """Wrappers must expose __wrapped__ plus the _self_* bookkeeping
    attributes (_self_wrapper, _self_binding, _self_instance)."""

    def test_function_attributes(self):
        def decorator1(wrapped, instance, args, kwargs):
            return wrapped(*args, **kwargs)

        decorator2 = wrapt.decorator(decorator1)

        def function1(*args, **kwargs):
            return args, kwargs

        function2 = decorator2(function1)

        self.assertEqual(function2.__wrapped__, function1)
        self.assertEqual(function2._self_wrapper, decorator1)
        self.assertEqual(function2._self_binding, 'function')

    def test_instancemethod_attributes(self):
        def decorator1(wrapped, instance, args, kwargs):
            return wrapped(*args, **kwargs)

        decorator2 = wrapt.decorator(decorator1)

        class Class(object):
            def function1(self, *args, **kwargs):
                return args, kwargs

            function2 = decorator2(function1)

            # These assertions run inside the class body: 'function1' and
            # 'function2' are class-body locals and 'self' resolves to the
            # enclosing test method's local through normal scoping rules.
            # (Indentation reconstructed; only placement where the bare
            # names resolve -- confirm against upstream wrapt if in doubt.)
            self.assertEqual(function2.__wrapped__, function1)
            self.assertEqual(function2._self_wrapper, decorator1)
            self.assertEqual(function2._self_binding, 'function')

        instance = Class()

        # Through an instance, both names rebind consistently.
        self.assertEqual(instance.function2.__wrapped__, instance.function1)
        self.assertEqual(instance.function2._self_instance, instance)
        self.assertEqual(instance.function2._self_wrapper, decorator1)

    def test_classmethod_attributes(self):
        def decorator1(wrapped, instance, args, kwargs):
            return wrapped(*args, **kwargs)

        decorator2 = wrapt.decorator(decorator1)

        class Class(object):
            @classmethod
            def function1(cls, *args, **kwargs):
                return args, kwargs

            function2 = decorator2(function1)

            # Class-body assertions; see note in
            # test_instancemethod_attributes above.
            self.assertEqual(function2.__wrapped__, function1)
            self.assertEqual(function2._self_wrapper, decorator1)
            self.assertEqual(function2._self_binding, 'classmethod')

        instance = Class()

        self.assertEqual(instance.function2.__wrapped__, instance.function1)
        self.assertEqual(instance.function2._self_instance, instance)
        self.assertEqual(instance.function2._self_wrapper, decorator1)

    def test_staticmethod_attributes(self):
        def decorator1(wrapped, instance, args, kwargs):
            return wrapped(*args, **kwargs)

        decorator2 = wrapt.decorator(decorator1)

        class Class(object):
            @staticmethod
            def function1(*args, **kwargs):
                return args, kwargs

            function2 = decorator2(function1)

            # Class-body assertions; see note in
            # test_instancemethod_attributes above.
            self.assertEqual(function2.__wrapped__, function1)
            self.assertEqual(function2._self_wrapper, decorator1)
            self.assertEqual(function2._self_binding, 'staticmethod')

        instance = Class()

        self.assertEqual(instance.function2.__wrapped__, instance.function1)
        self.assertEqual(instance.function2._self_instance, instance)
        self.assertEqual(instance.function2._self_wrapper, decorator1)
class TestParentReference(unittest.TestCase):
    """Check the _self_parent attribute: None on free-standing wrappers,
    non-None on bound wrappers obtained through a class."""

    def test_function_decorator(self):
        @wrapt.decorator
        def _decorator(wrapped, instance, args, kwargs):
            return wrapped(*args, **kwargs)

        @_decorator
        def function():
            pass

        self.assertEqual(function._self_parent, None)

    def test_class_decorator(self):
        @wrapt.decorator
        def _decorator(wrapped, instance, args, kwargs):
            return wrapped(*args, **kwargs)

        @_decorator
        class Class:
            pass

        self.assertEqual(Class._self_parent, None)

    def test_instancemethod(self):
        @wrapt.decorator
        def _decorator(wrapped, instance, args, kwargs):
            return wrapped(*args, **kwargs)

        class Class:
            @_decorator
            def function_im(self):
                pass

        c = Class()

        self.assertNotEqual(c.function_im._self_parent, None)
        self.assertNotEqual(Class.function_im._self_parent, None)

    def test_classmethod(self):
        @wrapt.decorator
        def _decorator(wrapped, instance, args, kwargs):
            return wrapped(*args, **kwargs)

        class Class:
            @_decorator
            @classmethod
            def function_cm(cls):
                pass

        self.assertNotEqual(Class.function_cm._self_parent, None)

    def test_staticmethod_inner(self):
        @wrapt.decorator
        def _decorator(wrapped, instance, args, kwargs):
            return wrapped(*args, **kwargs)

        class Class:
            @_decorator
            @staticmethod
            def function_sm_inner():
                pass

        self.assertNotEqual(Class.function_sm_inner._self_parent, None)
class TestGuardArgument(unittest.TestCase):
    """Check the `enabled` argument of wrapt.decorator: static booleans,
    truthy objects re-evaluated per call, and callable guards."""

    def test_boolean_false_guard_on_decorator(self):
        @wrapt.decorator(enabled=False)
        def _decorator(wrapped, instance, args, kwargs):
            return wrapped(*args, **kwargs)

        @_decorator
        def function():
            pass

        # enabled=False at decoration time: the function is not wrapped at all.
        self.assertFalse(isinstance(function, wrapt.FunctionWrapper))

    def test_boolean_true_guard_on_decorator(self):
        @wrapt.decorator(enabled=True)
        def _decorator(wrapped, instance, args, kwargs):
            return wrapped(*args, **kwargs)

        @_decorator
        def function():
            pass

        self.assertTrue(isinstance(function, wrapt.FunctionWrapper))

    def test_boolean_dynamic_guard_on_decorator(self):
        class Guard(object):
            value = True

            def __nonzero__(self):
                return self.value
            # Same truth hook for Python 3.
            __bool__ = __nonzero__

        guard = Guard()
        result = []

        @wrapt.decorator(enabled=guard)
        def _decorator(wrapped, instance, args, kwargs):
            result.append(1)
            return wrapped(*args, **kwargs)

        @_decorator
        def function():
            pass

        self.assertTrue(isinstance(function, wrapt.FunctionWrapper))

        function()
        self.assertNotEqual(len(result), 0)

        result = []
        guard.value = False

        # Guard now falsy: wrapper body must be skipped.
        function()
        self.assertEqual(len(result), 0)

    def test_function_guard_on_decorator(self):
        value = True

        def guard():
            return value

        result = []

        @wrapt.decorator(enabled=guard)
        def _decorator(wrapped, instance, args, kwargs):
            result.append(1)
            return wrapped(*args, **kwargs)

        @_decorator
        def function():
            pass

        self.assertTrue(isinstance(function, wrapt.FunctionWrapper))

        function()
        self.assertNotEqual(len(result), 0)

        result = []
        value = False

        function()
        self.assertEqual(len(result), 0)

    def test_guard_on_instancemethod(self):
        value = True

        def guard():
            return value

        result = []

        @wrapt.decorator(enabled=guard)
        def _decorator(wrapped, instance, args, kwargs):
            result.append(1)
            return wrapped(*args, **kwargs)

        class Class(object):
            @_decorator
            def function(self):
                pass

        c = Class()

        self.assertTrue(isinstance(c.function, wrapt.BoundFunctionWrapper))

        c.function()
        self.assertNotEqual(len(result), 0)

        result = []
        value = False

        self.assertTrue(isinstance(c.function, wrapt.BoundFunctionWrapper))

        c.function()
        self.assertEqual(len(result), 0)
class TestDerivedFunctionWrapper(unittest.TestCase):
    """Check that a FunctionWrapper subclass can override the type used
    for bound wrappers via __bound_function_wrapper__."""

    def test_override_bound_type(self):
        class _BoundFunctionWrapper(wrapt.BoundFunctionWrapper):
            ATTRIBUTE = 1

        class _FunctionWrapper(wrapt.FunctionWrapper):
            __bound_function_wrapper__ = _BoundFunctionWrapper

        def function():
            pass

        def wrapper(wrapped, instance, args, kwargs):
            return wrapped(*args, **kwargs)

        _wrapper = _FunctionWrapper(function, wrapper)

        self.assertTrue(isinstance(_wrapper, _FunctionWrapper))

        instance = object()

        _bound_wrapper = _wrapper.__get__(instance, type(instance))

        # Binding must produce the overridden type, with its attributes.
        self.assertTrue(isinstance(_bound_wrapper, _BoundFunctionWrapper))
        self.assertEqual(_bound_wrapper.ATTRIBUTE, 1)
class TestFunctionBinding(unittest.TestCase):
    """Check descriptor binding behaviour of FunctionWrapper:
    binding twice to the same instance is idempotent, and a wrapper
    first bound to None can be re-bound to a real instance."""

    def test_double_binding(self):
        def function():
            pass

        def wrapper(wrapped, instance, args, kwargs):
            return wrapped(*args, **kwargs)

        _wrapper = wrapt.FunctionWrapper(function, wrapper)

        self.assertTrue(isinstance(_wrapper, wrapt.FunctionWrapper))

        instance = object()

        _bound_wrapper_1 = _wrapper.__get__(instance, type(instance))

        self.assertTrue(_bound_wrapper_1._self_parent is _wrapper)
        self.assertTrue(isinstance(_bound_wrapper_1,
                wrapt.BoundFunctionWrapper))
        self.assertEqual(_bound_wrapper_1._self_instance, instance)

        _bound_wrapper_2 = _bound_wrapper_1.__get__(instance, type(instance))

        self.assertTrue(_bound_wrapper_2._self_parent is _wrapper)
        self.assertTrue(isinstance(_bound_wrapper_2,
                wrapt.BoundFunctionWrapper))
        self.assertEqual(_bound_wrapper_2._self_instance,
                _bound_wrapper_1._self_instance)

        # Re-binding an already-bound wrapper returns the same object.
        self.assertTrue(_bound_wrapper_1 is _bound_wrapper_2)

    def test_re_bind_after_none(self):
        def function():
            pass

        def wrapper(wrapped, instance, args, kwargs):
            return wrapped(*args, **kwargs)

        _wrapper = wrapt.FunctionWrapper(function, wrapper)

        self.assertTrue(isinstance(_wrapper, wrapt.FunctionWrapper))

        instance = object()

        _bound_wrapper_1 = _wrapper.__get__(None, type(instance))

        self.assertTrue(_bound_wrapper_1._self_parent is _wrapper)
        self.assertTrue(isinstance(_bound_wrapper_1,
                wrapt.BoundFunctionWrapper))
        self.assertEqual(_bound_wrapper_1._self_instance, None)

        _bound_wrapper_2 = _bound_wrapper_1.__get__(instance, type(instance))

        self.assertTrue(_bound_wrapper_2._self_parent is _wrapper)
        self.assertTrue(isinstance(_bound_wrapper_2,
                wrapt.BoundFunctionWrapper))
        self.assertEqual(_bound_wrapper_2._self_instance, instance)

        # Binding to an instance after binding to None creates a new wrapper.
        self.assertTrue(_bound_wrapper_1 is not _bound_wrapper_2)
class TestInvalidWrapper(unittest.TestCase):
    """Check that wrapping None and then calling through the bound wrapper
    fails with AttributeError rather than something less specific."""

    def test_none_for_wrapped(self):
        def run(*args):
            def _wrapper(wrapped, instance, args, kwargs):
                return wrapped(*args, **kwargs)

            wrapper = wrapt.FunctionWrapper(None, _wrapper)
            wrapper.__get__(list(), list)()

        self.assertRaises(AttributeError, run, ())
if __name__ == '__main__':
    unittest.main()
| 29.797778
| 78
| 0.640167
| 1,268
| 13,409
| 6.502366
| 0.071767
| 0.072771
| 0.052395
| 0.066707
| 0.839539
| 0.78581
| 0.78205
| 0.742389
| 0.741419
| 0.736568
| 0
| 0.009902
| 0.269446
| 13,409
| 449
| 79
| 29.864143
| 0.831768
| 0
| 0
| 0.714286
| 0
| 0
| 0.003505
| 0
| 0
| 0
| 0
| 0
| 0.237013
| 1
| 0.220779
| false
| 0.042208
| 0.016234
| 0.097403
| 0.412338
| 0.003247
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
25f37ba8eac73c13a59aa0d928f00476f37ee9fe
| 23,445
|
py
|
Python
|
lab4/src/lab4.py
|
derry95922/STV
|
d6f3893c6c6812dc35970513d83e375f4a90c7c9
|
[
"Apache-2.0"
] | null | null | null |
lab4/src/lab4.py
|
derry95922/STV
|
d6f3893c6c6812dc35970513d83e375f4a90c7c9
|
[
"Apache-2.0"
] | 1
|
2022-01-21T23:23:56.000Z
|
2022-01-21T23:23:56.000Z
|
lab4/src/lab4.py
|
derry95922/STV
|
d6f3893c6c6812dc35970513d83e375f4a90c7c9
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from time import sleep
from datetime import datetime
def wait_element_is_visible(driver, locate, timeout=10):
    """Wait until the element matching XPath `locate` is visible and return it.

    On timeout (or any other error) the driver is quit and None is returned.
    Fix: `element` is pre-initialised — previously a timeout raised NameError
    from the `finally` block because `element` was never assigned.
    """
    element = None
    try:
        wait = WebDriverWait(driver, timeout)
        element = wait.until(EC.visibility_of_element_located((By.XPATH, locate)))
    except Exception:
        driver.quit()
    finally:
        # NOTE: return in finally deliberately swallows any remaining error,
        # matching the original best-effort behaviour.
        return element
def wait_plus_icon_is_visible(driver, title, subtitle, timeout=10):
    """Wait for the 'Create' (+) icon of the `subtitle` list inside the
    `title` dashboard section and return it; quit the driver and return
    None on failure (element pre-initialised to avoid NameError).
    """
    element = None
    try:
        wait = WebDriverWait(driver, timeout)
        element = wait.until(EC.visibility_of_element_located((By.XPATH, "//*[@data-section-label='%s']//*[contains(@data-list-path,'%s')]//a[@title='Create']" % (title, subtitle))))
    except Exception:
        driver.quit()
    finally:
        return element
def wait_web_herf_is_visible(driver, subtitle, timeout=10):
    """Wait for the dashboard-group label link of the `subtitle` list and
    return it; quit the driver and return None on failure (element
    pre-initialised to avoid NameError on timeout).

    NOTE(review): 'herf' looks like a typo for 'href', but the name is part
    of the public interface used by the tests below, so it is kept.
    """
    element = None
    try:
        wait = WebDriverWait(driver, timeout)
        element = wait.until(EC.visibility_of_element_located((By.XPATH, "//*[contains(@data-list-path,'%s')]//*[@class='dashboard-group__list-label']" % (subtitle))))
    except Exception:
        driver.quit()
    finally:
        return element
def input_text(driver, locate, text):
    """Wait for the element at XPath `locate`, then type `text` into it.

    Quits the driver on any failure (best-effort, mirrors the wait helpers).
    """
    try:
        wait_element_is_visible(driver, locate)
        driver.find_element_by_xpath(locate).send_keys(text)
    except Exception:
        driver.quit()
def select_dropdown_by_field_name(driver, field_name, target, timeout=10):
    """Open the Select widget labelled `field_name` and click the option
    whose text equals `target`.

    `timeout` is currently unused (kept for interface compatibility).
    Quits the driver on any failure.
    """
    try:
        wait_element_is_visible(driver, "//*[@for='%s']//*[@class='Select-multi-value-wrapper']" % (field_name)).click()
        wait_element_is_visible(driver, "//*[@for='%s']//*[text()='%s']" % (field_name, target)).click()
    except Exception:
        driver.quit()
def login():
    """Start Chrome, open the local demo site and sign in as the demo admin.

    Returns the signed-in WebDriver instance.
    """
    driver = webdriver.Chrome()
    driver.get("http://127.0.0.1:3000/")
    admin = {'account': 'demo@keystonejs.com', 'password': 'demo'}
    driver.maximize_window()
    wait_element_is_visible(driver, "//*[text()='Sign in']").click()
    input_text(driver, "//*[@name='email']", admin['account'])
    input_text(driver, "//*[@name='password']", admin['password'])
    wait_element_is_visible(driver, "//*[text()='Sign In']").click()
    # Landing on the admin dashboard confirms the login succeeded.
    wait_element_is_visible(driver, "//div[text()='Demo']")
    return driver
def logout(driver):
    """Click the sign-out icon and dispose of the driver."""
    wait_element_is_visible(driver, "//*[@class='octicon octicon-sign-out']").click()
    driver.quit()
def create_post(driver):
    """Create a post named 'CreatePost' via the admin UI, verify it is
    listed, then return to the dashboard."""
    wait_plus_icon_is_visible(driver, 'Posts', 'posts').click()
    wait_element_is_visible(driver, "//*[@data-screen-id='modal-dialog']")
    input_text(driver, "//*[@name='name']", "CreatePost")
    wait_element_is_visible(driver, "//*[@data-button-type='submit']").click()
    wait_element_is_visible(driver, "//*[@class='css-2960tt']")
    wait_element_is_visible(driver, "//*[text()='Save']").click()
    wait_element_is_visible(driver, "//*[@data-list-path='posts']").click()
    wait_element_is_visible(driver, "//*[@class='ItemList-wrapper']")
    assert "CreatePost" in driver.find_element_by_xpath("//*[text()='CreatePost']").text
    wait_element_is_visible(driver, "//*[@class='octicon octicon-home']").click()
    wait_element_is_visible(driver, "//div[text()='Demo']")
def delete_post(driver):
    """Delete the 'CreatePost' post and assert the list is now empty."""
    wait_element_is_visible(driver, "//*[contains(@data-list-path,'posts')]").click()
    wait_element_is_visible(driver, "//*[@class='ItemList__col']//*[text()='CreatePost']/../preceding-sibling::*").click()
    wait_element_is_visible(driver, "//*[@data-button-type='confirm']").click()
    wait_element_is_visible(driver, "//*[text()='No posts found...']")
    noPosts = driver.find_element(By.XPATH, "//*[text()='No posts found...']").text
    assert "No posts found..." == noPosts
def create_comment(driver):
    """Create a comment by 'Demo User' on 'CreatePost' and verify the new
    item's id appears in the comments list."""
    wait_element_is_visible(driver, "//*[@class='octicon octicon-home']").click()
    wait_element_is_visible(driver, "//div[text()='Demo']")
    wait_plus_icon_is_visible(driver, 'Posts', 'comments').click()
    wait_element_is_visible(driver, "//*[@data-screen-id='modal-dialog']")
    select_dropdown_by_field_name(driver, "author", "Demo User")
    select_dropdown_by_field_name(driver, "post", "CreatePost")
    wait_element_is_visible(driver, "//*[@data-button-type='submit']").click()
    wait_element_is_visible(driver, "//*[@class='css-2960tt']")
    wait_element_is_visible(driver, "//*[text()='Save']").click()
    wait_element_is_visible(driver, "//*[@class='css-ctpeu']")
    assert "Your changes have been saved successfully" in driver.find_element_by_xpath("//*[@class='css-ctpeu']").text
    actualID = driver.find_element(By.XPATH, "//*[@class='EditForm__name-field']").text
    wait_element_is_visible(driver, "//*[contains(@data-list-path,'comments')]").click()
    wait_element_is_visible(driver, "//*[@class='ItemList-wrapper']")
    expectID = driver.find_element(By.XPATH, "//*[contains(@class,'ItemList__value--id')]").text
    assert actualID == expectID
def delete_comment(driver):
    """Delete the comment on 'CreatePost' and assert the list is empty."""
    wait_element_is_visible(driver, "//*[contains(@data-list-path,'comments')]").click()
    wait_element_is_visible(driver, "//*[@class='ItemList__col']//*[text()='CreatePost']/../preceding-sibling::*").click()
    wait_element_is_visible(driver, "//*[@data-button-type='confirm']").click()
    wait_element_is_visible(driver, "//*[text()='No comments found...']")
    noPosts = driver.find_element(By.XPATH, "//*[text()='No comments found...']").text
    assert "No comments found..." == noPosts
def create_category(driver):
    """Create a category named 'CreateCategory' and verify it is listed."""
    wait_plus_icon_is_visible(driver, 'Posts', 'categories').click()
    wait_element_is_visible(driver, "//*[@data-screen-id='modal-dialog']")
    input_text(driver, "//*[@name='name']", "CreateCategory")
    wait_element_is_visible(driver, "//*[@data-button-type='submit']").click()
    wait_element_is_visible(driver, "//*[contains(@data-list-path,'categories')]").click()
    wait_element_is_visible(driver, "//*[@class='ItemList-wrapper']")
    assert "CreateCategory" in driver.find_element(By.XPATH, "//*[contains(@class,'ItemList__value--text')]").text
def delete_category(driver):
    """Delete the current category from its edit page via the trash icon."""
    wait_element_is_visible(driver, "//*[contains(@data-list-path,'categories')]").click()
    wait_element_is_visible(driver, "//*[@class='css-1xkojxp']")
    wait_element_is_visible(driver, "//*[@class='octicon octicon-trashcan']").click()
    wait_element_is_visible(driver, "//*[@data-screen-id='modal-dialog']")
    wait_element_is_visible(driver, "//*[@data-button-type='confirm']").click()
    wait_element_is_visible(driver, "//*[@class='css-l1jroy']")
def create_enquiry():
    """Submit an enquiry through the public Contact page in a fresh browser
    session (no admin login) and assert the success banner appears."""
    driver = webdriver.Chrome()
    driver.get("http://127.0.0.1:3000/")
    wait_element_is_visible(driver, "//li//*[text()='Contact']").click()
    input_text(driver, "//*[@name='name.full']", "testName")
    input_text(driver, "//*[@name='email']", "demo@keystonejs.com")
    input_text(driver, "//*[@name='phone']", "0987654321")
    wait_element_is_visible(driver, "//*[@name='enquiryType']").click()
    wait_element_is_visible(driver, "//*[text()='Just leaving a message']").click()
    input_text(driver, "//*[@name='message']", "testMessage")
    wait_element_is_visible(driver, "//*[text()='Submit']").click()
    wait_element_is_visible(driver, "//*[text()='Success!']")
    assert "Success" in driver.find_element(By.XPATH, "//*[text()='Success!']").text
    driver.quit()
def delete_enquiry(driver):
    """Delete the first enquiry from the admin UI and assert the list is empty."""
    wait_element_is_visible(driver, "//a[text()='Enquiries']").click()
    wait_element_is_visible(driver, "//*[@class='ItemList-wrapper']")
    wait_element_is_visible(driver, "//*[contains(@class,'ItemList__value--name')]/../preceding-sibling::*").click()
    wait_element_is_visible(driver, "//*[@data-screen-id='modal-dialog']")
    wait_element_is_visible(driver, "//*[@data-button-type='confirm']").click()
    wait_element_is_visible(driver, "//*[text()='No enquiries found...']")
    noEnquiries = driver.find_element(By.XPATH, "//*[text()='No enquiries found...']").text
    assert "No enquiries found..." == noEnquiries
class test_suite(unittest.TestCase):
    """End-to-end UI scenarios for the KeystoneJS demo site.

    Method names deliberately do not start with 'test_' — they are run via
    the explicit suite() builder below. Each scenario manages its own
    login/logout so no setUp/tearDown hooks are used.
    """

    def create_post_on_the_admin_ui_page(self):
        print('---Create post on the Admin UI page---')
        driver = login()
        wait_plus_icon_is_visible(driver, 'Posts', 'posts').click()
        wait_element_is_visible(driver, "//*[@data-screen-id='modal-dialog']")
        input_text(driver, "//*[@name='name']", "CreatePost")
        wait_element_is_visible(driver, "//*[@data-button-type='submit']").click()
        wait_element_is_visible(driver, "//*[@class='css-2960tt']")
        wait_element_is_visible(driver, "//*[text()='Save']").click()
        wait_element_is_visible(driver, "//*[@data-list-path='posts']").click()
        wait_element_is_visible(driver, "//*[@class='ItemList-wrapper']")
        assert "CreatePost" in driver.find_element_by_xpath("//*[text()='CreatePost']").text
        delete_post(driver)
        logout(driver)

    def edit_post_on_the_admin_ui_page(self):
        print('---edit_post_on_the_admin_ui_page---')
        driver = login()
        create_post(driver)
        wait_web_herf_is_visible(driver, "posts").click()
        wait_element_is_visible(driver, "//*[@class='ItemList-wrapper']")
        assert "Demo User" not in driver.find_element_by_xpath("//*[@class='ItemList__col' and not(child::*)]").text
        wait_element_is_visible(driver, "//*[text()='CreatePost']").click()
        wait_element_is_visible(driver, "//*[@class='css-1wrt3l9 field-type-relationship']")
        select_dropdown_by_field_name(driver, "author", "Demo User")
        # Scroll the Relationships section into view before saving.
        ActionChains(driver).move_to_element(driver.find_element_by_xpath("//*[text()='Relationships']")).perform()
        wait_element_is_visible(driver, "//*[@class='css-2960tt']").click()  # save_btn
        wait_element_is_visible(driver, "//*[@class='active']//*[text()='Posts']").click()
        assert "Demo User" in driver.find_element_by_xpath("//*[contains(@class, 'ItemList__value--relationship')]").text
        delete_post(driver)
        logout(driver)

    def search_posts_by_keyword_on_the_admin_ui_page(self):
        print('---search_posts_by_keyword_on_the_admin_ui_page---')
        driver = login()
        create_post(driver)
        wait_web_herf_is_visible(driver, "posts").click()
        wait_element_is_visible(driver, "//*[@class='ItemList-wrapper']")
        input_text(driver, "//*[@class='css-foh633']", "CreatePost")
        expectResult = driver.find_element(By.XPATH, "//*[@class='css-foh633']").get_attribute("value")
        actualResult = driver.find_element(By.XPATH, "//*[contains(@class,'ItemList__value--text')]").text
        assert expectResult in actualResult
        wait_element_is_visible(driver, "//*[@class='css-1h0bkr6']").click()  # X_btn
        input_text(driver, "//*[@class='css-foh633']", "expectNoResult")
        expectNoResult = driver.find_element(By.XPATH, "//*[@class='css-foh633']").get_attribute("value")
        assert expectNoResult not in actualResult
        wait_element_is_visible(driver, "//*[@class='css-l1jroy']")
        wait_element_is_visible(driver, "//*[@class='active']//*[text()='Posts']").click()
        delete_post(driver)
        logout(driver)

    def delete_post_on_the_admin_ui_page(self):
        print('---delete_post_on_the_admin_ui_page---')
        driver = login()
        create_post(driver)
        wait_web_herf_is_visible(driver, "posts").click()
        wait_element_is_visible(driver, "//*[@class='ItemList-wrapper']")
        wait_element_is_visible(driver, "//*[@class='ItemList__col']//*[text()='CreatePost']/../preceding-sibling::*").click()
        wait_element_is_visible(driver, "//*[@data-button-type='confirm']").click()
        wait_element_is_visible(driver, "//*[text()='No posts found...']")
        noPosts = driver.find_element(By.XPATH, "//*[text()='No posts found...']").text
        assert "No posts found..." == noPosts
        logout(driver)

    def create_comment_on_admin_ui_page(self):
        print('---Create post on the Admin UI page---')
        driver = login()
        create_post(driver)
        wait_plus_icon_is_visible(driver, 'Posts', 'comments').click()
        wait_element_is_visible(driver, "//*[@data-screen-id='modal-dialog']")
        select_dropdown_by_field_name(driver, "author", "Demo User")
        select_dropdown_by_field_name(driver, "post", "CreatePost")
        wait_element_is_visible(driver, "//*[@data-button-type='submit']").click()
        wait_element_is_visible(driver, "//*[@class='css-2960tt']")
        wait_element_is_visible(driver, "//*[text()='Save']").click()
        wait_element_is_visible(driver, "//*[@class='css-ctpeu']")
        assert "Your changes have been saved successfully" in driver.find_element_by_xpath("//*[@class='css-ctpeu']").text
        actualID = driver.find_element(By.XPATH, "//*[@class='EditForm__name-field']").text
        wait_element_is_visible(driver, "//*[contains(@data-list-path,'comments')]").click()
        wait_element_is_visible(driver, "//*[@class='ItemList-wrapper']")
        expectID = driver.find_element(By.XPATH, "//*[contains(@class,'ItemList__value--id')]").text
        assert actualID == expectID
        delete_comment(driver)
        delete_post(driver)
        logout(driver)

    def edit_comment_on_admin_ui_page(self):
        print('---edit_comment_on_admin_ui_page---')
        driver = login()
        create_post(driver)
        create_comment(driver)
        wait_element_is_visible(driver, "//*[@class='octicon octicon-home']").click()
        wait_web_herf_is_visible(driver, "comments").click()
        wait_element_is_visible(driver, "//*[@class='ItemList-wrapper']")
        originState = driver.find_element(By.XPATH, "//*[contains(@class,'ItemList__value--select')]").text
        wait_element_is_visible(driver, "//*[contains(@class,'ItemList__value--id')]").click()
        select_dropdown_by_field_name(driver, "commentState", "Archived")
        wait_element_is_visible(driver, "//*[text()='Save']").click()
        wait_element_is_visible(driver, "//*[contains(@data-list-path,'comments')]").click()
        wait_element_is_visible(driver, "//*[@class='ItemList-wrapper']")
        actualState = driver.find_element(By.XPATH, "//*[contains(@class,'ItemList__value--select')]").text
        assert "Archived" in actualState
        assert originState != actualState
        delete_comment(driver)
        delete_post(driver)
        logout(driver)

    def delete_comment_on_admin_ui_page(self):
        print('---delete_comment_on_admin_ui_page---')
        driver = login()
        create_post(driver)
        create_comment(driver)
        wait_element_is_visible(driver, "//*[@class='octicon octicon-home']").click()
        wait_web_herf_is_visible(driver, "comments").click()
        wait_element_is_visible(driver, "//*[@class='ItemList-wrapper']")
        wait_element_is_visible(driver, "//*[contains(@class,'ItemList__value--id')]/../preceding-sibling::*").click()
        wait_element_is_visible(driver, "//*[@data-screen-id='modal-dialog']")
        wait_element_is_visible(driver, "//*[@data-button-type='confirm']").click()
        wait_element_is_visible(driver, "//*[text()='No comments found...']")
        noPosts = driver.find_element(By.XPATH, "//*[text()='No comments found...']").text
        assert "No comments found..." == noPosts
        delete_post(driver)
        logout(driver)

    def create_category_on_admin_ui_page(self):
        print('---create_category_on_admin_ui_page---')
        driver = login()
        wait_plus_icon_is_visible(driver, 'Posts', 'categories').click()
        wait_element_is_visible(driver, "//*[@data-screen-id='modal-dialog']")
        input_text(driver, "//*[@name='name']", "CreateCategory")
        wait_element_is_visible(driver, "//*[@data-button-type='submit']").click()
        wait_element_is_visible(driver, "//*[contains(@data-list-path,'categories')]").click()
        wait_element_is_visible(driver, "//*[@class='ItemList-wrapper']")
        assert "CreateCategory" in driver.find_element(By.XPATH, "//*[contains(@class,'ItemList__value--text')]").text
        delete_category(driver)
        logout(driver)

    def show_posts_of_the_specific_category_by_pressing_category_name_on_the_blog_page(self):
        print('---show_posts_of_the_specific_category_by_pressing_category_name_on_the_blog_page---')
        driver = login()
        create_category(driver)
        logout(driver)
        driver = login()
        wait_element_is_visible(driver, "//*[@class='octicon octicon-home']").click()  # createData
        wait_plus_icon_is_visible(driver, 'Posts', 'posts').click()
        wait_element_is_visible(driver, "//*[@data-screen-id='modal-dialog']")
        input_text(driver, "//*[@name='name']", "CreatePost")
        wait_element_is_visible(driver, "//*[@data-button-type='submit']").click()
        wait_element_is_visible(driver, "//*[@class='css-1r0jf0q item-name-field']")
        expectResult = driver.find_element(By.XPATH, "//*[@class='css-1r0jf0q item-name-field']").get_attribute("value")
        select_dropdown_by_field_name(driver, "state", "Published")
        ActionChains(driver).move_to_element(driver.find_element_by_xpath("//*[@class='Relationships']")).perform()
        select_dropdown_by_field_name(driver, "categories", "CreateCategory")
        wait_element_is_visible(driver, "//*[@class='css-2960tt']")
        wait_element_is_visible(driver, "//*[text()='Save']").click()
        wait_element_is_visible(driver, "//*[@class='octicon octicon-sign-out']").click()  # logout
        wait_element_is_visible(driver, "//*[@class='auth-box__brand']").click()
        wait_element_is_visible(driver, "//li//*[text()='Blog']").click()
        wait_element_is_visible(driver, "//*[@class='badge pull-right']").click()
        actualResult = driver.find_element(By.XPATH, "//*[@class='media-heading']").text
        assert actualResult == expectResult
        driver.quit()
        driver = login()
        wait_web_herf_is_visible(driver, "posts").click()
        delete_post(driver)
        delete_category(driver)
        logout(driver)

    def create_enquiry_on_the_contact_page(self):
        driver = webdriver.Chrome()
        driver.get("http://127.0.0.1:3000/")
        wait_element_is_visible(driver, "//li//*[text()='Contact']").click()
        input_text(driver, "//*[@name='name.full']", "testName")
        input_text(driver, "//*[@name='email']", "demo@keystonejs.com")
        input_text(driver, "//*[@name='phone']", "0987654321")
        wait_element_is_visible(driver, "//*[@name='enquiryType']").click()
        wait_element_is_visible(driver, "//*[text()='Just leaving a message']").click()
        input_text(driver, "//*[@name='message']", "testMessage")
        actualName = driver.find_element(By.XPATH, "//*[@name='name.full']").get_attribute("value")
        wait_element_is_visible(driver, "//*[text()='Submit']").click()
        wait_element_is_visible(driver, "//*[text()='Success!']")
        assert "Success" in driver.find_element(By.XPATH, "//*[text()='Success!']").text
        driver.quit()
        driver = login()
        wait_element_is_visible(driver, "//a[text()='Enquiries']").click()
        wait_element_is_visible(driver, "//*[@class='ItemList-wrapper']")
        expectName = driver.find_element(By.XPATH, "//*[contains(@class,'ItemList__value--name')]").text
        assert actualName == expectName
        delete_enquiry(driver)
        logout(driver)

    def delete_enquiry_on_admin_ui_page(self):
        create_enquiry()
        driver = login()
        wait_element_is_visible(driver, "//a[text()='Enquiries']").click()
        wait_element_is_visible(driver, "//*[@class='ItemList-wrapper']")
        wait_element_is_visible(driver, "//*[contains(@class,'ItemList__value--name')]/../preceding-sibling::*").click()
        wait_element_is_visible(driver, "//*[@data-screen-id='modal-dialog']")
        wait_element_is_visible(driver, "//*[@data-button-type='confirm']").click()
        wait_element_is_visible(driver, "//*[text()='No enquiries found...']")
        noEnquiries = driver.find_element(By.XPATH, "//*[text()='No enquiries found...']").text
        assert "No enquiries found..." == noEnquiries
        logout(driver)

    def create_a_new_user_on_admin_ui_page(self):
        driver = login()
        ActionChains(driver).move_to_element(driver.find_element_by_xpath("//*[@data-section-label='Users']//*[contains(@data-list-path,'users')]//a[@title='Create']")).perform()
        wait_plus_icon_is_visible(driver, "Users", "users").click()
        wait_element_is_visible(driver, "//*[@data-screen-id='modal-dialog']")
        input_text(driver, "//*[@name='name.first']", "firstName")
        input_text(driver, "//*[@name='name.last']", "lastName")
        # Timestamped address keeps the email unique across runs.
        mail_name = datetime.now().strftime('%Y%m%d%H%M%S') + "@gmail.com"
        input_text(driver, "//*[@name='email']", mail_name)
        input_text(driver, "//*[@name='password']", "ilove5278")
        input_text(driver, "//*[@name='password_confirm']", "ilove5278")
        wait_element_is_visible(driver, "//*[@data-button-type='submit']").click()
        wait_element_is_visible(driver, "//*[@class='css-nil']")
        input_text(driver, "//*[@name='phone']", "0987654321")
        wait_element_is_visible(driver, "//*[@class='css-2960tt']")
        wait_element_is_visible(driver, "//*[text()='Save']").click()
        wait_element_is_visible(driver, "//*[contains(@data-list-path,'users')]").click()
        wait_element_is_visible(driver, "//a[text()='Users']").click()
        options = driver.find_elements(By.XPATH, "//*[contains(@class,'ItemList__value--email')]")
        assert mail_name in [option.text for option in options]
        logout(driver)
def suite():
    """Build the ordered TestSuite of scenarios to run.

    Needed because the scenario methods do not start with 'test_' and so are
    not discovered automatically.
    """
    scenarios = unittest.TestSuite()
    scenarios.addTests([test_suite('create_post_on_the_admin_ui_page'),
                        test_suite('edit_post_on_the_admin_ui_page'),
                        test_suite('search_posts_by_keyword_on_the_admin_ui_page'),
                        test_suite('delete_post_on_the_admin_ui_page'),
                        test_suite('create_comment_on_admin_ui_page'),
                        test_suite('edit_comment_on_admin_ui_page'),
                        test_suite('delete_comment_on_admin_ui_page'),
                        test_suite('create_category_on_admin_ui_page'),
                        test_suite('show_posts_of_the_specific_category_by_pressing_category_name_on_the_blog_page'),
                        test_suite('create_enquiry_on_the_contact_page'),
                        test_suite('delete_enquiry_on_admin_ui_page'),
                        # test_suite('create_a_new_user_on_admin_ui_page')
                        ])
    return scenarios
if __name__ == '__main__':
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite())
| 51.640969
| 182
| 0.65515
| 2,803
| 23,445
| 5.164823
| 0.084909
| 0.088278
| 0.14713
| 0.174069
| 0.863577
| 0.834013
| 0.776128
| 0.734683
| 0.703392
| 0.656697
| 0
| 0.006826
| 0.156451
| 23,445
| 454
| 183
| 51.640969
| 0.725186
| 0.01254
| 0
| 0.654156
| 0
| 0.005362
| 0.325725
| 0.221199
| 0
| 0
| 0
| 0
| 0.067024
| 1
| 0.075067
| false
| 0.010724
| 0.024129
| 0
| 0.115282
| 0.024129
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d33905140851f3cf0e906e7e59a634e7ed71fd2f
| 15,063
|
py
|
Python
|
Scripts/ga_makespan_fix_inaccur.py
|
radical-experiments/campaign_manager
|
337660cf07a97933b9b516d6612353bd3f6592a8
|
[
"MIT"
] | null | null | null |
Scripts/ga_makespan_fix_inaccur.py
|
radical-experiments/campaign_manager
|
337660cf07a97933b9b516d6612353bd3f6592a8
|
[
"MIT"
] | null | null | null |
Scripts/ga_makespan_fix_inaccur.py
|
radical-experiments/campaign_manager
|
337660cf07a97933b9b516d6612353bd3f6592a8
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from random import gauss, uniform
def get_makespan(curr_plan, num_resources, workflow_inaccur, positive=False, dynamic_res=False):
    '''
    Calculate makespan metrics for a plan, perturbing each workflow's
    operation count by a random inaccuracy factor.

    Parameters
    ----------
    curr_plan : iterable of placements; each placement is indexable with
        placement[0] = workflow dict (reads 'num_oper'),
        placement[1] = resource dict (reads 'id', 'performance'),
        placement[3] = expected finish time of this placement.
        (placement[2] is not used here.)
    num_resources : int, number of resources; resource ids are 1-based.
    workflow_inaccur : float, half-width of the uniform inaccuracy range.
    positive : bool, if True draw inaccuracy from [0, workflow_inaccur]
        instead of [-workflow_inaccur, workflow_inaccur].
    dynamic_res : bool, if True jitter each resource's performance with a
        Gaussian (sigma = 6.44% of the nominal performance).

    Returns
    -------
    (makespan, reactive, expected) : tuple of floats —
        makespan: max over resources of usage where each placement starts no
        earlier than its expected finish (max of running usage + exec time
        vs. expected finish);
        reactive: max over resources of the plain sum of execution times;
        expected: max over resources of the last expected finish seen.
    '''
    # Dead locals `under` and `tmp_idx` from the original were removed:
    # they were written but never read.
    reactive_resource_usage = [0] * num_resources
    resource_usage = [0] * num_resources
    expected = [0] * num_resources
    for placement in curr_plan:
        workflow = placement[0]
        resource = placement[1]
        expected_finish = placement[3]
        # Resource ids are 1-based; translate to a 0-based slot once.
        idx = resource['id'] - 1
        if dynamic_res:
            perf = gauss(resource['performance'], resource['performance'] * 0.0644)
        else:
            perf = resource['performance']
        if positive:
            inaccur = uniform(0, workflow_inaccur)
        else:
            inaccur = uniform(-workflow_inaccur, workflow_inaccur)
        exec_time = (workflow['num_oper'] * (1 + inaccur)) / perf
        reactive_resource_usage[idx] += exec_time
        # A placement cannot complete before its planned finish time.
        resource_usage[idx] = max(resource_usage[idx] + exec_time, expected_finish)
        expected[idx] = expected_finish
    return max(resource_usage), max(reactive_resource_usage), max(expected)
# ------------------------------------------------------------------------------
# Re-evaluate the GA plans for inaccuracy levels 5% .. 90%.
# The original script repeated the same two read/evaluate/write sections ten
# times; the duplication is collapsed into one helper and one loop, preserving
# the exact sequence of file reads and writes.
_RESULT_COLUMNS = ['size', 'planner', 'plan', 'makespan', 'reactive',
                   'expected', 'mpn_snt', 'rect_snt', 'time']


def _evaluate_plans(test_case, workflow_inaccur, dynamic_res):
    """Run get_makespan over every row of `test_case`; return a results frame."""
    results = pd.DataFrame(columns=_RESULT_COLUMNS)
    for _, row in test_case.iterrows():
        # NOTE(review): eval() executes the CSV cell as Python. This is only
        # acceptable because these files are trusted, locally generated data.
        plan = eval(row['plan'])
        makespan, reactive, expected = get_makespan(
            plan, 4, workflow_inaccur, dynamic_res=dynamic_res)
        results.loc[len(results)] = [row['size'], row['planner'], plan,
                                     makespan, reactive, expected,
                                     makespan - expected, reactive - expected,
                                     row['time']]
    return results


for _perc in (5, 10, 20, 30, 40, 50, 60, 70, 80, 90):
    _inaccur = _perc / 100.0
    _dyn_path = ('../Data/ga/perc_050/'
                 'StHeteroCampaigns_4DynHeteroResourcesGA50_inaccur_%dperc.csv' % _perc)
    _st_path = ('../Data/ga/perc_050/'
                'StHeteroCampaigns_4StHeteroResourcesGA50_inaccur_%dperc.csv' % _perc)
    test_case = pd.read_csv(_dyn_path)
    # Dynamic-resource results overwrite the input file (original behavior);
    # the static run below reuses the already-loaded DataFrame, so it still
    # sees the pre-overwrite data.
    _evaluate_plans(test_case, _inaccur, dynamic_res=True).to_csv(_dyn_path, index=False)
    _evaluate_plans(test_case, _inaccur, dynamic_res=False).to_csv(_st_path, index=False)
| 52.484321
| 131
| 0.667662
| 1,822
| 15,063
| 5.355653
| 0.056531
| 0.131174
| 0.122976
| 0.172166
| 0.930416
| 0.903259
| 0.903259
| 0.8967
| 0.8967
| 0.8967
| 0
| 0.02568
| 0.126203
| 15,063
| 286
| 132
| 52.667832
| 0.715697
| 0.056297
| 0
| 0.651376
| 0
| 0
| 0.279972
| 0.168336
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004587
| false
| 0
| 0.013761
| 0
| 0.022936
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6c8ff02a1279820f62231e838c9948bcfc71fa6f
| 36,887
|
py
|
Python
|
src/fts3rest/fts3rest/tests/functional/test_job_submission.py
|
Jar-win/fts-rest
|
4db0880cf328037b8587b4d16741c40959b47ad2
|
[
"Apache-2.0"
] | null | null | null |
src/fts3rest/fts3rest/tests/functional/test_job_submission.py
|
Jar-win/fts-rest
|
4db0880cf328037b8587b4d16741c40959b47ad2
|
[
"Apache-2.0"
] | null | null | null |
src/fts3rest/fts3rest/tests/functional/test_job_submission.py
|
Jar-win/fts-rest
|
4db0880cf328037b8587b4d16741c40959b47ad2
|
[
"Apache-2.0"
] | null | null | null |
# Copyright notice:
# Copyright Members of the EMI Collaboration, 2013.
#
# See www.eu-emi.eu for details on the copyright holders
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mock
import socket
import time
from nose.plugins.skip import SkipTest
from sqlalchemy.orm import scoped_session, sessionmaker
from fts3rest.tests import TestController
from fts3rest.lib.base import Session
from fts3.model import File, Job
import random
class TestJobSubmission(TestController):
"""
Tests job submission
"""
def _validate_submitted(self, job, no_vo=False, dn=TestController.TEST_USER_DN):
self.assertNotEqual(job, None)
files = job.files
self.assertNotEqual(files, None)
self.assertEqual(job.user_dn, dn)
if no_vo:
self.assertEqual(job.vo_name, 'TestUser@cern.ch')
else:
self.assertEqual(job.vo_name, 'testvo')
self.assertEqual(job.job_state, 'SUBMITTED')
self.assertEqual(job.source_se, 'root://source.es')
self.assertEqual(job.dest_se, 'root://dest.ch')
self.assertEqual(job.overwrite_flag, True)
self.assertEqual(job.verify_checksum, 'b')
self.assertEqual(job.job_type, 'N')
self.assertEqual(job.priority, 3)
self.assertIsNone(job.max_time_in_queue)
self.assertEqual(len(files), 1)
self.assertEqual(files[0].file_state, 'SUBMITTED')
self.assertEqual(files[0].source_surl, 'root://source.es/file')
self.assertEqual(files[0].source_se, 'root://source.es')
self.assertEqual(files[0].dest_se, 'root://dest.ch')
self.assertEqual(files[0].file_index, 0)
self.assertEqual(files[0].selection_strategy, 'orderly')
self.assertEqual(files[0].user_filesize, 1024)
self.assertEqual(files[0].checksum, 'adler32:1234')
self.assertEqual(files[0].file_metadata['mykey'], 'myvalue')
if no_vo:
self.assertEqual(files[0].vo_name, 'TestUser@cern.ch')
else:
self.assertEqual(files[0].vo_name, 'testvo')
self.assertEquals(files[0].activity, 'default')
# Validate submitter
self.assertEqual(socket.getfqdn(), job.submit_host)
def test_submit(self):
"""
Submit a valid job
"""
self.setup_gridsite_environment()
self.push_delegation()
dest_surl = 'root://dest.ch/file' + str(random.randint(0, 100))
job = {
'files': [{
'sources': ['root://source.es/file'],
'destinations': [dest_surl],
'selection_strategy': 'orderly',
'checksum': 'adler32:1234',
'filesize': 1024,
'metadata': {'mykey': 'myvalue'},
}],
'params': {'overwrite': True, 'verify_checksum': True}
}
job_id = self.app.put(
url="/jobs",
content_type='application/json',
params=json.dumps(job),
status=200
).json['job_id']
# Make sure it was committed to the DB
self.assertGreater(job_id, 0)
self._validate_submitted(Session.query(Job).get(job_id))
return str(job_id)
def test_submit_no_reuse(self):
"""
Submit a valid job no reuse
"""
self.setup_gridsite_environment()
self.push_delegation()
dest_surl = 'root://dest.ch/file' + str(random.randint(0, 100))
job = {
'files': [{
'sources': ['root://source.es/file'],
'destinations': [dest_surl],
'selection_strategy': 'orderly',
'checksum': 'adler32:1234',
'filesize': 1024,
'metadata': {'mykey': 'myvalue'},
}],
'params': {'overwrite': True, 'verify_checksum': True, 'reuse':False}
}
job_id = self.app.put(
url="/jobs",
content_type='application/json',
params=json.dumps(job),
status=200
).json['job_id']
# Make sure it was committed to the DB
self.assertGreater(job_id, 0)
self._validate_submitted(Session.query(Job).get(job_id))
return str(job_id)
def test_submit_no_reuse_N(self):
"""
Submit a valid job, using 'N' instead of False
"""
self.setup_gridsite_environment()
self.push_delegation()
dest_surl = 'root://dest.ch/file' + str(random.randint(0, 100))
job = {
'files': [{
'sources': ['root://source.es/file'],
'destinations': [dest_surl],
'selection_strategy': 'orderly',
'checksum': 'adler32:1234',
'filesize': 1024,
'metadata': {'mykey': 'myvalue'},
}],
'params': {'overwrite': True, 'verify_checksum': True, 'reuse':'N'}
}
job_id = self.app.put(
url="/jobs",
content_type='application/json',
params=json.dumps(job),
status=200
).json['job_id']
# Make sure it was committed to the DB
self.assertGreater(job_id, 0)
self._validate_submitted(Session.query(Job).get(job_id))
return str(job_id)
def test_submit_reuse(self):
"""
Submit a valid reuse job
"""
self.setup_gridsite_environment()
self.push_delegation()
dest_surl = 'root://dest.ch/file' + str(random.randint(0, 100))
job = {
'files': [{
'sources': ['root://source.es/file'],
'destinations': [dest_surl],
'selection_strategy': 'orderly',
'checksum': 'adler32:1234',
'filesize': 1024,
'metadata': {'mykey': 'myvalue'},
}],
'params': {'overwrite': True, 'verify_checksum': True, 'reuse': True}
}
job_id = self.app.put(
url="/jobs",
content_type='application/json',
params=json.dumps(job),
status=200
).json['job_id']
# Make sure it was commited to the DB
self.assertGreater(len(job_id), 0)
job = Session.query(Job).get(job_id)
self.assertEqual(job.job_type, 'Y')
return job_id
def test_submit_Y(self):
"""
Submit a valid reuse job, using 'Y' instead of True
"""
self.setup_gridsite_environment()
self.push_delegation()
dest_surl = 'root://dest.ch/file' + str(random.randint(0, 100))
job = {
'files': [{
'sources': ['root://source.es/file'],
'destinations': [dest_surl],
'selection_strategy': 'orderly',
'checksum': 'adler32:1234',
'filesize': 1024,
'metadata': {'mykey': 'myvalue'},
}],
'params': {'overwrite': 'Y', 'verify_checksum': 'Y', 'reuse': 'Y'}
}
job_id = self.app.put(
url="/jobs",
content_type='application/json',
params=json.dumps(job),
status=200
).json['job_id']
# Make sure it was commited to the DB
self.assertGreater(len(job_id), 0)
job = Session.query(Job).get(job_id)
self.assertEqual(job.job_type, 'Y')
def test_submit_post(self):
"""
Submit a valid job using POST instead of PUT
"""
self.setup_gridsite_environment()
self.push_delegation()
dest_surl = 'root://dest.ch/file' + str(random.randint(0, 100))
job = {
'files': [{
'sources': ['root://source.es/file'],
'destinations': [dest_surl],
'selection_strategy': 'orderly',
'checksum': 'adler32:1234',
'filesize': 1024,
'metadata': {'mykey': 'myvalue'},
}],
'params': {'overwrite': True, 'verify_checksum': True}
}
job_id = self.app.put(
url="/jobs",
content_type='application/json',
params=json.dumps(job),
status=200
).json['job_id']
# Make sure it was committed to the DB
self.assertGreater(len(job_id), 0)
self._validate_submitted(Session.query(Job).get(job_id))
return job_id
def test_submit_with_port(self):
"""
Submit a valid job where the port is explicit in the url.
source_se and dest_se must exclude this port
"""
self.setup_gridsite_environment()
self.push_delegation()
dest_surl = 'srm://dest.ch:8447/file' + str(random.randint(0, 100))
job = {
'files': [{
'sources': ['srm://source.es:8446/file'],
'destinations': [dest_surl],
'selection_strategy': 'orderly',
'checksum': 'adler32:1234',
'filesize': 1024,
'metadata': {'mykey': 'myvalue'},
}],
'params': {'overwrite': True, 'verify_checksum': True}
}
job_id = self.app.put(
url="/jobs",
content_type='application/json',
params=json.dumps(job),
status=200
).json['job_id']
# Make sure it was committed to the DB
self.assertGreater(len(job_id), 0)
db_job = Session.query(Job).get(job_id)
self.assertEqual(db_job.source_se, 'srm://source.es')
self.assertEqual(db_job.dest_se, 'srm://dest.ch')
self.assertEqual(db_job.files[0].source_se, 'srm://source.es')
self.assertEqual(db_job.files[0].dest_se, 'srm://dest.ch')
return job_id
def test_submit_only_query(self):
"""
Submit a valid job, without a path, but with a query in the url.
This is valid for some protocols (i.e. srm)
"""
self.setup_gridsite_environment()
self.push_delegation()
dest_surl = 'root://dest.ch/file' + str(random.randint(0, 100))
job = {
'files': [{
'sources': ['srm://source.es/?SFN=/path/'],
'destinations': [dest_surl],
'selection_strategy': 'orderly',
'checksum': 'adler32:1234',
'filesize': 1024,
'metadata': {'mykey': 'myvalue'},
}],
'params': {
'overwrite': True,
'copy_pin_lifetime': 3600,
'bring_online': 60,
'verify_checksum': True
}
}
job_id = self.app.put(
url="/jobs",
content_type='application/json',
params=json.dumps(job),
status=200
).json['job_id']
# Make sure it was committed to the DB
self.assertGreater(len(job_id), 0)
db_job = Session.query(Job).get(job_id)
self.assertEqual(db_job.job_state, 'STAGING')
self.assertEqual(db_job.files[0].file_state, 'STAGING')
self.assertEqual(db_job.copy_pin_lifetime, 3600)
self.assertEqual(db_job.bring_online, 60)
return job_id
def test_null_checksum(self):
"""
Valid job, with checksum explicitly set to null
"""
self.setup_gridsite_environment()
self.push_delegation()
dest_surl = 'root://dest.ch/file' + str(random.randint(0, 100))
job = {
'files': [{
'sources': ['root://source.es/file'],
'destinations': [dest_surl],
'selection_strategy': 'orderly',
'checksum': None,
'filesize': 1024,
'metadata': {'mykey': 'myvalue'},
}],
'params': {'overwrite': True, 'verify_checksum': True}
}
job_id = self.app.put(
url="/jobs",
content_type='application/json',
params=json.dumps(job),
status=200
).json['job_id']
# Make sure it was committed to the DB
self.assertGreater(len(job_id), 0)
job = Session.query(Job).get(job_id)
self.assertEqual(job.files[0].checksum, 'ADLER32')
return job_id
def test_checksum_no_verify(self):
"""
Valid job, with checksum, but verify_checksum is not set
In the DB, it must end as 'r' (compatibility with FTS3 behaviour)
"""
self.setup_gridsite_environment()
self.push_delegation()
dest_surl = 'root://dest.ch/file' + str(random.randint(0, 100))
job = {
'files': [{
'sources': ['root://source.es/file'],
'destinations': [dest_surl],
'selection_strategy': 'orderly',
'checksum': '1234F',
'filesize': 1024,
'metadata': {'mykey': 'myvalue'},
}],
'params': {'overwrite': True}
}
job_id = self.app.post(
url="/jobs",
content_type='application/json',
params=json.dumps(job),
status=200
).json['job_id']
# Make sure it was committed to the DB
self.assertGreater(len(job_id), 0)
job = Session.query(Job).get(job_id)
self.assertEqual(job.files[0].checksum, '1234F')
self.assertEqual(job.verify_checksum, 't')
return job_id
def test_verify_checksum_target(self):
"""
Valid job, verify checksum in destination.
In the DB, it must end as 'r' (compatibility with FTS3 behaviour) or destination
"""
self.setup_gridsite_environment()
self.push_delegation()
dest_surl = 'root://dest.ch/file' + str(random.randint(0, 100))
job = {
'files': [{
'sources': ['root://source.es/file'],
'destinations': [dest_surl],
'selection_strategy': 'orderly',
'checksum': '1234F',
'filesize': 1024,
'metadata': {'mykey': 'myvalue'},
}],
'params': {'overwrite': True, 'verify_checksum':'target'}
}
job_id = self.app.post(
url="/jobs",
content_type='application/json',
params=json.dumps(job),
status=200
).json['job_id']
# Make sure it was committed to the DB
self.assertGreater(len(job_id), 0)
job = Session.query(Job).get(job_id)
self.assertEqual(job.files[0].checksum, '1234F')
self.assertEqual(job.verify_checksum, 't')
return job_id
def test_verify_checksum_source(self):
"""
Valid job, verify checksum in source.
"""
self.setup_gridsite_environment()
self.push_delegation()
dest_surl = 'root://dest.ch/file' + str(random.randint(0, 100))
job = {
'files': [{
'sources': ['root://source.es/file'],
'destinations': [dest_surl],
'selection_strategy': 'orderly',
'checksum': '1234F',
'filesize': 1024,
'metadata': {'mykey': 'myvalue'},
}],
'params': {'overwrite': True, 'verify_checksum':'source'}
}
job_id = self.app.post(
url="/jobs",
content_type='application/json',
params=json.dumps(job),
status=200
).json['job_id']
# Make sure it was committed to the DB
self.assertGreater(len(job_id), 0)
job = Session.query(Job).get(job_id)
self.assertEqual(job.files[0].checksum, '1234F')
self.assertEqual(job.verify_checksum, 's')
return job_id
def test_verify_checksum_both(self):
"""
Valid job, verify checksum in source.
"""
self.setup_gridsite_environment()
self.push_delegation()
dest_surl = 'root://dest.ch/file' + str(random.randint(0, 100))
job = {
'files': [{
'sources': ['root://source.es/file'],
'destinations': [dest_surl],
'selection_strategy': 'orderly',
'checksum': '1234F',
'filesize': 1024,
'metadata': {'mykey': 'myvalue'},
}],
'params': {'overwrite': True, 'verify_checksum':'both'}
}
job_id = self.app.post(
url="/jobs",
content_type='application/json',
params=json.dumps(job),
status=200
).json['job_id']
# Make sure it was committed to the DB
self.assertGreater(len(job_id), 0)
job = Session.query(Job).get(job_id)
self.assertEqual(job.files[0].checksum, '1234F')
self.assertEqual(job.verify_checksum, 'b')
return job_id
def test_verify_checksum_none(self):
"""
Valid job, verify checksum none.
"""
self.setup_gridsite_environment()
self.push_delegation()
dest_surl = 'root://dest.ch/file' + str(random.randint(0, 100))
job = {
'files': [{
'sources': ['root://source.es/file'],
'destinations': [dest_surl],
'selection_strategy': 'orderly',
'filesize': 1024,
'metadata': {'mykey': 'myvalue'},
}],
'params': {'overwrite': True, 'verify_checksum':'none'}
}
job_id = self.app.post(
url="/jobs",
content_type='application/json',
params=json.dumps(job),
status=200
).json['job_id']
# Make sure it was committed to the DB
self.assertGreater(len(job_id), 0)
job = Session.query(Job).get(job_id)
self.assertEqual(job.verify_checksum, 'n')
return job_id
def test_null_user_filesize(self):
"""
Valid job, with filesize explicitly set to null
"""
self.setup_gridsite_environment()
self.push_delegation()
dest_surl = 'root://dest.ch/file' + str(random.randint(0, 100))
job = {
'files': [{
'sources': ['root://source.es/file'],
'destinations': [dest_surl],
'selection_strategy': 'orderly',
'filesize': None,
'metadata': {'mykey': 'myvalue'},
}],
'params': {'overwrite': True, 'verify_checksum': True}
}
job_id = self.app.post(
url="/jobs",
content_type='application/json',
params=json.dumps(job),
status=200
).json['job_id']
# Make sure it was committed to the DB
self.assertGreater(len(job_id), 0)
job = Session.query(Job).get(job_id)
self.assertEqual(job.files[0].user_filesize, 0)
return job_id
def test_no_vo(self):
"""
Submit a valid job with no VO data in the credentials (could happen with plain SSL!)
The job must be accepted, but assigned to the user's 'virtual' vo.
"""
self.setup_gridsite_environment(no_vo=True)
self.push_delegation()
dest_surl = 'root://dest.ch/file' + str(random.randint(0, 100))
job = {
'files': [{
'sources': ['root://source.es/file'],
'destinations': [dest_surl],
'selection_strategy': 'orderly',
'checksum': 'adler32:1234',
'filesize': 1024,
'metadata': {'mykey': 'myvalue'},
}],
'params': {'overwrite': True, 'verify_checksum': True}
}
job_id = self.app.put(
url="/jobs",
content_type='application/json',
params=json.dumps(job),
status=200
).json['job_id']
# Make sure it was commited to the DB
self.assertGreater(len(job_id), 0)
self._validate_submitted(Session.query(Job).get(job_id), no_vo=True)
def test_no_vo_proxy(self):
"""
Submit a valid job with no VO data in the credentials, but still being a proxy.
The job must be accepted, but assigned to the user's 'virtual' vo.
"""
proxy_dn = self.TEST_USER_DN + '/CN=proxy'
self.setup_gridsite_environment(no_vo=True, dn=proxy_dn)
self.push_delegation()
dest_surl = 'root://dest.ch/file' + str(random.randint(0, 100))
job = {
'files': [{
'sources': ['root://source.es/file'],
'destinations': [dest_surl],
'selection_strategy': 'orderly',
'checksum': 'adler32:1234',
'filesize': 1024,
'metadata': {'mykey': 'myvalue'},
}],
'params': {'overwrite': True, 'verify_checksum': True}
}
job_id = self.app.put(
url="/jobs",
content_type='application/json',
params=json.dumps(job),
status=200
).json['job_id']
# Make sure it was commited to the DB
self.assertGreater(len(job_id), 0)
self._validate_submitted(Session.query(Job).get(job_id), no_vo=True, dn=proxy_dn)
def test_retry(self):
"""
Submit with a specific retry value
"""
self.setup_gridsite_environment()
self.push_delegation()
dest_surl = 'root://dest.ch/file' + str(random.randint(0, 100))
job = {
'files': [{
'sources': ['root://source.es/file'],
'destinations': [dest_surl],
'selection_strategy': 'orderly',
'checksum': 'adler32:1234',
'filesize': 1024,
'metadata': {'mykey': 'myvalue'},
}],
'params': {'overwrite': True, 'verify_checksum': True, 'retry': 42, 'retry_delay': 123}
}
job_id = self.app.put(
url="/jobs",
content_type='application/json',
params=json.dumps(job),
status=200
).json['job_id']
# Make sure it was commited to the DB
self.assertGreater(len(job_id), 0)
job = Session.query(Job).get(job_id)
self._validate_submitted(job)
self.assertEqual(job.retry, 42)
self.assertEqual(job.retry_delay, 123)
def test_with_activity(self):
    """
    Submit a job that assigns an activity to each file; the activities must
    be stored per-file in the database.
    """
    self.setup_gridsite_environment()
    self.push_delegation()
    destination = 'root://dest.ch/file%d' % random.randint(0, 100)
    transfers = [
        {
            'sources': ['root://source.es/file'],
            'destinations': [destination],
            'activity': 'my-activity'
        },
        {
            'sources': ['https://source.es/file2'],
            'destinations': ['https://dest.ch/file2'],
            'activity': 'my-second-activity'
        },
    ]
    response = self.app.put(
        url="/jobs",
        content_type='application/json',
        params=json.dumps({'files': transfers}),
        status=200
    )
    job_id = response.json['job_id']
    # The job must have been committed to the database
    self.assertGreater(len(job_id), 0)
    stored = Session.query(Job).get(job_id)
    self.assertEqual(stored.files[0].activity, 'my-activity')
    self.assertEqual(stored.files[1].activity, 'my-second-activity')
def test_surl_with_spaces(self):
    """
    Submit a job whose surls carry whitespace at the beginning and at the
    end; the job must still be accepted as valid.
    """
    self.setup_gridsite_environment()
    self.push_delegation()
    destination = 'root://dest.ch/file%d' % random.randint(0, 100)
    submission = {
        'files': [{
            # Whitespace is deliberately embedded in both surls
            'sources': ['root://source.es/file\n \r '],
            'destinations': ['\r\n%s\n\n \n' % destination],
            'selection_strategy': 'orderly',
            'checksum': 'adler32:1234',
            'filesize': 1024.0,
            'metadata': {'mykey': 'myvalue'},
        }],
        'params': {'overwrite': True, 'verify_checksum': True}
    }
    response = self.app.put(
        url="/jobs",
        content_type='application/json',
        params=json.dumps(submission),
        status=200
    )
    job_id = response.json['job_id']
    # The job must have been committed to the database
    self.assertGreater(len(job_id), 0)
    self._validate_submitted(Session.query(Job).get(job_id))
def test_submit_different_protocols(self):
    """
    Submit a transfer whose source and destination protocols differ.
    REST <= 3.2.3 used to forbid this, but it is now allowed.
    See https://its.cern.ch/jira/browse/FTS-97
    """
    self.setup_gridsite_environment()
    self.push_delegation()
    destination = 'root://dest.ch:8447/file%d' % random.randint(0, 100)
    submission = {
        'files': [{
            'sources': ['http://source.es:8446/file'],
            'destinations': [destination],
            'selection_strategy': 'orderly',
            'checksum': 'adler32:1234',
            'filesize': 1024,
            'metadata': {'mykey': 'myvalue'},
        }],
        'params': {'overwrite': True, 'verify_checksum': True}
    }
    job_id = self.app.post(
        url="/jobs",
        content_type='application/json',
        params=json.dumps(submission),
        status=200
    ).json['job_id']
    stored = Session.query(Job).get(job_id)
    # The mismatching pair must have been stored untouched
    self.assertEqual(1, len(stored.files))
    self.assertEqual('http://source.es:8446/file', stored.files[0].source_surl)
    self.assertEqual(destination, stored.files[0].dest_surl)
def test_submit_with_cloud_cred(self):
    """
    Submit a job whose source uses a cloud (dropbox) endpoint; it must be
    accepted and stored as given.
    """
    self.setup_gridsite_environment()
    self.push_delegation()
    destination = 'root://dest.ch:8447/file%d' % random.randint(0, 100)
    submission = {
        'files': [{
            'sources': ['dropbox://dropbox.com/file'],
            'destinations': [destination],
            'selection_strategy': 'orderly',
            'checksum': 'adler32:1234',
            'filesize': 1024,
            'metadata': {'mykey': 'myvalue'},
        }],
        'params': {'overwrite': True, 'verify_checksum': True}
    }
    job_id = self.app.post(
        url="/jobs",
        content_type='application/json',
        params=json.dumps(submission),
        status=200
    ).json['job_id']
    stored = Session.query(Job).get(job_id)
    self.assertEqual(1, len(stored.files))
    self.assertEqual('dropbox://dropbox.com/file', stored.files[0].source_surl)
    self.assertEqual(destination, stored.files[0].dest_surl)
def test_submit_protocol_params(self):
    """
    Submit a transfer with protocol parameters (timeout, nostreams,
    buffer_size, strict_copy); they must end up in internal_job_params.
    """
    self.setup_gridsite_environment()
    self.push_delegation()
    destination = 'root://dest.ch:8447/file%d' % random.randint(0, 100)
    submission = {
        'files': [{
            'sources': ['http://source.es:8446/file'],
            'destinations': [destination],
            'selection_strategy': 'orderly',
            'checksum': 'adler32:1234',
            'filesize': 1024,
            'metadata': {'mykey': 'myvalue'},
        }],
        'params': {
            'overwrite': True,
            'verify_checksum': True,
            'timeout': 1234,
            'nostreams': 42,
            'buffer_size': 1025,
            'strict_copy': True
        }
    }
    job_id = self.app.post(
        url="/jobs",
        content_type='application/json',
        params=json.dumps(submission),
        status=200
    ).json['job_id']
    stored = Session.query(Job).get(job_id)
    self.assertTrue(stored.internal_job_params is not None)
    # internal_job_params is a comma-separated 'key:value' list
    stored_params = stored.internal_job_params.split(',')
    self.assertIn('timeout:1234', stored_params)
    self.assertIn('nostreams:42', stored_params)
    self.assertIn('buffersize:1025', stored_params)
    self.assertIn('strict', stored_params)
def test_submit_with_priority(self):
    """
    Submit a job with an explicit priority; it must be stored in the database.
    """
    self.setup_gridsite_environment()
    self.push_delegation()
    destination = 'root://dest.ch:8447/file%d' % random.randint(0, 100)
    submission = {
        'files': [{
            'sources': ['http://source.es:8446/file'],
            'destinations': [destination],
        }],
        'params': {
            'priority': 5,
        }
    }
    job_id = self.app.post(
        url="/jobs",
        content_type='application/json',
        params=json.dumps(submission),
        status=200
    ).json['job_id']
    stored = Session.query(Job).get(job_id)
    self.assertEqual(stored.priority, 5)
def test_submit_max_time_in_queue(self):
    """
    Submit a job with max_time_in_queue given as a plain number (hours).
    """
    self.setup_gridsite_environment()
    self.push_delegation()
    destination = 'root://dest.ch:8447/file%d' % random.randint(0, 100)
    submission = {
        'files': [{
            'sources': ['http://source.es:8446/file'],
            'destinations': [destination],
        }],
        'params': {
            'max_time_in_queue': 8
        }
    }
    job_id = self.app.post(
        url="/jobs",
        content_type='application/json',
        params=json.dumps(submission),
        status=200
    ).json['job_id']
    # See FTS-311: max_time_in_queue was effectively ignored by FTS3;
    # since FTS-311 the field stores the timestamp when the job expires,
    # so 8 (hours) must land within (now, now + 8h].
    stored = Session.query(Job).get(job_id)
    self.assertGreater(stored.max_time_in_queue, time.time())
    self.assertLessEqual(stored.max_time_in_queue, (8 * 60 * 60) + time.time())
def test_submit_max_time_in_queue_suffix(self):
    """
    Submits a job specifying the maximum time it should stay in the queue,
    using a time suffix ('4s' = 4 seconds).
    """
    self.setup_gridsite_environment()
    self.push_delegation()
    dest_surl = 'root://dest.ch:8447/file' + str(random.randint(0, 100))
    job = {
        'files': [{
            'sources': ['http://source.es:8446/file'],
            'destinations': [dest_surl],
        }],
        'params': {
            'max_time_in_queue': '4s'
        }
    }
    job_id = self.app.post(
        url="/jobs",
        content_type='application/json',
        params=json.dumps(job),
        status=200
    ).json['job_id']
    # Since FTS-311 max_time_in_queue stores the expiration timestamp, so
    # '4s' must expire no later than 4 seconds from now.
    # (The bound used to be 8 — a copy-paste leftover from the hours test —
    # which made the assertion looser than the requested value.)
    job = Session.query(Job).get(job_id)
    self.assertGreater(job.max_time_in_queue, time.time())
    self.assertLessEqual(job.max_time_in_queue, 4 + time.time())
def test_submit_max_time_in_queue_suffix2(self):
    """
    Submit a job with max_time_in_queue using a minutes suffix
    ('2m' = 120 seconds).
    """
    self.setup_gridsite_environment()
    self.push_delegation()
    destination = 'root://dest%d.ch:8447/file' % random.randint(0, 100)
    submission = {
        'files': [{
            'sources': ['http://source.es:8446/file'],
            'destinations': [destination],
        }],
        'params': {
            'max_time_in_queue': '2m'
        }
    }
    job_id = self.app.post(
        url="/jobs",
        content_type='application/json',
        params=json.dumps(submission),
        status=200
    ).json['job_id']
    # The stored value is the expiration timestamp (see FTS-311)
    stored = Session.query(Job).get(job_id)
    self.assertGreater(stored.max_time_in_queue, time.time())
    self.assertLessEqual(stored.max_time_in_queue, 120 + time.time())
def test_submit_ipv4(self):
    """
    Submit jobs with the ipv4 flag set to True and to False; only the True
    submission must record 'ipv4' in internal_job_params.
    """
    self.setup_gridsite_environment()
    self.push_delegation()

    def submit(dest, flag):
        # Submit a single transfer with the given ipv4 flag and return the DB row
        body = {
            'files': [{
                'sources': ['http://source.es:8446/file'],
                'destinations': [dest],
            }],
            'params': {
                'ipv4': flag
            }
        }
        job_id = self.app.post(
            url="/jobs",
            content_type='application/json',
            params=json.dumps(body),
            status=200
        ).json['job_id']
        return Session.query(Job).get(job_id)

    enabled = submit('root://destipv4.ch:8447/file', True)
    self.assertIn('ipv4', enabled.internal_job_params)
    self.assertNotIn('ipv6', enabled.internal_job_params)

    disabled = submit('root://destipv4tofalse.ch:8447/file', False)
    # With the flag off, neither protocol marker may be present
    self.assertTrue(disabled.internal_job_params is None or 'ipv4' not in disabled.internal_job_params)
    self.assertTrue(disabled.internal_job_params is None or 'ipv6' not in disabled.internal_job_params)
def test_submit_ipv6(self):
    """
    Submit jobs with the ipv6 flag set to True and to False; only the True
    submission must record 'ipv6' in internal_job_params.
    """
    self.setup_gridsite_environment()
    self.push_delegation()

    def submit(dest, flag):
        # Submit a single transfer with the given ipv6 flag and return the DB row
        body = {
            'files': [{
                'sources': ['http://source.es:8446/file'],
                'destinations': [dest],
            }],
            'params': {
                'ipv6': flag
            }
        }
        job_id = self.app.post(
            url="/jobs",
            content_type='application/json',
            params=json.dumps(body),
            status=200
        ).json['job_id']
        return Session.query(Job).get(job_id)

    enabled = submit('root://destipv6.ch:8447/file', True)
    self.assertIn('ipv6', enabled.internal_job_params)
    self.assertNotIn('ipv4', enabled.internal_job_params)

    disabled = submit('root://destipv6tofalse.ch:8447/file', False)
    # With the flag off, neither protocol marker may be present
    self.assertTrue(disabled.internal_job_params is None or 'ipv4' not in disabled.internal_job_params)
    self.assertTrue(disabled.internal_job_params is None or 'ipv6' not in disabled.internal_job_params)
def test_files_balanced(self):
    """
    Check that the distribution of the files' 'hashed_id' values is
    reasonably uniform.
    hashed_id is a legacy name; its purpose is to balance the transfers
    between hosts regardless of the number running at a given moment.
    """
    # Deliberately disabled: everything below the raise is unreachable.
    raise SkipTest('Disabled as it is not very reliable')
    self.setup_gridsite_environment()
    self.push_delegation()
    # Build one job with 5000 transfers so there are enough samples
    files = []
    for r in xrange(5000):
        files.append({
            'sources': ["root://source.es/file%d" % r],
            'destinations': ["root://dest.ch/file%d%d" % (r,random.randint(0, 100))]
        })
    job = {'files': files}
    job_id = self.app.put(
        url="/jobs",
        params=json.dumps(job),
        status=200
    ).json['job_id']
    # Collect the hashed_id assigned to each file of the submitted job
    files = Session.query(File.hashed_id).filter(File.job_id == job_id)
    hashed_ids = map(lambda f: f.hashed_id, files)
    # Null hypothesis: the distribution of hashed_ids is uniform
    # Bin the values over the full 16-bit range, then run a chi-square test
    histogram, min_value, binsize, outsiders = scipy.stats.histogram(hashed_ids, defaultlimits=(0, 2 ** 16 - 1))
    chisq, pvalue = scipy.stats.chisquare(histogram)
    self.assertGreater(min_value, -1)
    self.assertEqual(outsiders, 0)
    # p-value above 0.1: we cannot reject the uniformity hypothesis
    self.assertGreater(pvalue, 0.1)
| 33.082511
| 116
| 0.530133
| 4,005
| 36,887
| 4.732584
| 0.090137
| 0.034557
| 0.027066
| 0.02026
| 0.795663
| 0.759207
| 0.73193
| 0.712778
| 0.705234
| 0.692677
| 0
| 0.028181
| 0.333342
| 36,887
| 1,114
| 117
| 33.112208
| 0.742589
| 0.043593
| 0
| 0.743621
| 0
| 0
| 0.188889
| 0.026152
| 0
| 0
| 0
| 0
| 0.122722
| 0
| null | null | 0
| 0.012151
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9f3793e8ff21b9190a86b7f2c1fd622151fdcb91
| 482
|
py
|
Python
|
0x05-python-exceptions/1-main.py
|
gogomillan/holbertonschool-higher_level_programming
|
1549ffc4fdc284271684321ff6edd882a314193a
|
[
"MIT"
] | 1
|
2022-02-07T12:13:18.000Z
|
2022-02-07T12:13:18.000Z
|
0x05-python-exceptions/1-main.py
|
Rmolimock/holbertonschool-higher_level_programming
|
cf0421cbb6463b3960dc581badf7d4bbe1622b7d
|
[
"MIT"
] | null | null | null |
0x05-python-exceptions/1-main.py
|
Rmolimock/holbertonschool-higher_level_programming
|
cf0421cbb6463b3960dc581badf7d4bbe1622b7d
|
[
"MIT"
] | 1
|
2021-12-06T18:15:54.000Z
|
2021-12-06T18:15:54.000Z
|
#!/usr/bin/python3
"""Exercise safe_print_integer with ints and a non-integer value."""
safe_print_integer = __import__('1-safe_print_integer').safe_print_integer

# Same three cases as before (positive int, negative int, string),
# deduplicated into a single loop; output is identical.
for value in (89, -89, "Holberton"):
    has_been_print = safe_print_integer(value)
    if not has_been_print:
        print("{} is not an integer".format(value))
| 26.777778
| 74
| 0.755187
| 77
| 482
| 4.363636
| 0.233766
| 0.160714
| 0.285714
| 0.25
| 0.770833
| 0.770833
| 0.770833
| 0.770833
| 0.770833
| 0.770833
| 0
| 0.014252
| 0.126556
| 482
| 17
| 75
| 28.352941
| 0.783848
| 0.03527
| 0
| 0.692308
| 0
| 0
| 0.19181
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0.769231
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
9f83cf7ae61d172b29ee4bd3da9e29dc6206447c
| 23,730
|
py
|
Python
|
skidl/libs/microchip_pic12mcu_sklib.py
|
arjenroodselaar/skidl
|
0bf801bd3b74e6ef94bd9aa1b68eef756b568276
|
[
"MIT"
] | 700
|
2016-08-16T21:12:50.000Z
|
2021-10-10T02:15:18.000Z
|
skidl/libs/microchip_pic12mcu_sklib.py
|
0dvictor/skidl
|
458709a10b28a864d25ae2c2b44c6103d4ddb291
|
[
"MIT"
] | 118
|
2016-08-16T20:51:05.000Z
|
2021-10-10T08:07:18.000Z
|
skidl/libs/microchip_pic12mcu_sklib.py
|
0dvictor/skidl
|
458709a10b28a864d25ae2c2b44c6103d4ddb291
|
[
"MIT"
] | 94
|
2016-08-25T14:02:28.000Z
|
2021-09-12T05:17:08.000Z
|
from skidl import SKIDL, TEMPLATE, Part, Pin, SchLib
SKIDL_lib_version = '0.0.1'
microchip_pic12mcu = SchLib(tool=SKIDL).add_parts(*[
Part(name='PIC12(L)F1501-I/P',dest=TEMPLATE,tool=SKIDL,do_erc=True,aliases=['PIC12(L)F1501-I/SN', 'PIC12(L)F1501-I/MS', 'PIC12(L)F1501-I/MC']),
Part(name='PIC12(L)F1822-I/P',dest=TEMPLATE,tool=SKIDL,do_erc=True,aliases=['PIC12(L)F1822-I/SN', 'PIC12(L)F1822-I/MC']),
Part(name='PIC12(L)F1840-I/P',dest=TEMPLATE,tool=SKIDL,do_erc=True,aliases=['PIC12(L)F1840-I/SN', 'PIC12(L)F1840-I/MC']),
Part(name='PIC12C508-I/P',dest=TEMPLATE,tool=SKIDL,keywords='8-Bit CMOS Microcontroller',description='PIC12C508, 512W EPROM, 25B SRAM, SO8 Wide',ref_prefix='U',num_units=1,do_erc=True,aliases=['PIC12C508-I/SM', 'PIC12C508-I/JW'],pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='CLKIN/OSC1/GP5',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='OSC2/GP4',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='Vpp/~MCLR~/GP3',do_erc=True),
Pin(num='5',name='T0CKI/GP2',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='ICSPCLK/GP1',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='ICSPDAT/GP0',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='VSS',func=Pin.PWRIN,do_erc=True)]),
Part(name='PIC12C508A-I/P',dest=TEMPLATE,tool=SKIDL,keywords='8-Bit CMOS Microcontroller',description='PIC12C508A, 512W EPROM, 25B SRAM, SO8',ref_prefix='U',num_units=1,do_erc=True,aliases=['PIC12C508A-I/SN', 'PIC12C508A-I/SM', 'PIC12C508A-I/JW'],pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='CLKIN/OSC1/GP5',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='OSC2/GP4',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='Vpp/~MCLR~/GP3',do_erc=True),
Pin(num='5',name='T0CKI/GP2',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='ICSPCLK/GP1',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='ICSPDAT/GP0',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='VSS',func=Pin.PWRIN,do_erc=True)]),
Part(name='PIC12C509-I/P',dest=TEMPLATE,tool=SKIDL,keywords='8-Bit CMOS Microcontroller',description='PIC12C509, 1024W EPROM, 41B SRAM, SO8 Wide',ref_prefix='U',num_units=1,do_erc=True,aliases=['PIC12C509-I/SM', 'PIC12C509-I/JW'],pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='CLKIN/OSC1/GP5',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='OSC2/GP4',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='Vpp/~MCLR~/GP3',do_erc=True),
Pin(num='5',name='T0CKI/GP2',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='ICSPCLK/GP1',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='ICSPDAT/GP0',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='VSS',func=Pin.PWRIN,do_erc=True)]),
Part(name='PIC12C509A-I/P',dest=TEMPLATE,tool=SKIDL,keywords='8-Bit CMOS Microcontroller',description='PIC12C509A, 1024W EPROM, 41B SRAM, SO8',ref_prefix='U',num_units=1,do_erc=True,aliases=['PIC12C509A-I/SN', 'PIC12C509A-I/SM', 'PIC12C509A-I/JW'],pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='CLKIN/OSC1/GP5',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='OSC2/GP4',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='Vpp/~MCLR~/GP3',do_erc=True),
Pin(num='5',name='T0CKI/GP2',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='ICSPCLK/GP1',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='ICSPDAT/GP0',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='VSS',func=Pin.PWRIN,do_erc=True)]),
Part(name='PIC12C671-I/P',dest=TEMPLATE,tool=SKIDL,keywords='8-Bit CMOS Microcontroller',description='PIC12C671, 1024W EPROM, 128B SRAM, SO8',ref_prefix='U',num_units=1,do_erc=True,aliases=['PIC12C671-I/SN', 'PIC12C671-I/JW'],pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='CLKIN/OSC1/GP5',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='CLKOUT/OSC2/AN3/GP4',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='Vpp/~MCLR~/GP3',do_erc=True),
Pin(num='5',name='INT/T0CKI/AN2/GP2',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='Vref/AN1/GP1',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='AN0/GP0',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='VSS',func=Pin.PWRIN,do_erc=True)]),
Part(name='PIC12C672-I/P',dest=TEMPLATE,tool=SKIDL,keywords='8-Bit CMOS Microcontroller',description='PIC12C672, 2048W EPROM, 128B SRAM, SO8',ref_prefix='U',num_units=1,do_erc=True,aliases=['PIC12C672-I/SN', 'PIC12C672-I/JW'],pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='CLKIN/OSC1/GP5',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='CLKOUT/OSC2/AN3/GP4',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='Vpp/~MCLR~/GP3',do_erc=True),
Pin(num='5',name='INT/T0CKI/AN2/GP2',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='Vref/AN1/GP1',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='AN0/GP0',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='VSS',func=Pin.PWRIN,do_erc=True)]),
Part(name='PIC12CE518-I/P',dest=TEMPLATE,tool=SKIDL,keywords='8-Bit CMOS Microcontroller',description='PIC12CE518, 512W EPROM, 25B SRAM, SO8',ref_prefix='U',num_units=1,do_erc=True,aliases=['PIC12CE518-I/SN', 'PIC12CE518-I/SM', 'PIC12CE518-I/JW'],pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='OSC1/CLKIN/GP5',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='OSC2/GP4',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='Vpp/~MCLR~/GP3',do_erc=True),
Pin(num='5',name='T0CKI/GP2',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='ICSPCLK/GP1',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='ICSPDAT/GP0',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='VSS',func=Pin.PWRIN,do_erc=True)]),
Part(name='PIC12CE519-I/P',dest=TEMPLATE,tool=SKIDL,keywords='8-Bit CMOS Microcontroller',description='PIC12CE519, 1024W EPROM, 41B SRAM, SO8',ref_prefix='U',num_units=1,do_erc=True,aliases=['PIC12CE519-I/SN', 'PIC12CE519-I/SM', 'PIC12CE519-I/JW'],pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='CLKIN/OSC1/GP5',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='OSC2/GP4',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='Vpp/~MCLR~/GP3',do_erc=True),
Pin(num='5',name='T0CKI/GP2',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='ICSPCLK/GP1',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='ICSPDAT/GP0',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='VSS',func=Pin.PWRIN,do_erc=True)]),
Part(name='PIC12CE673-I/P',dest=TEMPLATE,tool=SKIDL,keywords='8-Bit CMOS Microcontroller',description='1024W EPROM, 128B SRAM, 128B EPROM, PDIP8',ref_prefix='U',num_units=1,do_erc=True,aliases=['PIC12CE673-I/JW'],pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='CLKIN/OSC1/GP5',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='CLKOUT/OSC2/AN3/GP4',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='Vpp/~MCLR~/GP3',do_erc=True),
Pin(num='5',name='INT/T0CKI/AN2/GP2',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='Vref/AN1/GP1',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='AN0/GP0',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='VSS',func=Pin.PWRIN,do_erc=True)]),
Part(name='PIC12CE674-I/P',dest=TEMPLATE,tool=SKIDL,keywords='8-Bit CMOS Microcontroller',description='2048W EPROM, 128B SRAM, 128B EPROM, PDIP8',ref_prefix='U',num_units=1,do_erc=True,aliases=['PIC12CE674-I/JW'],pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='CLKIN/OSC1/GP5',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='CLKOUT/OSC2/AN3/GP4',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='Vpp/~MCLR~/GP3',do_erc=True),
Pin(num='5',name='INT/T0CKI/AN2/GP2',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='Vref/AN1/GP1',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='AN0/GP0',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='VSS',func=Pin.PWRIN,do_erc=True)]),
Part(name='PIC12CR509A-I/P',dest=TEMPLATE,tool=SKIDL,keywords='8-Bit CMOS Microcontroller',description='PIC12CR509A, 1024W ROM, 41B SRAM, SO8',ref_prefix='U',num_units=1,do_erc=True,aliases=['PIC12CR509A-I/SM', 'PIC12CR509A-I/SN'],pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='CLKIN/OSC1/GP5',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='OSC2/GP4',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='Vpp/~MCLR~/GP3',do_erc=True),
Pin(num='5',name='T0CKI/GP2',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='ICSPCLK/GP1',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='ICSPDAT/GP0',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='VSS',func=Pin.PWRIN,do_erc=True)]),
Part(name='PIC12F508-I/P',dest=TEMPLATE,tool=SKIDL,keywords='FLASH-Based 8-Bit CMOS Microcontroller',description='PIC12F508, 512W Flash, 25B SRAM, SO8',ref_prefix='U',num_units=1,do_erc=True,aliases=['PIC12F508-I/SN', 'PIC12F508-I/MS', 'PIC12F508-I/MC'],pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='CLKIN/OSC1/GP5',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='OSC2/GP4',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='Vpp/~MCLR~/GP3',do_erc=True),
Pin(num='5',name='T0CKI/GP2',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='ICSPCLK/GP1',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='ICSPDAT/GP0',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='VSS',func=Pin.PWRIN,do_erc=True)]),
Part(name='PIC12F509-I/P',dest=TEMPLATE,tool=SKIDL,keywords='FLASH-Based 8-Bit CMOS Microcontroller',description='PIC12F509, 1024W Flash, 41B SRAM, SO8',ref_prefix='U',num_units=1,do_erc=True,aliases=['PIC12F509-I/SN', 'PIC12F509-I/MS', 'PIC12F509-I/MC'],pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='CLKIN/OSC1/GP5',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='OSC2/GP4',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='Vpp/~MCLR~/GP3',do_erc=True),
Pin(num='5',name='T0CKI/GP2',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='ICSPCLK/GP1',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='ICSPDAT/GP0',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='VSS',func=Pin.PWRIN,do_erc=True)]),
Part(name='PIC12F510-I/P',dest=TEMPLATE,tool=SKIDL,keywords='FLASH-Based 8-Bit CMOS Microcontroller',description='PIC12F510, 1024W FLASH, 38B SRAM, SO8',ref_prefix='U',num_units=1,do_erc=True,aliases=['PIC12F510-I/SN', 'PIC12F510-I/MS', 'PIC12F510-I/MC'],pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='CLKIN/OSC1/GP5',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='OSC2/GP4',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='Vpp/~MCLR~/GP3',do_erc=True),
Pin(num='5',name='T0CKI/C1OUTI/AN2/GP2',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='ICSPCLK/C1IN-/AN1/GP1',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='ICSPDAT/C1IN+/AN0/GP0',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='VSS',func=Pin.PWRIN,do_erc=True)]),
Part(name='PIC12F519-I/P',dest=TEMPLATE,tool=SKIDL,keywords='FLASH-Based 8-Bit CMOS Microcontroller',description='PIC12F519, 1024W Flash, 41B SRAM, 64B EEPROM, SO8',ref_prefix='U',num_units=1,do_erc=True,aliases=['PIC12F519-I/SN', 'PIC12F519-I/MS', 'PIC12F519-I/MC'],pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='CLKIN/OSC1/GP5',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='OSC2/GP4',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='Vpp/~MCLR~/GP3',do_erc=True),
Pin(num='5',name='T0CKI/GP2',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='ICSPCLK/GP1',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='ICSPDAT/GP0',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='VSS',func=Pin.PWRIN,do_erc=True)]),
Part(name='PIC12F609-I/P',dest=TEMPLATE,tool=SKIDL,keywords='FLASH-Based 8-Bit CMOS Microcontroller',description='PIC12F609, 1024W Flash, 64B SRAM, SO8',ref_prefix='U',num_units=1,do_erc=True,aliases=['PIC12F609-I/SN', 'PIC12F609-I/MS', 'PIC12F609-I/MC'],pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='T1CKI/CLKIN/OSC1/GP5',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='CIN1-/~T1G~/CLKOUT/OSC2/GP4',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='Vpp/~MCLR~/GP3',do_erc=True),
Pin(num='5',name='T0CKI/INT/COUT/GP2',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='CIN0-/ICSPCLK/GP1',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='CIN+/ICSPDAT/GP0',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='VSS',func=Pin.PWRIN,do_erc=True)]),
Part(name='PIC12F615-I/P',dest=TEMPLATE,tool=SKIDL,keywords='FLASH-Based 8-Bit CMOS Microcontroller',description='PIC12F615, 1024W Flash, 64B SRAM, SO8',ref_prefix='U',num_units=1,do_erc=True,aliases=['PIC12F615-I/SN', 'PIC12F615-I/MS', 'PIC12F615-I/MC'],pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='T1CKI/P1A/CLKIN/OSC1/GP5',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='AN3/CIN1-/~T1G~/P1B/CLKOUT/OSC2/GP4',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='Vpp/~MCLR~/~T1G~/GP3',do_erc=True),
Pin(num='5',name='AN2/T0CKI/INT/COUT/CCP1/P1A/GP2',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='AN1/CIN0-/Vref/ICSPCLK/GP1',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='AN0/CIN+/P1B/ICSPDAT/GP0',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='VSS',func=Pin.PWRIN,do_erc=True)]),
Part(name='PIC12F617-I/P',dest=TEMPLATE,tool=SKIDL,keywords='FLASH-Based 8-Bit CMOS Microcontroller',description='PIC12F617, 2048W Flash, 128B SRAM, SO8',ref_prefix='U',num_units=1,do_erc=True,aliases=['PIC12F617-I/SN', 'PIC12F617-I/MS', 'PIC12F617-I/MC'],pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='T1CKI/P1A/CLKIN/OSC1/GP5',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='AN3/CIN1-/~T1G~/P1B/CLKOUT/OSC2/GP4',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='Vpp/~MCLR~/~T1G~/GP3',do_erc=True),
Pin(num='5',name='AN2/T0CKI/INT/COUT/CCP1/P1A/GP2',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='AN1/CIN0-/Vref/ICSPCLK/GP1',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='AN0/CIN+/P1B/ICSPDAT/GP0',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='VSS',func=Pin.PWRIN,do_erc=True)]),
Part(name='PIC12F629-I/P',dest=TEMPLATE,tool=SKIDL,keywords='FLASH-Based 8-Bit CMOS Microcontroller',description='PIC12F629, 1024W Flash, 64B SRAM, 128B EEPROM, SO8',ref_prefix='U',num_units=1,do_erc=True,aliases=['PIC12F629-I/SN', 'PIC12F629-I/MS', 'PIC12F629-I/MC'],pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='T1CKI/CLKIN/OSC1/GP5',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='~T1G~/CLKOUT/OSC2/GP4',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='Vpp/~MCLR~/GP3',do_erc=True),
Pin(num='5',name='T0CKI/INT/COUT/GP2',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='CIN-/ICSPCLK/GP1',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='CIN+/ICSPDAT/GP0',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='VSS',func=Pin.PWRIN,do_erc=True)]),
Part(name='PIC12F635-I/P',dest=TEMPLATE,tool=SKIDL,keywords='FLASH-Based 8-Bit CMOS Microcontroller',description='PIC12F635, 1024W Flash, 64B SRAM, 128B EEPROM, SO8',ref_prefix='U',num_units=1,do_erc=True,aliases=['PIC12F635-I/SN', 'PIC12F635-I/MS', 'PIC12F635-I/MC'],pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='T1CKI/CLKIN/OSC1/GP5',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='AN3/~T1G~/CLKOUT/OSC2/GP4',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='Vpp/~MCLR~/GP3',do_erc=True),
Pin(num='5',name='AN2/T0CKI/INT/COUT/GP2',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='AN1/CIN-/Vref/ICSPCLK/GP1',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='AN0/CIN+/ICSPDAT/GP0',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='VSS',func=Pin.PWRIN,do_erc=True)]),
Part(name='PIC12F675-I/P',dest=TEMPLATE,tool=SKIDL,keywords='FLASH-Based 8-Bit CMOS Microcontroller',description='PIC12F675, 1024W Flash, 64B SRAM, 128B EEPROM, SO8',ref_prefix='U',num_units=1,do_erc=True,aliases=['PIC12F675-I/SN', 'PIC12F675-I/MS', 'PIC12F675-I/MC'],pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='T1CKI/CLKIN/OSC1/GP5',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='AN3/~T1G~/CLKOUT/OSC2/GP4',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='Vpp/~MCLR~/GP3',do_erc=True),
Pin(num='5',name='AN2/T0CKI/INT/COUT/GP2',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='AN1/CIN-/Vref/ICSPCLK/GP1',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='AN0/CIN+/ICSPDAT/GP0',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='VSS',func=Pin.PWRIN,do_erc=True)]),
Part(name='PIC12F683-I/P',dest=TEMPLATE,tool=SKIDL,keywords='FLASH-Based 8-Bit CMOS Microcontroller',description='PIC12F683, 2048W Flash, 128B SRAM, 256B EEPROM, SO8',ref_prefix='U',num_units=1,do_erc=True,aliases=['PIC12F683-I/SN', 'PIC12F683-I/MS', 'PIC12F683-I/MC'],pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='T1CKI/CLKIN/OSC1/GP5',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='AN3/~T1G~/CLKOUT/OSC2/GP4',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='Vpp/~MCLR~/GP3',do_erc=True),
Pin(num='5',name='AN2/T0CKI/INT/COUT/CCCP1/GP2',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='AN1/CIN-/Vref/ICSPCLK/GP1',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='AN0/CIN+/ICSPDAT/ULPWU/GP0',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='VSS',func=Pin.PWRIN,do_erc=True)]),
Part(name='PIC12F752-I/P',dest=TEMPLATE,tool=SKIDL,keywords='FLASH-Based 8-Bit CMOS Microcontroller',description='PIC12F752, 1024W Flash, 64B SRAM, SO8',ref_prefix='U',num_units=1,do_erc=True,aliases=['PIC12F752-I/SN', 'PIC12F752-I/MC'],pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='CLKIN/OSC1/GP5',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='OSC2/GP4',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='Vpp/~MCLR~/GP3',do_erc=True),
Pin(num='5',name='T0CKI/GP2',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='ICSPCLK/GP1',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='ICSPDAT/GP0',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='VSS',func=Pin.PWRIN,do_erc=True)]),
Part(name='PIC12HV609-I/P',dest=TEMPLATE,tool=SKIDL,keywords='FLASH-Based 8-Bit CMOS Microcontroller High Voltage',description='PIC12HV609, 1024W Flash, 64B SRAM, SO8',ref_prefix='U',num_units=1,do_erc=True,aliases=['PIC12HV609-I/SN', 'PIC12HV609-I/MS', 'PIC12HV609-I/MC'],pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='T1CKI/CLKIN/OSC1/GP5',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='CIN1-/~T1G~/CLKOUT/OSC2/GP4',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='Vpp/~MCLR~/GP3',do_erc=True),
Pin(num='5',name='T0CKI/INT/COUT/GP2',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='CIN0-/ICSPCLK/GP1',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='CIN+/ICSPDAT/GP0',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='VSS',func=Pin.PWRIN,do_erc=True)]),
Part(name='PIC12HV615-I/P',dest=TEMPLATE,tool=SKIDL,keywords='FLASH-Based 8-Bit CMOS Microcontroller High Voltage',description='PIC12HV615, 1024W Flash, 64B SRAM, SO8',ref_prefix='U',num_units=1,do_erc=True,aliases=['PIC12HV615-I/SN', 'PIC12HV615-I/MS', 'PIC12HV615-I/MC'],pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='T1CKI/P1A/CLKIN/OSC1/GP5',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='AN3/CIN1-/~T1G~/P1B/CLKOUT/OSC2/GP4',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='Vpp/~MCLR~/~T1G~/GP3',do_erc=True),
Pin(num='5',name='AN2/T0CKI/INT/COUT/CCP1/P1A/GP2',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='AN1/CIN0-/Vref/ICSPCLK/GP1',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='AN0/CIN+/P1B/ICSPDAT/GP0',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='VSS',func=Pin.PWRIN,do_erc=True)]),
Part(name='PIC12HV752-I/P',dest=TEMPLATE,tool=SKIDL,keywords='FLASH-Based 8-Bit CMOS Microcontroller High Voltage',description='PIC12HV752, 1024W Flash, 64B SRAM, SO8',ref_prefix='U',num_units=1,do_erc=True,aliases=['PIC12HV752-I/SN', 'PIC12HV752-I/MC'],pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='CLKIN/OSC1/GP5',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='OSC2/GP4',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='Vpp/~MCLR~/GP3',do_erc=True),
Pin(num='5',name='T0CKI/GP2',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='ICSPCLK/GP1',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='ICSPDAT/GP0',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='VSS',func=Pin.PWRIN,do_erc=True)]),
Part(name='PIC12LF1840T48-I/ST',dest=TEMPLATE,tool=SKIDL,keywords='FLASH-Based 8-Bit CMOS Microcontroller XLP RF Transmitter',description='PIC12LF1840T48, 4kW FLASH, 256B SRAM, 256B EEPROM, RF, TSSOP14',ref_prefix='U',num_units=1,do_erc=True,pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='RX/DT/CCP1/P1A/SRNQ/T1CKI/T1OSI/CLKIN/OSC1/RA5',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='MDCIN2/T1G/P1B/TX/CK/SDO/CLKR/C1IN1-/T1OSO/CPS3/AN3/CLKOUT/OSC2/RA4',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='Vpp/~MCLR~/T1G/~SS~/RA3',do_erc=True),
Pin(num='5',name='VDDRF',func=Pin.PWRIN,do_erc=True),
Pin(num='6',name='CTRL',do_erc=True),
Pin(num='7',name='RFOUT',do_erc=True),
Pin(num='8',name='VSSRF',func=Pin.PWRIN,do_erc=True),
Pin(num='9',name='DATA',do_erc=True),
Pin(num='10',name='XTAL',do_erc=True),
Pin(num='11',name='AN2/CPS2/C1OUT/SRQ/T0CKI/CCP1/P1A/FLT0/SDA/SDI/INT/MDCIN1/RA2',func=Pin.BIDIR,do_erc=True),
Pin(num='12',name='AN1/CPS1/Vref/C1IN0-/SRI/RX/DT/SCL/SCK/MDMIN/ICSPCLK/RA1',func=Pin.BIDIR,do_erc=True),
Pin(num='13',name='AN0/CPS0/C1IN+/DACOUT/TX/CK/SDO/~SS~/P1B/MDOUT/ICSPDAT/RA0',func=Pin.BIDIR,do_erc=True),
Pin(num='14',name='VSS',func=Pin.PWRIN,do_erc=True)])])
| 91.976744
| 287
| 0.63287
| 3,984
| 23,730
| 3.691767
| 0.047942
| 0.085668
| 0.154202
| 0.159097
| 0.866331
| 0.862592
| 0.86028
| 0.855861
| 0.844778
| 0.842875
| 0
| 0.075141
| 0.145301
| 23,730
| 257
| 288
| 92.33463
| 0.650035
| 0
| 0
| 0.788235
| 0
| 0.015686
| 0.285124
| 0.046186
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.003922
| 0
| 0.003922
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9f9f7c439e5f08c96553827f2a2bfb1a36215be3
| 7,638
|
py
|
Python
|
src/rank_data.py
|
loggerhead/KDD_2012_Track1
|
c1cad6ab93e826ca63e692fbd61109dc8c514cf0
|
[
"WTFPL"
] | 5
|
2016-03-18T12:55:43.000Z
|
2019-10-11T01:37:07.000Z
|
src/rank_data.py
|
loggerhead/KDD_2012_Track1
|
c1cad6ab93e826ca63e692fbd61109dc8c514cf0
|
[
"WTFPL"
] | null | null | null |
src/rank_data.py
|
loggerhead/KDD_2012_Track1
|
c1cad6ab93e826ca63e692fbd61109dc8c514cf0
|
[
"WTFPL"
] | 4
|
2017-01-09T06:35:16.000Z
|
2019-05-08T09:32:38.000Z
|
public = [
0.44153, 0.43959, 0.42909, 0.42811, 0.42657, 0.42644, 0.42241, 0.42116, 0.41427, 0.40231, 0.40044, 0.40030, 0.39936, 0.39933, 0.39537, 0.39482, 0.39482, 0.39482, 0.39477, 0.39444, 0.39434, 0.39246, 0.39197, 0.38983, 0.38980, 0.38850, 0.38794, 0.38607, 0.38599, 0.38591, 0.38493, 0.38434, 0.38304, 0.38264, 0.38262, 0.38229, 0.38223, 0.38113, 0.38015, 0.37936, 0.37931, 0.37896, 0.37879, 0.37867, 0.37848, 0.37828, 0.37813, 0.37764, 0.37716, 0.37715, 0.37556, 0.37547, 0.37537, 0.37527, 0.37492, 0.37459, 0.37416, 0.37363, 0.37213, 0.36923, 0.36876, 0.36815, 0.36804, 0.36750, 0.36619, 0.36606, 0.36606, 0.36536, 0.36528, 0.36443, 0.36416, 0.36405, 0.36402, 0.36381, 0.36325, 0.36325, 0.36319, 0.36319, 0.36292, 0.36245, 0.36236, 0.36231, 0.36223, 0.36202, 0.36160, 0.36134, 0.36126, 0.36115, 0.36113, 0.36111, 0.36111, 0.36024, 0.36002, 0.35960, 0.35855, 0.35792, 0.35749, 0.35749, 0.35732, 0.35682, 0.35680, 0.35663, 0.35628, 0.35553, 0.35505, 0.35491, 0.35361, 0.35358, 0.35326, 0.35326, 0.35324, 0.35308, 0.35287, 0.35282, 0.35277, 0.35277, 0.35271, 0.35258, 0.35241, 0.35220, 0.35204, 0.35185, 0.35184, 0.35146, 0.35114, 0.35086, 0.35086, 0.35078, 0.35051, 0.35051, 0.35046, 0.35046, 0.35043, 0.35041, 0.35041, 0.35031, 0.35023, 0.35020, 0.35020, 0.35007, 0.35003, 0.34977, 0.34971, 0.34955, 0.34874, 0.34861, 0.34843, 0.34843, 0.34843, 0.34837, 0.34802, 0.34790, 0.34788, 0.34784, 0.34759, 0.34739, 0.34738, 0.34734, 0.34721, 0.34717, 0.34714, 0.34702, 0.34669, 0.34663, 0.34611, 0.34535, 0.34528, 0.34516, 0.34515, 0.34511, 0.34511, 0.34482, 0.34470, 0.34446, 0.34423, 0.34413, 0.34412, 0.34411, 0.34410, 0.34390, 0.34360, 0.34295, 0.34295, 0.34290, 0.34285, 0.34252, 0.34250, 0.34242, 0.34225, 0.34189, 0.34184, 0.34155, 0.34128, 0.34102, 0.34102, 0.34071, 0.34066, 0.33967, 0.33951, 0.33916, 0.33860, 0.33848, 0.33794, 0.33789, 0.33783, 0.33745, 0.33607, 0.33600, 0.33583, 0.33574, 0.33570, 0.33491, 0.33491, 0.33453, 0.33403, 0.33387, 0.33380, 0.33369, 0.33367, 0.33365, 0.33365, 0.33364, 
0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33364, 0.33362, 0.33362, 0.33327, 0.33310, 0.33298, 0.33283, 0.33271, 0.33243, 0.33231, 0.33224, 0.33220, 0.33177, 0.33177, 0.33128, 0.33125, 0.33125, 0.33081, 0.33056, 0.33009, 0.32985, 0.32867,
]
private = [
0.42650, 0.41874, 0.41657, 0.41622, 0.41404, 0.41156, 0.40156, 0.38921, 0.38815, 0.38812, 0.38807, 0.38491, 0.38361, 0.38266, 0.38266, 0.38266, 0.38263, 0.38246, 0.37958, 0.37643, 0.37639, 0.37541, 0.37541, 0.37504, 0.37465, 0.37422, 0.37411, 0.37277, 0.37208, 0.37126, 0.37123, 0.37088, 0.37083, 0.37035, 0.37011, 0.36963, 0.36937, 0.36920, 0.36897, 0.36894, 0.36888, 0.36871, 0.36799, 0.36721, 0.36705, 0.36654, 0.36654, 0.36575, 0.36472, 0.36452, 0.36267, 0.36107, 0.36042, 0.36012, 0.35971, 0.35957, 0.35922, 0.35922, 0.35791, 0.35765, 0.35685, 0.35665, 0.35630, 0.35630, 0.35587, 0.35528, 0.35461, 0.35449, 0.35417, 0.35389, 0.35389, 0.35382, 0.35382, 0.35361, 0.35355, 0.35311, 0.35269, 0.35239, 0.35215, 0.35195, 0.35167, 0.35137, 0.35126, 0.35093, 0.35078, 0.35078, 0.34944, 0.34926, 0.34887, 0.34865, 0.34831, 0.34831, 0.34831, 0.34818, 0.34807, 0.34804, 0.34751, 0.34732, 0.34703, 0.34668, 0.34658, 0.34581, 0.34569, 0.34492, 0.34466, 0.34452, 0.34449, 0.34386, 0.34382, 0.34360, 0.34357, 0.34338, 0.34325, 0.34309, 0.34307, 0.34303, 0.34299, 0.34263, 0.34191, 0.34126, 0.34122, 0.34121, 0.34114, 0.34106, 0.34099, 0.34099, 0.34063, 0.34048, 0.34041, 0.34029, 0.34017, 0.33990, 0.33990, 0.33953, 0.33944, 0.33914, 0.33912, 0.33889, 0.33868, 0.33856, 0.33844, 0.33834, 0.33831, 0.33828, 0.33828, 0.33778, 0.33774, 0.33769, 0.33767, 0.33739, 0.33733, 0.33725, 0.33707, 0.33705, 0.33635, 0.33622, 0.33579, 0.33577, 0.33448, 0.33425, 0.33403, 0.33373, 0.33362, 0.33357, 0.33273, 0.33238, 0.33236, 0.33231, 0.33216, 0.33211, 0.33178, 0.33119, 0.32969, 0.32949, 0.32877, 0.32864, 0.32859, 0.32823, 0.32816, 0.32770, 0.32762, 0.32730, 0.32709, 0.32682, 0.32651, 0.32651, 0.32650, 0.32630, 0.32625, 0.32625, 0.32623, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 
0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32619, 0.32618, 0.32618, 0.32618, 0.32618, 0.32618, 0.32617, 0.32517, 0.32517, 0.32508, 0.32496, 0.32460, 0.32413, 0.32291, 0.32291, 0.32227, 0.32209, 0.32136, 0.32067, 0.32030, 0.32028, 0.32013, 0.32013, 0.31898, 0.31879, 0.31818, 0.31771, 0.31730, 0.31679, 0.31646, 0.31641, 0.31639, 0.31607, 0.31606, 0.31605, 0.31605, 0.31605, 0.31595, 0.31532, 0.31456, 0.31452, 0.31452, 0.31430, 0.31254, 0.31212, 0.31062, 0.31057, 0.31025, 0.30960, 0.30864, 0.30646, 0.30608, 0.30440, 0.30440, 0.30370, 0.30340, 0.30262, 0.30102, 0.29995, 0.29917, 0.29785, 0.29777, 0.29721,
]
| 1,091.142857
| 3,906
| 0.665488
| 1,692
| 7,638
| 3.004137
| 0.26182
| 0.213653
| 0.249262
| 0.424946
| 0.438127
| 0.419437
| 0.419437
| 0.419437
| 0.419437
| 0.413535
| 0
| 0.747898
| 0.112464
| 7,638
| 7
| 3,907
| 1,091.142857
| 0.001918
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
9fa27264940243277e7df5192484fcd3cf094a8b
| 121
|
py
|
Python
|
pyb4ml/modeling/__init__.py
|
ax-va/PyB4ML
|
309e8152a0f4e16a642522aa531c3f7aeb6aeb3a
|
[
"BSD-3-Clause"
] | null | null | null |
pyb4ml/modeling/__init__.py
|
ax-va/PyB4ML
|
309e8152a0f4e16a642522aa531c3f7aeb6aeb3a
|
[
"BSD-3-Clause"
] | null | null | null |
pyb4ml/modeling/__init__.py
|
ax-va/PyB4ML
|
309e8152a0f4e16a642522aa531c3f7aeb6aeb3a
|
[
"BSD-3-Clause"
] | null | null | null |
from pyb4ml.modeling.factor_graph.factor import Factor
from pyb4ml.modeling.factor_graph.factor_graph import FactorGraph
| 40.333333
| 65
| 0.884298
| 17
| 121
| 6.117647
| 0.411765
| 0.317308
| 0.346154
| 0.461538
| 0.673077
| 0.673077
| 0
| 0
| 0
| 0
| 0
| 0.017699
| 0.066116
| 121
| 2
| 66
| 60.5
| 0.902655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
9fc444b04a27613f993a17751716a8ec8d7a0286
| 13,374
|
py
|
Python
|
axians_netbox_pdu/tests/test_pduconfig_views.py
|
jamesp37/axians-netbox-plugin-pdu
|
c934a3b4a2b57624ae604bfa3469dc7c114b4721
|
[
"Apache-2.0"
] | 29
|
2020-06-24T12:13:23.000Z
|
2022-03-30T11:41:30.000Z
|
axians_netbox_pdu/tests/test_pduconfig_views.py
|
jamesp37/axians-netbox-plugin-pdu
|
c934a3b4a2b57624ae604bfa3469dc7c114b4721
|
[
"Apache-2.0"
] | 13
|
2020-06-30T03:51:34.000Z
|
2021-12-02T12:22:18.000Z
|
axians_netbox_pdu/tests/test_pduconfig_views.py
|
jamesp37/axians-netbox-plugin-pdu
|
c934a3b4a2b57624ae604bfa3469dc7c114b4721
|
[
"Apache-2.0"
] | 7
|
2021-01-04T12:44:33.000Z
|
2022-02-19T05:15:24.000Z
|
from django.contrib.auth.models import Permission, User
from django.test import Client, TestCase, override_settings
from django.urls import reverse
from axians_netbox_pdu.models import PDUConfig
from dcim.models import DeviceType, Manufacturer, PowerOutletTemplate
class PDUConfigListViewTestCase(TestCase):
    """Exercise the PDUConfig list view with and without permissions."""

    def setUp(self):
        """Prepare a logged-in client plus the device-type fixtures."""
        self.user = User.objects.create(username="testuser")
        self.client = Client()
        self.client.force_login(self.user)
        self.url = reverse("plugins:axians_netbox_pdu:pduconfig_list")
        self.manufacturer = Manufacturer.objects.create(name="Test", slug="test")
        self.device_type = DeviceType.objects.create(
            slug="test", model="test", manufacturer=self.manufacturer
        )
        self.outlets = PowerOutletTemplate.objects.create(
            device_type=self.device_type, name="1"
        )

    @override_settings(EXEMPT_VIEW_PERMISSIONS=["*"])
    def test_list_pduconfig_anonymous(self):
        """An anonymous user may list PDUConfigs when permissions are exempted."""
        self.client.logout()
        response = self.client.get(self.url)
        self.assertEqual(200, response.status_code)
        self.assertTemplateUsed(response, "axians_netbox_pdu/pduconfig_list.html")

    @override_settings(EXEMPT_VIEW_PERMISSIONS=[])
    def test_list_pduconfig(self):
        """The list renders only once the view permission is granted."""
        # No permission yet -> forbidden.
        self.assertEqual(403, self.client.get(self.url).status_code)
        # Grant the view permission and retry.
        view_perm = Permission.objects.get(
            content_type__app_label="axians_netbox_pdu", codename="view_pduconfig"
        )
        self.user.user_permissions.add(view_perm)
        response = self.client.get(self.url)
        self.assertEqual(200, response.status_code)
        self.assertTemplateUsed(response, "axians_netbox_pdu/pduconfig_list.html")
class PDUConfigCreateViewTestCase(TestCase):
    """Exercise the PDUConfig create view."""

    def setUp(self):
        """Prepare a logged-in client plus the device-type fixtures."""
        self.user = User.objects.create(username="testuser")
        self.client = Client()
        self.client.force_login(self.user)
        self.url = reverse("plugins:axians_netbox_pdu:pduconfig_add")
        self.manufacturer = Manufacturer.objects.create(name="Test", slug="test")
        self.device_type = DeviceType.objects.create(
            slug="test", model="test", manufacturer=self.manufacturer
        )
        self.outlets = PowerOutletTemplate.objects.create(
            device_type=self.device_type, name="1"
        )

    @override_settings(EXEMPT_VIEW_PERMISSIONS=["*"])
    def test_get_anonymous(self):
        """Anonymous users are redirected to login even with exempt permissions."""
        self.client.logout()
        # 302 == redirect to the login page.
        self.assertEqual(302, self.client.get(self.url).status_code)

    @override_settings(EXEMPT_VIEW_PERMISSIONS=[])
    def test_get(self):
        """The form is visible only once the add permission is granted."""
        self.assertEqual(403, self.client.get(self.url).status_code)
        add_perm = Permission.objects.get(
            content_type__app_label="axians_netbox_pdu", codename="add_pduconfig"
        )
        self.user.user_permissions.add(add_perm)
        response = self.client.get(self.url)
        self.assertEqual(200, response.status_code)
        self.assertTemplateUsed(response, "axians_netbox_pdu/pduconfig_edit.html")

    @override_settings(EXEMPT_VIEW_PERMISSIONS=["*"])
    def test_post_anonymous(self):
        """Anonymous users are redirected to login even with exempt permissions."""
        self.client.logout()
        # 302 == redirect to the login page.
        self.assertEqual(302, self.client.get(self.url).status_code)

    @override_settings(EXEMPT_VIEW_PERMISSIONS=[])
    def test_post(self):
        """Submitting the form creates a PDUConfig for a permitted user."""
        self.assertEqual(403, self.client.post(self.url).status_code)
        add_perm = Permission.objects.get(
            content_type__app_label="axians_netbox_pdu", codename="add_pduconfig"
        )
        self.user.user_permissions.add(add_perm)
        payload = {"device_type": "test", "power_usage_oid": "1.1.1.1", "power_usage_unit": "watts"}
        response = self.client.post(self.url, data=payload)
        self.assertEqual(302, response.status_code)
        self.assertEqual(1, PDUConfig.objects.count())
class PDUConfigEditViewTestCase(TestCase):
    """Exercise the PDUConfig edit view."""

    def setUp(self):
        """Prepare a logged-in client and an existing PDUConfig to edit."""
        self.user = User.objects.create(username="testuser")
        self.client = Client()
        self.client.force_login(self.user)
        self.manufacturer = Manufacturer.objects.create(name="Test", slug="test")
        self.device_type = DeviceType.objects.create(
            slug="test", model="test", manufacturer=self.manufacturer
        )
        self.outlets = PowerOutletTemplate.objects.create(
            device_type=self.device_type, name="1"
        )
        self.pduconfig = PDUConfig.objects.create(
            device_type=self.device_type, power_usage_oid="1.2.3.4", power_usage_unit="watts"
        )
        self.url = reverse("plugins:axians_netbox_pdu:pduconfig_edit", kwargs={"pk": self.pduconfig.pk})

    @override_settings(EXEMPT_VIEW_PERMISSIONS=["*"])
    def test_get_edit_anonymous(self):
        """Anonymous users are redirected to login even with exempt permissions."""
        self.client.logout()
        # 302 == redirect to the login page.
        self.assertEqual(302, self.client.get(self.url).status_code)

    @override_settings(EXEMPT_VIEW_PERMISSIONS=[])
    def test_edit_get(self):
        """The edit form renders only once the change permission is granted."""
        self.assertEqual(403, self.client.get(self.url).status_code)
        change_perm = Permission.objects.get(
            content_type__app_label="axians_netbox_pdu", codename="change_pduconfig"
        )
        self.user.user_permissions.add(change_perm)
        response = self.client.get(self.url)
        self.assertEqual(200, response.status_code)
        self.assertTemplateUsed(response, "axians_netbox_pdu/pduconfig_edit.html")

    @override_settings(EXEMPT_VIEW_PERMISSIONS=["*"])
    def test_post_edit_anonymous(self):
        """Anonymous users are redirected to login even with exempt permissions."""
        self.client.logout()
        # 302 == redirect to the login page.
        self.assertEqual(302, self.client.get(self.url).status_code)

    @override_settings(EXEMPT_VIEW_PERMISSIONS=[])
    def test_edit_post(self):
        """A permitted user can repoint the PDUConfig at a new device type and OID."""
        self.assertEqual(403, self.client.post(self.url).status_code)
        change_perm = Permission.objects.get(
            content_type__app_label="axians_netbox_pdu", codename="change_pduconfig"
        )
        self.user.user_permissions.add(change_perm)
        self.device_type_1 = DeviceType.objects.create(
            slug="test1", model="test1", manufacturer=self.manufacturer
        )
        self.outlets_1 = PowerOutletTemplate.objects.create(
            device_type=self.device_type_1, name="1"
        )
        payload = {"device_type": self.device_type_1.slug, "power_usage_oid": "5.5.5.5", "power_usage_unit": "watts"}
        response = self.client.post(self.url, data=payload)
        self.pduconfig.refresh_from_db()
        self.assertEqual(302, response.status_code)
        self.assertEqual(1, PDUConfig.objects.count())
        self.assertEqual("5.5.5.5", self.pduconfig.power_usage_oid)
class PDUConfigBulkDeleteViewTestCase(TestCase):
    """Test the PDUConfigBulkDeleteView view."""

    def setUp(self):
        """Create a user and baseline data (two device types, two PDUConfigs)."""
        self.user = User.objects.create(username="testuser")
        self.client = Client()
        self.client.force_login(self.user)
        self.url = reverse("plugins:axians_netbox_pdu:pduconfig_bulk_delete")
        self.manufacturer = Manufacturer.objects.create(name="Test", slug="test")
        self.device_type = DeviceType.objects.create(slug="test", model="test", manufacturer=self.manufacturer)
        self.device_type_1 = DeviceType.objects.create(slug="test1", model="test1", manufacturer=self.manufacturer)
        self.outlets = PowerOutletTemplate.objects.create(device_type=self.device_type, name="1")
        self.outlets_1 = PowerOutletTemplate.objects.create(device_type=self.device_type_1, name="1")
        self.pduconfig_1 = PDUConfig.objects.create(
            device_type=self.device_type, power_usage_oid="1.1.1.1", power_usage_unit="watts"
        )
        self.pduconfig_2 = PDUConfig.objects.create(
            device_type=self.device_type_1, power_usage_oid="0.1.2.3", power_usage_unit="kilowatts"
        )

    # BUG FIX: the setting name was misspelled "EXEsMPT_VIEW_PERMISSIONS", so the
    # exemption was never actually applied by override_settings.
    @override_settings(EXEMPT_VIEW_PERMISSIONS=["*"])
    def test_post_anonymous(self):
        """Verify that the view cannot be accessed by anonymous users even if permissions are exempted."""
        self.client.logout()
        response = self.client.post(self.url)
        # Redirected to the login page
        self.assertEqual(response.status_code, 302)

    @override_settings(EXEMPT_VIEW_PERMISSIONS=[])
    def test_post(self):
        """Verify that bulk delete removes only the selected PDUConfig for a permitted user."""
        # Attempt to access without permissions
        response = self.client.post(self.url, data={"pk": [self.pduconfig_1.pk], "confirm": True, "_confirm": True})
        self.assertEqual(response.status_code, 403)
        # Add permission
        self.user.user_permissions.add(
            Permission.objects.get(content_type__app_label="axians_netbox_pdu", codename="delete_pduconfig")
        )
        response = self.client.post(self.url, data={"pk": [self.pduconfig_1.pk], "confirm": True, "_confirm": True})
        self.assertEqual(response.status_code, 302)
        # Of the two fixtures, only pduconfig_2 should remain.
        self.assertEqual(PDUConfig.objects.count(), 1)
class PDUConfigFeedBulkImportViewTestCase(TestCase):
    """Exercise the PDUConfig CSV bulk-import view."""

    def setUp(self):
        """Prepare a logged-in client plus two device types to import against."""
        self.user = User.objects.create(username="testuser")
        self.client = Client()
        self.client.force_login(self.user)
        self.url = reverse("plugins:axians_netbox_pdu:pduconfig_import")
        self.manufacturer = Manufacturer.objects.create(name="Test", slug="test")
        self.device_type = DeviceType.objects.create(
            slug="test", model="test", manufacturer=self.manufacturer
        )
        self.device_type_1 = DeviceType.objects.create(
            slug="test1", model="test1", manufacturer=self.manufacturer
        )
        self.outlets = PowerOutletTemplate.objects.create(
            device_type=self.device_type, name="1"
        )
        self.outlets_1 = PowerOutletTemplate.objects.create(
            device_type=self.device_type_1, name="1"
        )

    @override_settings(EXEMPT_VIEW_PERMISSIONS=["*"])
    def test_get_anonymous(self):
        """Anonymous users are redirected to login even with exempt permissions."""
        self.client.logout()
        # 302 == redirect to the login page.
        self.assertEqual(302, self.client.get(self.url).status_code)

    @override_settings(EXEMPT_VIEW_PERMISSIONS=[])
    def test_get(self):
        """The import form renders only once the add permission is granted."""
        self.assertEqual(403, self.client.get(self.url).status_code)
        add_perm = Permission.objects.get(
            content_type__app_label="axians_netbox_pdu", codename="add_pduconfig"
        )
        self.user.user_permissions.add(add_perm)
        response = self.client.get(self.url)
        self.assertEqual(200, response.status_code)
        self.assertTemplateUsed(response, "utilities/obj_bulk_import.html")

    @override_settings(EXEMPT_VIEW_PERMISSIONS=[])
    def test_post(self):
        """A CSV upload creates one PDUConfig per data row."""
        csv_data = ["device_type,power_usage_oid,power_usage_unit", "test,0.1.2.3,watts", "test1,1.2.3.4,watts"]
        self.assertEqual(403, self.client.post(self.url, data={"csv": "\n".join(csv_data)}).status_code)
        add_perm = Permission.objects.get(
            content_type__app_label="axians_netbox_pdu", codename="add_pduconfig"
        )
        self.user.user_permissions.add(add_perm)
        response = self.client.post(self.url, data={"csv": "\n".join(csv_data)})
        self.assertEqual(200, response.status_code)
        # One object per CSV row, excluding the header line.
        self.assertEqual(len(csv_data) - 1, PDUConfig.objects.count())
| 45.030303
| 117
| 0.692089
| 1,629
| 13,374
| 5.509515
| 0.085328
| 0.044568
| 0.046128
| 0.074318
| 0.899387
| 0.890251
| 0.87922
| 0.867075
| 0.850585
| 0.836435
| 0
| 0.012534
| 0.194631
| 13,374
| 296
| 118
| 45.182432
| 0.820722
| 0.164723
| 0
| 0.730159
| 0
| 0
| 0.098603
| 0.039006
| 0
| 0
| 0
| 0
| 0.174603
| 1
| 0.10582
| false
| 0
| 0.042328
| 0
| 0.174603
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4ca3f23f349af1d7671379a184fe420b4616e9e7
| 5,141
|
py
|
Python
|
tests/test_fill.py
|
dcherian/pop-tools
|
1f484deb9372f5dd51372ace167ad8af3e297577
|
[
"Apache-2.0"
] | null | null | null |
tests/test_fill.py
|
dcherian/pop-tools
|
1f484deb9372f5dd51372ace167ad8af3e297577
|
[
"Apache-2.0"
] | null | null | null |
tests/test_fill.py
|
dcherian/pop-tools
|
1f484deb9372f5dd51372ace167ad8af3e297577
|
[
"Apache-2.0"
] | null | null | null |
import os
import numpy as np
import xarray as xr
import pop_tools
from pop_tools import DATASETS
def test_lateral_fill_np_array():
    """lateral_fill_np_array repairs masked gaps and matches the stored reference."""
    # Generate pseudo-data on a regular grid.
    dx, dy = 0.05, 0.05
    y, x = np.mgrid[slice(1, 3 + dy, dy), slice(1, 5 + dx, dx)]
    z_orig = np.sin(x) ** 10 + np.cos(10 + y * x) * np.cos(x)

    # Construct a mask and blank out the invalid region.
    # BUG FIX: the np.bool alias was deprecated in NumPy 1.20 and removed in
    # 1.24; the builtin bool is the supported spelling and is equivalent.
    valid_points = np.ones(z_orig.shape, dtype=bool)
    valid_points = np.where(y < 0.5 * np.sin(5 * x) + 1.5, False, valid_points)
    z_orig = np.where(~valid_points, np.nan, z_orig)

    # Punch a hole and an outlier patch that the fill must repair.
    z_miss = z_orig.copy()
    z_miss[:20, 62:] = np.nan
    z_miss[15:18, 0:2] = 10.0

    # Compute the lateral fill.
    z_fill = pop_tools.lateral_fill_np_array(z_miss, valid_points)

    # Load the canned reference solution and compare.
    ref_data_file = DATASETS.fetch('lateral_fill_np_array_filled_ref.npz')
    with np.load(ref_data_file) as data:
        z_fill_ref = data['arr_0']
    np.testing.assert_allclose(z_fill, z_fill_ref, atol=1e-14, equal_nan=True, verbose=True)
def test_lateral_fill_np_array_ltripole():
    """lateral_fill_np_array with ltripole=True matches the tripole reference."""
    # Generate pseudo-data on a regular grid (extended one cell on each side).
    dx, dy = 0.05, 0.05
    y, x = np.mgrid[slice(1 - dy, 3 + dy, dy), slice(1 - dx, 5 + dx, dx)]
    z_orig = np.sin(x) ** 10 + np.cos(10 + y * x) * np.cos(x)

    # Construct a mask and blank out the invalid region.
    # BUG FIX: the np.bool alias was deprecated in NumPy 1.20 and removed in
    # 1.24; the builtin bool is the supported spelling and is equivalent.
    valid_points = np.ones(z_orig.shape, dtype=bool)
    valid_points = np.where(y < 0.5 * np.sin(5 * x) + 1.5, False, valid_points)
    z_orig = np.where(~valid_points, np.nan, z_orig)

    # Punch holes / outlier patches, including ones touching the top row so the
    # tripole (folded northern boundary) code path is exercised.
    z_miss = z_orig.copy()
    z_miss[:20, 62:] = np.nan
    z_miss[35:, 55:70] = np.nan
    z_miss[15:18, 0:2] = 10.0
    z_miss[-2:, 12:20] = 10.0

    # Compute the lateral fill with the tripole boundary enabled.
    z_fill = pop_tools.lateral_fill_np_array(z_miss, valid_points, ltripole=True)

    # Load the canned reference solution and compare.
    ref_data_file = DATASETS.fetch('lateral_fill_np_array_tripole_filled_ref.npz')
    with np.load(ref_data_file) as data:
        z_fill_ref = data['arr_0']
    np.testing.assert_allclose(z_fill, z_fill_ref, atol=1e-14, equal_nan=True, verbose=True)
def test_lateral_fill_2D():
    """Filling a 2-D field restores every valid point and preserves attrs."""
    grid = pop_tools.get_grid('POP_gx3v7')
    ocean = grid.KMT > 0
    # Float copy of KMT masked to the ocean, with a hole punched in it.
    da_in = (grid.KMT.copy() * 1.0).where(grid.KMT > 0)
    da_in.values[20:40, 80:] = np.nan
    attrs = {'long_name': 'test field', 'units': 'none'}
    da_in.attrs = attrs
    da_out = pop_tools.lateral_fill(da_in, ocean)
    # After filling, exactly the valid (ocean) points are non-NaN.
    assert (da_out.notnull() == ocean).all()
    assert da_out.attrs == attrs
def test_lateral_fill_3D():
    """Every vertical level of a broadcast 3-D field fills identically."""
    grid = pop_tools.get_grid('POP_gx3v7')
    field = (grid.KMT.copy() * 1.0).where(grid.KMT > 0)
    field.values[20:40, 80:] = np.nan
    # Broadcast the 2-D field across three identical z_t levels.
    da_in = xr.DataArray(np.ones((3)), dims=('z_t')) * field
    attrs = {'long_name': 'test field', 'units': 'none'}
    da_in.attrs = attrs
    valid_points = grid.KMT > 0
    da_out = pop_tools.lateral_fill(da_in, valid_points)
    # Each level must match level 0 exactly (input levels were identical).
    baseline = da_out[0, :, :]
    for level in range(1, da_out.shape[0]):
        np.testing.assert_array_equal(baseline, da_out[level, :, :])
    assert da_out.attrs == attrs
def test_lateral_fill_4D():
    """All (time, z_t) slices of a broadcast 4-D field fill identically."""
    grid = pop_tools.get_grid('POP_gx3v7')
    field = (grid.KMT.copy() * 1.0).where(grid.KMT > 0)
    field.values[20:40, 80:] = np.nan
    # Broadcast across 3 time steps and 5 vertical levels.
    da_in = (
        xr.DataArray(np.ones((3)), dims=('time')) * xr.DataArray(np.ones((5)), dims=('z_t')) * field
    )
    attrs = {'long_name': 'test field', 'units': 'none'}
    da_in.attrs = attrs
    valid_points = grid.KMT > 0
    da_out = pop_tools.lateral_fill(da_in, valid_points)
    # Every (time, level) slice must match the first slice exactly.
    baseline = da_out[0, 0, :, :]
    for level in range(0, da_out.shape[1]):
        for step in range(0, da_out.shape[0]):
            np.testing.assert_array_equal(baseline, da_out[step, level, :, :])
    assert da_out.attrs == attrs
def test_lateral_fill_4D_3Dmask():
    """A depth-dependent 3-D mask still yields identical fills across time."""
    grid = pop_tools.get_grid('POP_gx3v7')
    field = (grid.KMT.copy() * 1.0).where(grid.KMT > 0)
    field.values[20:40, 80:] = np.nan
    # Broadcast across 3 time steps and the grid's full set of z_t levels.
    da_in = (
        xr.DataArray(np.ones((3)), dims=('time'))
        * xr.DataArray(np.ones((len(grid.z_t))), dims=('z_t'))
        * field
    )
    attrs = {'long_name': 'test field', 'units': 'none'}
    da_in.attrs = attrs

    # Build a 3-D mask: cell (k, j, i) is valid only while k < KMT(j, i).
    nk = len(grid.z_t)
    nj, ni = grid.KMT.shape
    level_index = xr.DataArray(np.arange(0, nk), dims=('z_t'))
    ones_3d = xr.DataArray(np.ones((nk, nj, ni)), dims=('z_t', 'nlat', 'nlon'))
    level_cube = level_index * ones_3d
    masked_levels = level_cube.where(level_cube < grid.KMT)
    valid_points = xr.where(masked_levels.notnull(), True, False)

    da_out = pop_tools.lateral_fill(da_in, valid_points)

    # Within each level, every time step must match time step 0.
    for level in range(0, da_out.shape[1]):
        baseline = da_out[0, level, :, :]
        for step in range(0, da_out.shape[0]):
            np.testing.assert_array_equal(baseline, da_out[step, level, :, :])
    assert da_out.attrs == attrs
| 30.241176
| 100
| 0.617973
| 884
| 5,141
| 3.364253
| 0.153846
| 0.077673
| 0.022192
| 0.036315
| 0.845999
| 0.828178
| 0.817418
| 0.817418
| 0.815736
| 0.802623
| 0
| 0.042861
| 0.233029
| 5,141
| 169
| 101
| 30.420118
| 0.711387
| 0.073916
| 0
| 0.612613
| 1
| 0
| 0.056691
| 0.01686
| 0
| 0
| 0
| 0
| 0.09009
| 1
| 0.054054
| false
| 0
| 0.045045
| 0
| 0.099099
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4cd76385c0ef5c5ab2b0e940b43ff3170de9886d
| 7,074
|
py
|
Python
|
tasks-deploy/calc-1/check.py
|
irdkwmnsb/lkshl-ctf
|
e5c0200ddc8ba73df5f321b87b9763fb1bbaba57
|
[
"MIT"
] | 3
|
2021-03-30T06:27:58.000Z
|
2021-04-03T17:56:35.000Z
|
tasks-deploy/calc-1/check.py
|
irdkwmnsb/lkshl-ctf
|
e5c0200ddc8ba73df5f321b87b9763fb1bbaba57
|
[
"MIT"
] | null | null | null |
tasks-deploy/calc-1/check.py
|
irdkwmnsb/lkshl-ctf
|
e5c0200ddc8ba73df5f321b87b9763fb1bbaba57
|
[
"MIT"
] | null | null | null |
def check(attempt, context):
    """Grade a submitted flag.

    Returns Checked(True) when the answer is the participant's own flag,
    CheckedPlagiarist(False, owner_index) when it is some other valid flag,
    and Checked(False) when it is not a flag at all.
    """
    try:
        idx = flags.index(attempt.answer)
    except ValueError:
        # Not a valid flag at all.
        return Checked(False)
    # Each participant owns the flag at (participant id modulo flag count).
    if idx == attempt.participant.id % len(flags):
        return Checked(True)
    return CheckedPlagiarist(False, idx)
flags = ['LKL{U_r_a_calculator_MpU6R52l}', 'LKL{U_r_a_calculator_ZCMMryOX}', 'LKL{U_r_a_calculator_zQhyTruO}', 'LKL{U_r_a_calculator_ezQkaKy7}', 'LKL{U_r_a_calculator_NLOxU80o}', 'LKL{U_r_a_calculator_ZRqFdjDu}', 'LKL{U_r_a_calculator_XjYHUaTM}', 'LKL{U_r_a_calculator_nE0ykZCK}', 'LKL{U_r_a_calculator_btndlIfS}', 'LKL{U_r_a_calculator_7QWvlR6c}', 'LKL{U_r_a_calculator_9tSUJxJR}', 'LKL{U_r_a_calculator_emqWF9ZK}', 'LKL{U_r_a_calculator_WYHXD8v2}', 'LKL{U_r_a_calculator_YOLlduNM}', 'LKL{U_r_a_calculator_ptsxQFPA}', 'LKL{U_r_a_calculator_TLkf2QwS}', 'LKL{U_r_a_calculator_vlIcMnuY}', 'LKL{U_r_a_calculator_Y566J0wP}', 'LKL{U_r_a_calculator_FfG17GYR}', 'LKL{U_r_a_calculator_8tuDaVZ3}', 'LKL{U_r_a_calculator_l8gqhKJT}', 'LKL{U_r_a_calculator_W23Bkmqk}', 'LKL{U_r_a_calculator_8hzyzRLY}', 'LKL{U_r_a_calculator_ZwGIX4uq}', 'LKL{U_r_a_calculator_44VieoKJ}', 'LKL{U_r_a_calculator_1ye3W1ML}', 'LKL{U_r_a_calculator_Yp8019yg}', 'LKL{U_r_a_calculator_Gk2JkJ4C}', 'LKL{U_r_a_calculator_JHbX3ASk}', 'LKL{U_r_a_calculator_lD8P7Lp8}', 'LKL{U_r_a_calculator_qkSoFmWY}', 'LKL{U_r_a_calculator_Y0KCVTkt}', 'LKL{U_r_a_calculator_TW3PzAaP}', 'LKL{U_r_a_calculator_81iw5rtU}', 'LKL{U_r_a_calculator_w28abHkD}', 'LKL{U_r_a_calculator_Ow77dCSQ}', 'LKL{U_r_a_calculator_IKNynsBN}', 'LKL{U_r_a_calculator_0lfzdKzk}', 'LKL{U_r_a_calculator_GlobM4T3}', 'LKL{U_r_a_calculator_kwRJhgFm}', 'LKL{U_r_a_calculator_Fy9vUbvM}', 'LKL{U_r_a_calculator_XGs8L5Fy}', 'LKL{U_r_a_calculator_LjFlC4MO}', 'LKL{U_r_a_calculator_PL50o9fb}', 'LKL{U_r_a_calculator_YYysC0jQ}', 'LKL{U_r_a_calculator_myCNc6dE}', 'LKL{U_r_a_calculator_Ualz1eyO}', 'LKL{U_r_a_calculator_4nZ83c1i}', 'LKL{U_r_a_calculator_oADuZBNQ}', 'LKL{U_r_a_calculator_M4zyLLla}', 'LKL{U_r_a_calculator_YplV7fiy}', 'LKL{U_r_a_calculator_C8rLUsXI}', 'LKL{U_r_a_calculator_afko5NDt}', 'LKL{U_r_a_calculator_Wt92erop}', 'LKL{U_r_a_calculator_0IyiGj4U}', 'LKL{U_r_a_calculator_DYoPcY8o}', 'LKL{U_r_a_calculator_YJZNKCjk}', 'LKL{U_r_a_calculator_hzZaGpr1}', 
'LKL{U_r_a_calculator_hoFOX7lK}', 'LKL{U_r_a_calculator_kWRAOpM3}', 'LKL{U_r_a_calculator_va8CToeH}', 'LKL{U_r_a_calculator_0IXPBSaV}', 'LKL{U_r_a_calculator_1PHgH0OJ}', 'LKL{U_r_a_calculator_zegEgcRK}', 'LKL{U_r_a_calculator_X0T92Ygd}', 'LKL{U_r_a_calculator_oH8uCjbo}', 'LKL{U_r_a_calculator_hNmOEMOl}', 'LKL{U_r_a_calculator_2bzSp4o8}', 'LKL{U_r_a_calculator_ZNizwyNt}', 'LKL{U_r_a_calculator_ODAJpXRI}', 'LKL{U_r_a_calculator_yhNcuMFr}', 'LKL{U_r_a_calculator_SUPcmZW5}', 'LKL{U_r_a_calculator_a3zhUcV0}', 'LKL{U_r_a_calculator_y9MErPu2}', 'LKL{U_r_a_calculator_FnoDGV1D}', 'LKL{U_r_a_calculator_BzZ9jOhb}', 'LKL{U_r_a_calculator_NtWTFvAH}', 'LKL{U_r_a_calculator_yAIiqPGQ}', 'LKL{U_r_a_calculator_PLLuo3xC}', 'LKL{U_r_a_calculator_GBmyws3v}', 'LKL{U_r_a_calculator_L8qcd6Ee}', 'LKL{U_r_a_calculator_gOavcaOr}', 'LKL{U_r_a_calculator_72Fpv8Nd}', 'LKL{U_r_a_calculator_XdYIByzj}', 'LKL{U_r_a_calculator_AoNTqkfl}', 'LKL{U_r_a_calculator_dEce2p7p}', 'LKL{U_r_a_calculator_ZTZpMU3W}', 'LKL{U_r_a_calculator_0LJoj4r2}', 'LKL{U_r_a_calculator_hvpnch9A}', 'LKL{U_r_a_calculator_hPtJpqbj}', 'LKL{U_r_a_calculator_kcUQsW0T}', 'LKL{U_r_a_calculator_oMSi6i7M}', 'LKL{U_r_a_calculator_7U98J5FF}', 'LKL{U_r_a_calculator_iWfQruwm}', 'LKL{U_r_a_calculator_WTJX7WGp}', 'LKL{U_r_a_calculator_PXUpDq40}', 'LKL{U_r_a_calculator_ffmtLw16}', 'LKL{U_r_a_calculator_B6P51HVa}', 'LKL{U_r_a_calculator_LjPAx2yL}', 'LKL{U_r_a_calculator_kFF5T4sh}', 'LKL{U_r_a_calculator_i9YfQRXC}', 'LKL{U_r_a_calculator_nAfXOzoO}', 'LKL{U_r_a_calculator_l19GSBTQ}', 'LKL{U_r_a_calculator_Wq72rKjA}', 'LKL{U_r_a_calculator_vGulPu0L}', 'LKL{U_r_a_calculator_31LfVROW}', 'LKL{U_r_a_calculator_Bg5w4bPr}', 'LKL{U_r_a_calculator_CotNy2m6}', 'LKL{U_r_a_calculator_FzCOV43h}', 'LKL{U_r_a_calculator_2FLo2IqC}', 'LKL{U_r_a_calculator_wgzIk4JM}', 'LKL{U_r_a_calculator_CAbTKm5l}', 'LKL{U_r_a_calculator_bJLYDIjv}', 'LKL{U_r_a_calculator_2SuV5Djb}', 'LKL{U_r_a_calculator_UAbkF5Qt}', 'LKL{U_r_a_calculator_LkphePoV}', 
'LKL{U_r_a_calculator_TkoSBtQM}', 'LKL{U_r_a_calculator_KnO8ep7Z}', 'LKL{U_r_a_calculator_8kcWBc3E}', 'LKL{U_r_a_calculator_txPoYyDU}', 'LKL{U_r_a_calculator_qqWH5yDZ}', 'LKL{U_r_a_calculator_oLT4bj61}', 'LKL{U_r_a_calculator_bsgHv5vZ}', 'LKL{U_r_a_calculator_7VVEdmRg}', 'LKL{U_r_a_calculator_CUsV9aRJ}', 'LKL{U_r_a_calculator_ztKpinpx}', 'LKL{U_r_a_calculator_O5D53Xqr}', 'LKL{U_r_a_calculator_N0SkN3y4}', 'LKL{U_r_a_calculator_9RHNDx5Y}', 'LKL{U_r_a_calculator_IKHMdLJB}', 'LKL{U_r_a_calculator_RNsVjaae}', 'LKL{U_r_a_calculator_qKqrhLXJ}', 'LKL{U_r_a_calculator_9BLxNEdM}', 'LKL{U_r_a_calculator_MbXq5JC8}', 'LKL{U_r_a_calculator_CC77dsQ2}', 'LKL{U_r_a_calculator_P5ymw6CI}', 'LKL{U_r_a_calculator_OU3xO04b}', 'LKL{U_r_a_calculator_Y2s9hwqs}', 'LKL{U_r_a_calculator_AxeV9xVY}', 'LKL{U_r_a_calculator_UQZjqa81}', 'LKL{U_r_a_calculator_54dUN6Fd}', 'LKL{U_r_a_calculator_3XaAyjyo}', 'LKL{U_r_a_calculator_PakrVnJY}', 'LKL{U_r_a_calculator_UAgZQ2KG}', 'LKL{U_r_a_calculator_Av4FobCD}', 'LKL{U_r_a_calculator_z1oCuBvl}', 'LKL{U_r_a_calculator_RoIReUJ6}', 'LKL{U_r_a_calculator_XKBhtBaD}', 'LKL{U_r_a_calculator_geZpphgm}', 'LKL{U_r_a_calculator_5oYclsUG}', 'LKL{U_r_a_calculator_DYB4axYQ}', 'LKL{U_r_a_calculator_95ZSSocI}', 'LKL{U_r_a_calculator_BRmjWxOT}', 'LKL{U_r_a_calculator_zRcqvI5c}', 'LKL{U_r_a_calculator_LTPEDTgw}', 'LKL{U_r_a_calculator_tDJbMaOe}', 'LKL{U_r_a_calculator_N7A5y2Bh}', 'LKL{U_r_a_calculator_6U1pjBpG}', 'LKL{U_r_a_calculator_4aWt3Pwm}', 'LKL{U_r_a_calculator_R7m6W7GY}', 'LKL{U_r_a_calculator_FNUv7kjY}', 'LKL{U_r_a_calculator_h5FJKOTN}', 'LKL{U_r_a_calculator_BpKQW2jo}', 'LKL{U_r_a_calculator_oOcmFXbu}', 'LKL{U_r_a_calculator_0udlsiLR}', 'LKL{U_r_a_calculator_UtLKrM5Z}', 'LKL{U_r_a_calculator_IezKPuQo}', 'LKL{U_r_a_calculator_gOLFbOkl}', 'LKL{U_r_a_calculator_yK3oYGS7}', 'LKL{U_r_a_calculator_pYKf4yrG}', 'LKL{U_r_a_calculator_xHOeaHmu}', 'LKL{U_r_a_calculator_S7nttQ2d}', 'LKL{U_r_a_calculator_DVDlDC31}', 'LKL{U_r_a_calculator_FXMYbLbl}', 
'LKL{U_r_a_calculator_vM1SUASp}', 'LKL{U_r_a_calculator_W0r0GiMM}', 'LKL{U_r_a_calculator_VA3vjxXb}', 'LKL{U_r_a_calculator_qsfd2cWP}', 'LKL{U_r_a_calculator_8VMjXdk0}', 'LKL{U_r_a_calculator_b9lgiQXh}', 'LKL{U_r_a_calculator_YVWtWb4I}', 'LKL{U_r_a_calculator_oDlVi728}', 'LKL{U_r_a_calculator_4FbLz39S}', 'LKL{U_r_a_calculator_DgYVWGF5}', 'LKL{U_r_a_calculator_wocR5QIX}', 'LKL{U_r_a_calculator_85FxZol2}', 'LKL{U_r_a_calculator_G7Sp8MJH}', 'LKL{U_r_a_calculator_tbOSCbqQ}', 'LKL{U_r_a_calculator_z7DHfLPs}', 'LKL{U_r_a_calculator_8UeFRm78}', 'LKL{U_r_a_calculator_OdO4c1ez}', 'LKL{U_r_a_calculator_TDhDo1wn}', 'LKL{U_r_a_calculator_1UVroyQp}', 'LKL{U_r_a_calculator_ACeiavV2}', 'LKL{U_r_a_calculator_iYaUikUR}', 'LKL{U_r_a_calculator_UKxpq2Pb}', 'LKL{U_r_a_calculator_37JUTyXf}', 'LKL{U_r_a_calculator_L34Knhkp}', 'LKL{U_r_a_calculator_bYDHgT35}', 'LKL{U_r_a_calculator_jK8xGcVY}']
| 884.25
| 6,808
| 0.817501
| 1,232
| 7,074
| 4.044643
| 0.180195
| 0.160546
| 0.200682
| 0.240819
| 0.642183
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038162
| 0.036896
| 7,074
| 8
| 6,808
| 884.25
| 0.693234
| 0
| 0
| 0
| 0
| 0
| 0.848896
| 0.848896
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
4cde1541845f5700366234e4be007390ca4f97f7
| 228
|
py
|
Python
|
data/util/read_keys.py
|
dongmeic/RTP
|
b158b61d69fc1509ea939126916bef21865fbcca
|
[
"MIT"
] | null | null | null |
data/util/read_keys.py
|
dongmeic/RTP
|
b158b61d69fc1509ea939126916bef21865fbcca
|
[
"MIT"
] | null | null | null |
data/util/read_keys.py
|
dongmeic/RTP
|
b158b61d69fc1509ea939126916bef21865fbcca
|
[
"MIT"
] | null | null | null |
import json
def _read_key(path, field):
    """Return keys['ArcGIS_Online'][field] from the JSON key file at *path*."""
    with open(path, 'r') as f:
        keys = json.load(f)
    return keys['ArcGIS_Online'][field]
def read_usr(path):
    """Return the ArcGIS Online username stored in the JSON key file at *path*."""
    return _read_key(path, 'usr')
def read_pwd(path):
    """Return the ArcGIS Online password stored in the JSON key file at *path*."""
    return _read_key(path, 'pwd')
| 19
| 36
| 0.657895
| 40
| 228
| 3.65
| 0.425
| 0.09589
| 0.164384
| 0.219178
| 0.753425
| 0.753425
| 0.753425
| 0.753425
| 0.753425
| 0.753425
| 0
| 0
| 0.157895
| 228
| 11
| 37
| 20.727273
| 0.760417
| 0
| 0
| 0.444444
| 0
| 0
| 0.149123
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.111111
| 0
| 0.555556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e2693534412de9732960d0cc8e5bec7faab51963
| 188
|
py
|
Python
|
spark_dependencies/python_lib/digSparkUtil/miscUtil.py
|
usc-isi-i2/WEDC
|
cf48355d8a5c6616fb34be9932520875e218d2c4
|
[
"Apache-2.0"
] | 2
|
2015-12-18T00:37:46.000Z
|
2016-05-27T19:52:53.000Z
|
spark_dependencies/python_lib/digSparkUtil/miscUtil.py
|
usc-isi-i2/WEDC
|
cf48355d8a5c6616fb34be9932520875e218d2c4
|
[
"Apache-2.0"
] | null | null | null |
spark_dependencies/python_lib/digSparkUtil/miscUtil.py
|
usc-isi-i2/WEDC
|
cf48355d8a5c6616fb34be9932520875e218d2c4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Filename: miscUtil.py
from datetime import datetime
def seconds_since_epoch():
    """Return the current UTC time as whole seconds since the Unix epoch.

    :return: (int) Seconds elapsed since 1970-01-01T00:00:00 UTC.
    """
    # Local import: the module level only imports `datetime`.
    # `datetime.utcnow()` is deprecated (Python 3.12+); use an aware "now"
    # against an aware epoch instead — same integer result.
    from datetime import timezone
    epoch = datetime(1970, 1, 1, tzinfo=timezone.utc)
    return int(round((datetime.now(timezone.utc) - epoch).total_seconds()))
| 20.888889
| 81
| 0.723404
| 26
| 188
| 5.115385
| 0.807692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036585
| 0.12766
| 188
| 8
| 82
| 23.5
| 0.77439
| 0.223404
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
e2707522e084b5789bd477e29b009eba5a07173f
| 39,539
|
py
|
Python
|
gym_environments/gym_shadow_hand/envs/manipulate_env.py
|
szahlner/shadow-teleop
|
360c7d7c2586e9295c45fca0b4850b43d230bcda
|
[
"MIT"
] | 1
|
2022-03-02T20:27:20.000Z
|
2022-03-02T20:27:20.000Z
|
gym_environments/gym_shadow_hand/envs/manipulate_env.py
|
szahlner/shadow-teleop
|
360c7d7c2586e9295c45fca0b4850b43d230bcda
|
[
"MIT"
] | null | null | null |
gym_environments/gym_shadow_hand/envs/manipulate_env.py
|
szahlner/shadow-teleop
|
360c7d7c2586e9295c45fca0b4850b43d230bcda
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import pybullet
from gym import spaces
from gym import utils
from collections import OrderedDict
from gym_shadow_hand.envs.pybullet_env import PyBulletEnv, PyBulletGoalEnv
from gym_shadow_hand.envs import rotations
from scipy.spatial.transform import Rotation as R
# Simulation asset file names (resolved against the package "assets" folder at load time).
MODEL_PATH = "shadow_hand_right.urdf"
BLOCK_PATH = "block.obj"
BLOCK_TEXTURE_PATH = "block.png"
BLOCK_LENGTH = 0.04  # Block length in metres
# Joints driven directly by the action vector (byte strings to match pybullet's joint names).
MOVABLE_JOINTS = [b"rh_FFJ4", b"rh_FFJ3", b"rh_FFJ2",
                  b"rh_MFJ4", b"rh_MFJ3", b"rh_MFJ2",
                  b"rh_RFJ4", b"rh_RFJ3", b"rh_RFJ2",
                  b"rh_LFJ5", b"rh_LFJ4", b"rh_LFJ3", b"rh_LFJ2",
                  b"rh_THJ5", b"rh_THJ4", b"rh_THJ3", b"rh_THJ2", b"rh_THJ1",
                  b"rh_WRJ2", b"rh_WRJ1"]
# Distal joints that follow their intermediate neighbour (driven joint -> source joint);
# step() applies the source joint's target scaled by couple_factor.
COUPLED_JOINTS = {b"rh_FFJ1": b"rh_FFJ2",
                  b"rh_MFJ1": b"rh_MFJ2",
                  b"rh_RFJ1": b"rh_RFJ2",
                  b"rh_LFJ1": b"rh_LFJ2"}
# Link name used to locate the palm when placing the object.
PALM_LINK = [b"rh_palm"]
# Rest pose of the hand, {joint name: target position} (presumably radians — TODO confirm).
INITIAL_POS = {b"rh_WRJ2": -0.05866723135113716,
               b"rh_WRJ1": 0.08598895370960236,
               b"rh_FFJ4": -0.05925952824065458,
               b"rh_FFJ3": 0.0,
               b"rh_FFJ2": 0.5306965075027753,
               b"rh_FFJ1": 0.5306965075027753,
               b"rh_MFJ4": 0.015051404275727428,
               b"rh_MFJ3": 0.0,
               b"rh_MFJ2": 0.5364634589883859,
               b"rh_MFJ1": 0.5364634589883859,
               b"rh_RFJ4": -0.056137955514170744,
               b"rh_RFJ3": 0.0,
               b"rh_RFJ2": 0.5362351077308591,
               b"rh_RFJ1": 0.5362351077308591,
               b"rh_LFJ5": 0.0,
               b"rh_LFJ4": -0.216215152247765,
               b"rh_LFJ3": 0.0,
               b"rh_LFJ2": 0.542813974505131,
               b"rh_LFJ1": 0.542813974505131,
               b"rh_THJ5": 1.047,
               b"rh_THJ4": 0.4912634677627796,
               b"rh_THJ3": 0.209,
               b"rh_THJ2": -0.024347361541391634,
               b"rh_THJ1": 0.28372550178530886}
# Goal Euler orientation for the block (identity); fed to getQuaternionFromEuler in sample_goal().
GOAL_ORIENTATION = [0.] * 3
def goal_distance(goal_a, goal_b):
    """Angular distance between two goal orientations.

    :param goal_a: (numpy.array) Achieved goal (position, orientation).
    :param goal_b: (numpy.array) Desired goal (position, orientation).
    :return: (float) Angle of the relative rotation, in radians.
    """
    # The first three entries (position) are ignored; only orientation counts.
    goal_a = goal_a[3:]
    goal_b = goal_b[3:]
    # Six remaining entries means the 6D continuous representation; convert back.
    if goal_a.shape[0] == 6:
        goal_a = continuous6D_to_quaternion(goal_a)
        goal_b = continuous6D_to_quaternion(goal_b)
    goal_a = np.array(pybullet.getEulerFromQuaternion(goal_a))
    goal_b = np.array(pybullet.getEulerFromQuaternion(goal_b))
    # Ignore the y orientation by forcing it to match the desired goal.
    goal_a[1] = goal_b[1]
    goal_a = rotations.euler2quat(goal_a)
    goal_b = rotations.euler2quat(goal_b)
    # Relative rotation q_a * conj(q_b); its angle is 2*arccos(w), with w
    # clipped into [-1, 1] to guard against floating-point drift.
    quaternion_diff = rotations.quat_mul(goal_a, rotations.quat_conjugate(goal_b))
    angle_diff = 2 * np.arccos(np.clip(quaternion_diff[..., 0], -1., 1.))
    return angle_diff
def quaternion_to_continuous6D(orientation):
    """Mapping from SO(3) to 6D representation.
    Zhou et al. "On the Continuity of Rotation Representations in Neural Networks", arXiv:1812.07035v4.
    :param orientation: (list) Quaternion.
    :return: (list) 6D representation of SO(3).
    """
    # The first six entries of the row-major rotation matrix are the 6D encoding.
    return pybullet.getMatrixFromQuaternion(orientation)[:6]
def continuous6D_to_quaternion(orientation):
    """Mapping from 6D representation to SO(3).
    Zhou et al. "On the Continuity of Rotation Representations in Neural Networks", arXiv:1812.07035v4.
    :param orientation: (numpy.array) 6D representation of SO(3).
    :return: (numpy.array) Quaternion.
    """
    # Gram-Schmidt: normalise the first axis, orthogonalise the second against
    # it, and complete the right-handed frame with a cross product.
    first_axis = orientation[:3] / np.linalg.norm(orientation[:3])
    second_raw = orientation[3:] - np.dot(first_axis, orientation[3:]) * first_axis
    second_axis = second_raw / np.linalg.norm(second_raw)
    third_axis = np.cross(first_axis, second_axis)
    return R.from_matrix(np.array([first_axis, second_axis, third_axis])).as_quat()
class ShadowHandManipulateBlockEnv(PyBulletEnv, utils.EzPickle):
    """Shadow Hand Manipulate environment.
    :param orientation_threshold: (float) Threshold to be used.
    :param reward_type: (str) Reward type (dense or sparse).
    :param couple_factor: (list[float * 4]) Joint coupling between Distal and Intermediate phalanges joint (Range: 0-1).
    :param object_path: (str) Path of object to manipulate.
    :param block_length: (float) Block length in metres.
    :param model_path: (str) Path of simulation model.
    :param initial_pos: (dict) Initial model position, {"joint_name": position}.
    :param sim_time_step: (int) Time step to simulate.
    :param sim_frames_skip: (int) How many frames should be skipped.
    :param sim_n_sub_steps: (int) Sub-steps to be taken.
    :param sim_self_collision: (PyBullet.flag) Collision used in model.
    :param render: (bool) Should render or not.
    :param render_options: (PyBullet.flag) Render options for PyBullet.
    """
    def __init__(self,
                 orientation_threshold=0.1,
                 reward_type="sparse",
                 orientation_type="quaternions",
                 max_steps_per_episode=99,
                 position_gain=0.02,
                 couple_factor=None,
                 object_path=BLOCK_PATH,
                 object_texture_path=BLOCK_TEXTURE_PATH,
                 model_path=MODEL_PATH,
                 block_length=BLOCK_LENGTH,
                 initial_pos=INITIAL_POS,
                 sim_time_step=1.0/240.0,
                 sim_frames_skip=10,
                 sim_n_sub_steps=1,
                 sim_self_collision=pybullet.URDF_USE_SELF_COLLISION,
                 render=False,
                 render_options=None):
        # Validate configuration up front.
        # NOTE(review): asserts are stripped under `python -O`; consider explicit raises.
        assert reward_type in ["sparse", "dense"], "reward type must be 'sparse' or 'dense'"
        self.reward_type = reward_type
        assert orientation_type in ["quaternions", "6D"], "orientation type must be 'quaternions' or '6D'"
        self.orientation_type = orientation_type
        assert block_length > 0, "Block length must be greater than 0"
        # Stored as the half-length; used as the mesh scale when spawning the block.
        self.block_length = block_length / 2
        self.current_episode_steps = 0
        self.max_steps_per_episode = max_steps_per_episode
        self.position_gain = position_gain
        super(ShadowHandManipulateBlockEnv, self).__init__(model_path=model_path,
                                                           initial_pos=initial_pos,
                                                           sim_time_step=sim_time_step,
                                                           sim_frames_skip=sim_frames_skip,
                                                           sim_n_sub_steps=sim_n_sub_steps,
                                                           sim_self_collision=sim_self_collision,
                                                           render=render,
                                                           render_options=render_options)
        # EzPickle captures the full constructor argument list for pickling support.
        utils.EzPickle.__init__(**locals())
        # Joint coupling
        if couple_factor is None:
            self.couple_factor = np.array([1.] * len(COUPLED_JOINTS))
        else:
            self.couple_factor = couple_factor
        # Base position and orientation
        self.base_start_pos = [0.] * 3
        self.base_start_orientation = pybullet.getQuaternionFromEuler([0.] * 3)
        self.orientation_threshold = orientation_threshold
        # Object path
        # Absolute paths are taken as-is; relative ones resolve against the package assets.
        if object_path.startswith("/"):
            full_path = object_path
        else:
            full_path = os.path.join(os.path.dirname(__file__), "assets", "obj", object_path)
        if not os.path.exists(full_path):
            raise FileNotFoundError("File {} does not exist".format(full_path))
        self.object_path = full_path
        self.object_id = None
        # Object texture
        if object_texture_path.startswith("/"):
            full_path = object_texture_path
        else:
            full_path = os.path.join(os.path.dirname(__file__), "assets", "materials", "textures", object_texture_path)
        if not os.path.exists(full_path):
            raise FileNotFoundError("File {} does not exist".format(full_path))
        self.object_texture_path = full_path
        self.palm_pos = None
    def set_action_space(self):
        """Set action space.
        Iterate over all available joints to determine the count.
        """
        n_actions = 0
        for n in range(self.n_model_joints):
            joint_info = self.physics_client.getJointInfo(self.model_id, n)
            # joint_info[1] is the joint name (pybullet getJointInfo).
            if joint_info[1] in MOVABLE_JOINTS:
                n_actions += 1
        action_space = spaces.Box(low=-1., high=1., shape=(n_actions,), dtype=np.float64)
        return action_space
    def set_observation_space(self):
        """Set observation space."""
        observation = self.get_observation()
        n_states = len(observation)
        observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(n_states,), dtype=np.float64)
        return observation_space
    def reset_simulation(self):
        """Reset simulation.
        Reset itself is done in parent class.
        Load all necessary models and set start positions.
        """
        # Set gravity and time-step
        # NOTE(review): gravity acts along +y here, not the usual -z — presumably
        # intentional for this model's frame; confirm.
        self.physics_client.setGravity(0., 9.81, 0.)
        self.physics_client.setTimeStep(self.sim_time_step)
        # Load robot-model
        if self.sim_self_collision:
            self.model_id = self.physics_client.loadURDF(fileName=self.model_path,
                                                         basePosition=self.base_start_pos,
                                                         baseOrientation=self.base_start_orientation,
                                                         flags=pybullet.URDF_USE_SELF_COLLISION)
        else:
            self.model_id = self.physics_client.loadURDF(fileName=self.model_path,
                                                         basePosition=self.base_start_pos,
                                                         baseOrientation=self.base_start_orientation)
        self.set_initial_pos()
        self.palm_pos = self.get_palm_position()
        # Load object-model
        # Spawn the block slightly offset from the palm, with a random orientation.
        object_start_position = self.palm_pos + np.array([0., -0.05, 0.06])
        angle = self.np_random.uniform(-np.pi, np.pi)
        axis = self.np_random.uniform(-1., 1., size=3)
        object_start_orientation = pybullet.getQuaternionFromAxisAngle(axis, angle)
        visual_shape_id = self.physics_client.createVisualShape(fileName=self.object_path,
                                                                shapeType=pybullet.GEOM_MESH,
                                                                rgbaColor=None,
                                                                meshScale=[self.block_length] * 3)
        collision_shape_id = self.physics_client.createCollisionShape(fileName=self.object_path,
                                                                      shapeType=pybullet.GEOM_MESH,
                                                                      meshScale=[self.block_length] * 3)
        texture_id = self.physics_client.loadTexture(self.object_texture_path)
        self.object_id = self.physics_client.createMultiBody(baseMass=0.1,
                                                             baseVisualShapeIndex=visual_shape_id,
                                                             baseCollisionShapeIndex=collision_shape_id,
                                                             basePosition=object_start_position.tolist(),
                                                             baseOrientation=object_start_orientation)
        self.physics_client.changeVisualShape(self.object_id, -1, textureUniqueId=texture_id)
        self.goal = self.sample_goal()
        self.current_episode_steps = 0
        observation = self.get_observation()
        return observation
    def get_palm_position(self):
        """Get current palm position."""
        palm_pos = []
        for n in range(self.n_model_joints):
            joint_info = self.physics_client.getJointInfo(self.model_id, n)
            # joint_info[12] is the child link name (pybullet getJointInfo).
            if joint_info[12] in PALM_LINK:
                link_state = self.physics_client.getLinkState(self.model_id, n)
                palm_pos.append(link_state[0])
        return np.array(palm_pos[0]).copy()
    def get_observation(self):
        """Get observations.
        Iterate over all movable joints and get the positions and velocities.
        """
        robot_joint_pos = []
        robot_joint_vel = []
        for n in range(self.n_model_joints):
            joint_info = self.physics_client.getJointInfo(self.model_id, n)
            if joint_info[1] in MOVABLE_JOINTS:
                joint_state = self.physics_client.getJointState(self.model_id, n)
                robot_joint_pos.append(joint_state[0])
                robot_joint_vel.append(joint_state[1])
        cur_pos, cur_orientation, cur_lin_vel, cur_ang_vel = self.get_current_object_state()
        # Flat vector: joint positions, joint velocities, object state, then the goal.
        observation = np.concatenate([robot_joint_pos,
                                      robot_joint_vel,
                                      cur_lin_vel,
                                      cur_ang_vel,
                                      cur_pos,
                                      cur_orientation,
                                      self.goal.copy()])
        return observation.copy()
    def step(self, action):
        """Perform (a) simulation step(s).
        Move movable joints within their range (-1, 1).
        Do the actual simulation.
        Calculate the environment stuff (observation, reward, done, info).
        """
        # Clip action values
        action = np.clip(action, self.action_space.low, self.action_space.high)
        joint_limit_low = []
        joint_limit_high = []
        joints_movable = []
        joints_coupled = []
        # Get joint limits and distinct between movable and coupled joints
        for n in range(self.n_model_joints):
            joint_info = self.physics_client.getJointInfo(self.model_id, n)
            if joint_info[1] in MOVABLE_JOINTS:
                # joint_info[8]/[9] are the lower/upper joint limits.
                joint_limit_low.append(joint_info[8])
                joint_limit_high.append(joint_info[9])
                joints_movable.append(n)
            elif joint_info[1] in COUPLED_JOINTS:
                joints_coupled.append(n)
        joint_limit_low = np.array(joint_limit_low)
        joint_limit_high = np.array(joint_limit_high)
        act_range = (joint_limit_high - joint_limit_low) / 2.
        act_center = (joint_limit_high + joint_limit_low) / 2.
        # Calculate the control action
        # Map action from [-1, 1] into each joint's physical range.
        ctrl = act_center + act_range * action
        ctrl = np.clip(ctrl, joint_limit_low, joint_limit_high)
        # Actually move the joints
        for n in range(self.n_model_joints):
            if n in joints_movable:
                k = joints_movable.index(n)
                self.physics_client.setJointMotorControl2(bodyUniqueId=self.model_id,
                                                          jointIndex=joints_movable[k],
                                                          controlMode=pybullet.POSITION_CONTROL,
                                                          targetPosition=ctrl[k],
                                                          positionGain=self.position_gain)
            else:
                # Coupled joints mirror the previous joint's target, scaled by couple_factor.
                if n in joints_coupled and n - 1 in joints_movable:
                    k = joints_movable.index(n - 1)
                    self.physics_client.setJointMotorControl2(bodyUniqueId=self.model_id,
                                                              jointIndex=n,
                                                              controlMode=pybullet.POSITION_CONTROL,
                                                              targetPosition=ctrl[k] * self.couple_factor[
                                                                  joints_coupled.index(n)],
                                                              positionGain=self.position_gain)
                else:
                    self.physics_client.setJointMotorControl2(bodyUniqueId=self.model_id,
                                                              jointIndex=n,
                                                              controlMode=pybullet.POSITION_CONTROL,
                                                              targetPosition=0.,
                                                              positionGain=self.position_gain)
        self.do_simulation()
        cur_pos, cur_orientation, _, _ = self.get_current_object_state()
        goal = self.goal.copy()
        observation = self.get_observation()
        cur_state = np.concatenate([cur_pos, cur_orientation])
        # This 'done' only feeds info/reward; it is overwritten below.
        done = self.is_success(cur_state, goal)
        info = {"is_success": done}
        reward = self.compute_reward(cur_state, goal, info)
        # Always set done to false to achieve stable end-position
        # Only change it if max episode steps are reached OR block fell off
        done = False
        if not done and self.current_episode_steps == self.max_steps_per_episode:
            done = True
        # Block fell off
        # Detected by the object dropping below the palm height (y axis).
        palm_pos = self.get_palm_position()
        if cur_pos[1] > palm_pos[1] + 0.04:
            # Huge negative reward for this action - Nope
            # reward -= 10000.
            # done = True
            info["is_success"] = False
        self.current_episode_steps += 1
        return observation, reward, done, info
    def set_initial_pos(self):
        """Set initial position."""
        for n in range(self.n_model_joints):
            joint_info = self.physics_client.getJointInfo(self.model_id, n)
            if joint_info[1] in MOVABLE_JOINTS or joint_info[1] in COUPLED_JOINTS:
                self.physics_client.setJointMotorControl2(bodyUniqueId=self.model_id,
                                                          jointIndex=n,
                                                          controlMode=pybullet.POSITION_CONTROL,
                                                          targetPosition=self.initial_pos[joint_info[1]],
                                                          positionGain=self.position_gain)
        # Settle in
        # Run a few steps so position control reaches the rest pose.
        for _ in range(20):
            self.do_simulation()
    def sample_goal(self):
        """Set goal."""
        orientation = pybullet.getQuaternionFromEuler(GOAL_ORIENTATION)
        if self.orientation_type == "6D":
            orientation = quaternion_to_continuous6D(orientation)
        object_pos, _, _, _ = self.get_current_object_state()
        return np.concatenate([object_pos, np.array(orientation)]).copy()
    def is_success(self, achieved_goal, desired_goal):
        """Goal distance.
        Distance between achieved_goal (current orientation) and goal.
        """
        # Orientation
        distance = goal_distance(achieved_goal, desired_goal)
        # Returned as float32 (1.0/0.0) so it can double as a sparse reward term.
        return (distance < self.orientation_threshold).astype(np.float32)
    def compute_reward(self, achieved_goal, desired_goal, info):
        """Compute reward.
        Chose between dense and sparse.
        """
        if self.reward_type == "sparse":
            # Sparse: 0 on success, -1 otherwise.
            return self.is_success(achieved_goal, desired_goal) - 1
        else:
            distance = goal_distance(achieved_goal, desired_goal)
            return -distance
    def get_current_object_state(self):
        """Get position and rotation of the block."""
        try:
            object_position, object_orientation = self.physics_client.getBasePositionAndOrientation(self.object_id)
            object_velocity = self.physics_client.getBaseVelocity(self.object_id)
            object_linear_velocity = object_velocity[0]
            object_angular_velocity = object_velocity[1]
        except AttributeError:
            # NOTE(review): this path appears to cover calls before the object/simulation
            # exists — returns a zeroed state; confirm which attribute is missing.
            object_position = [0.] * 3
            object_orientation = pybullet.getQuaternionFromEuler([0.] * 3)
            object_linear_velocity = [0.] * 3
            object_angular_velocity = [0.] * 3
        if self.orientation_type == "6D":
            object_orientation = quaternion_to_continuous6D(object_orientation)
        return np.array(object_position).copy(), np.array(object_orientation).copy(), \
               np.array(object_linear_velocity).copy(), np.array(object_angular_velocity).copy()
    def render(self, mode="human", close=False):
        # Only "rgb_array" is produced here; other modes are a no-op
        # (presumably the GUI connection renders on its own — confirm).
        if mode == "rgb_array":
            # Camera defaults
            camera_view_matrix = pybullet.computeViewMatrix(cameraEyePosition=[0.4, -0.35, 0.5],
                                                            cameraTargetPosition=[0., 0., 0.3],
                                                            cameraUpVector=[-1., 0., -1.])
            camera_projection_matrix = pybullet.computeProjectionMatrixFOV(fov=45., aspect=1., nearVal=0.1,
                                                                           farVal=1.1)
            img = self.physics_client.getCameraImage(width=512, height=512,
                                                     viewMatrix=camera_view_matrix,
                                                     projectionMatrix=camera_projection_matrix)
            # getCameraImage returns (w, h, rgb, depth, seg); index 2 is the RGB buffer.
            return img[2]
        else:
            pass
class ShadowHandManipulateBlockGoalEnv(PyBulletGoalEnv, utils.EzPickle):
"""Shadow Hand Manipulate goal environment.
Used for HER environments.
:param orientation_threshold: (float) Threshold to be used.
:param reward_type: (str) Reward type (dense or sparse).
:param couple_factor: (list[float * 4]) Joint coupling between Distal and Intermediate phalanges joint (Range: 0-1).
:param object_path: (str) Path of object to manipulate.
:param model_path: (str) Path of simulation model.
:param block_length: (float) Block length in metres.
:param initial_pos: (dict) Initial model position, {"joint_name": position}.
:param sim_time_step: (int) Time step to simulate.
:param sim_frames_skip: (int) How many frames should be skipped.
:param sim_n_sub_steps: (int) Sub-steps to be taken.
:param sim_self_collision: (PyBullet.flag) Collision used in model.
:param render: (bool) Should render or not.
:param render_options: (PyBullet.flag) Render options for PyBullet.
"""
def __init__(self,
orientation_threshold=0.1,
reward_type="sparse",
orientation_type="quaternions",
max_steps_per_episode=99,
position_gain=0.02,
couple_factor=None,
object_path=BLOCK_PATH,
object_texture_path=BLOCK_TEXTURE_PATH,
model_path=MODEL_PATH,
block_length=BLOCK_LENGTH,
initial_pos=INITIAL_POS,
sim_time_step=1.0/240.0,
sim_frames_skip=10,
sim_n_sub_steps=1,
sim_self_collision=pybullet.URDF_USE_SELF_COLLISION,
render=False,
render_options=None):
assert reward_type in ["sparse", "dense"], "reward type must be 'sparse' or 'dense'"
self.reward_type = reward_type
assert orientation_type in ["quaternions", "6D"], "orientation type must be 'quaternions' or '6D'"
self.orientation_type = orientation_type
assert block_length > 0, "Block length must be greater than 0"
self.block_length = block_length / 2
self.current_episode_steps = 0
self.max_steps_per_episode = max_steps_per_episode
self.position_gain = position_gain
super(ShadowHandManipulateBlockGoalEnv, self).__init__(model_path=model_path,
initial_pos=initial_pos,
sim_time_step=sim_time_step,
sim_frames_skip=sim_frames_skip,
sim_n_sub_steps=sim_n_sub_steps,
sim_self_collision=sim_self_collision,
render=render,
render_options=render_options)
utils.EzPickle.__init__(**locals())
# Joint coupling
if couple_factor is None:
self.couple_factor = np.array([1.] * len(COUPLED_JOINTS))
else:
self.couple_factor = couple_factor
# Base position and orientation
self.base_start_pos = [0.] * 3
self.base_start_orientation = pybullet.getQuaternionFromEuler([0.] * 3)
self.orientation_threshold = orientation_threshold
# Object path
if object_path.startswith("/"):
full_path = object_path
else:
full_path = os.path.join(os.path.dirname(__file__), "assets", "obj", object_path)
if not os.path.exists(full_path):
raise FileNotFoundError("File {} does not exist".format(full_path))
self.object_path = full_path
self.object_id = None
# Object texture
if object_texture_path.startswith("/"):
full_path = object_texture_path
else:
full_path = os.path.join(os.path.dirname(__file__), "assets", "materials", "textures", object_texture_path)
if not os.path.exists(full_path):
raise FileNotFoundError("File {} does not exist".format(full_path))
self.object_texture_path = full_path
self.palm_pos = None
def set_action_space(self):
"""Set action space.
Iterate over all available joints to determine the count.
"""
n_actions = 0
for n in range(self.n_model_joints):
joint_info = self.physics_client.getJointInfo(self.model_id, n)
if joint_info[1] in MOVABLE_JOINTS:
n_actions += 1
action_space = spaces.Box(low=-1., high=1., shape=(n_actions,), dtype=np.float64)
return action_space
def set_observation_space(self):
"""Set observation space.
Note: HER style.
"""
observation = self.get_observation()
observation_space = spaces.Dict(dict(
desired_goal=spaces.Box(-np.inf, np.inf, shape=observation["achieved_goal"].shape, dtype=np.float64),
achieved_goal=spaces.Box(-np.inf, np.inf, shape=observation["achieved_goal"].shape, dtype=np.float64),
observation=spaces.Box(-np.inf, np.inf, shape=observation["observation"].shape, dtype=np.float64)
))
return observation_space
def reset_simulation(self):
"""Reset simulation.
Reset itself is done in parent class.
Load all necessary models and set start positions.
"""
# Set gravity and time-step
self.physics_client.setGravity(0., 9.81, 0.)
self.physics_client.setTimeStep(self.sim_time_step)
# Load robot-model
if self.sim_self_collision:
self.model_id = self.physics_client.loadURDF(fileName=self.model_path,
basePosition=self.base_start_pos,
baseOrientation=self.base_start_orientation,
flags=pybullet.URDF_USE_SELF_COLLISION)
else:
self.model_id = self.physics_client.loadURDF(fileName=self.model_path,
basePosition=self.base_start_pos,
baseOrientation=self.base_start_orientation)
self.set_initial_pos()
self.palm_pos = self.get_palm_position()
# Load object-model
object_start_position = self.palm_pos + np.array([0., -0.05, 0.06])
angle = self.np_random.uniform(-np.pi, np.pi)
axis = self.np_random.uniform(-1., 1., size=3)
object_start_orientation = pybullet.getQuaternionFromAxisAngle(axis, angle)
visual_shape_id = self.physics_client.createVisualShape(fileName=self.object_path,
shapeType=pybullet.GEOM_MESH,
rgbaColor=None,
meshScale=[self.block_length] * 3)
collision_shape_id = self.physics_client.createCollisionShape(fileName=self.object_path,
shapeType=pybullet.GEOM_MESH,
meshScale=[self.block_length] * 3)
texture_id = self.physics_client.loadTexture(self.object_texture_path)
self.object_id = self.physics_client.createMultiBody(baseMass=0.1,
baseVisualShapeIndex=visual_shape_id,
baseCollisionShapeIndex=collision_shape_id,
basePosition=object_start_position.tolist(),
baseOrientation=object_start_orientation)
self.physics_client.changeVisualShape(self.object_id, -1, textureUniqueId=texture_id)
self.goal = self.sample_goal()
self.current_episode_steps = 0
observation = self.get_observation()
return observation
def get_palm_position(self):
palm_pos = []
for n in range(self.n_model_joints):
joint_info = self.physics_client.getJointInfo(self.model_id, n)
if joint_info[12] in PALM_LINK:
link_state = self.physics_client.getLinkState(self.model_id, n)
palm_pos.append(link_state[0])
return np.array(palm_pos[0]).copy()
def get_observation(self):
"""Get observations.
Iterate over all movable joints and get the positions and velocities.
Note: HER style.
"""
robot_joint_pos = []
robot_joint_vel = []
for n in range(self.n_model_joints):
joint_info = self.physics_client.getJointInfo(self.model_id, n)
if joint_info[1] in MOVABLE_JOINTS:
joint_state = self.physics_client.getJointState(self.model_id, n)
robot_joint_pos.append(joint_state[0])
robot_joint_vel.append(joint_state[1])
cur_pos, cur_orientation, cur_lin_vel, cur_ang_vel = self.get_current_object_state()
achieved_goal = np.concatenate([cur_pos, cur_orientation])
observation = np.concatenate([robot_joint_pos,
robot_joint_vel,
cur_lin_vel,
cur_ang_vel,
achieved_goal])
return {
"observation": observation.copy(),
"achieved_goal": achieved_goal.copy(),
"desired_goal": self.goal.copy()
}
def step(self, action):
"""Perform (a) simulation step(s).
Move movable joints within their range (-1, 1).
Do the actual simulation.
Calculate the environment stuff (observation, reward, done, info).
"""
# Clip action values
action = np.clip(action, self.action_space.low, self.action_space.high)
joint_limit_low = []
joint_limit_high = []
joints_movable = []
joints_coupled = []
# Get joint limits and distinct between movable and coupled joints
for n in range(self.n_model_joints):
joint_info = self.physics_client.getJointInfo(self.model_id, n)
if joint_info[1] in MOVABLE_JOINTS:
joint_limit_low.append(joint_info[8])
joint_limit_high.append(joint_info[9])
joints_movable.append(n)
elif joint_info[1] in COUPLED_JOINTS:
joints_coupled.append(n)
joint_limit_low = np.array(joint_limit_low)
joint_limit_high = np.array(joint_limit_high)
act_range = (joint_limit_high - joint_limit_low) / 2.
act_center = (joint_limit_high + joint_limit_low) / 2.
# Calculate the control action
ctrl = act_center + act_range * action
ctrl = np.clip(ctrl, joint_limit_low, joint_limit_high)
# Actually move the joints
for n in range(self.n_model_joints):
if n in joints_movable:
k = joints_movable.index(n)
self.physics_client.setJointMotorControl2(bodyUniqueId=self.model_id,
jointIndex=joints_movable[k],
controlMode=pybullet.POSITION_CONTROL,
targetPosition=ctrl[k],
positionGain=self.position_gain)
else:
if n in joints_coupled and n - 1 in joints_movable:
k = joints_movable.index(n - 1)
self.physics_client.setJointMotorControl2(bodyUniqueId=self.model_id,
jointIndex=n,
controlMode=pybullet.POSITION_CONTROL,
targetPosition=ctrl[k] * self.couple_factor[
joints_coupled.index(n)],
positionGain=self.position_gain)
else:
self.physics_client.setJointMotorControl2(bodyUniqueId=self.model_id,
jointIndex=n,
controlMode=pybullet.POSITION_CONTROL,
targetPosition=0.,
positionGain=self.position_gain)
self.do_simulation()
observation = self.get_observation()
done = self.is_success(observation["achieved_goal"], observation["desired_goal"])
info = {"is_success": done}
reward = self.compute_reward(observation["achieved_goal"], observation["desired_goal"], info)
# Always set done to false to achieve stable end-position
# Only change it if max episode steps are reached OR block fell off
done = False
if not done and self.current_episode_steps == self.max_steps_per_episode:
done = True
# Block fell off
cur_pos = observation["achieved_goal"][:3]
palm_pos = self.get_palm_position()
if cur_pos[1] > palm_pos[1] + 0.04:
# Huge negative reward for this action - Nope
# reward -= 10000.
# done = True
info["is_success"] = False
self.current_episode_steps += 1
return observation, reward, done, info
def set_initial_pos(self):
"""Set initial position."""
for n in range(self.n_model_joints):
joint_info = self.physics_client.getJointInfo(self.model_id, n)
if joint_info[1] in MOVABLE_JOINTS or joint_info[1] in COUPLED_JOINTS:
self.physics_client.setJointMotorControl2(bodyUniqueId=self.model_id,
jointIndex=n,
controlMode=pybullet.POSITION_CONTROL,
targetPosition=self.initial_pos[joint_info[1]],
positionGain=self.position_gain)
# Settle in
for _ in range(20):
self.do_simulation()
def sample_goal(self):
"""Set goal."""
orientation = pybullet.getQuaternionFromEuler(GOAL_ORIENTATION)
if self.orientation_type == "6D":
orientation = quaternion_to_continuous6D(orientation)
object_pos, _, _, _ = self.get_current_object_state()
return np.concatenate([object_pos, np.array(orientation)]).copy()
def is_success(self, achieved_goal, desired_goal):
"""Goal distance.
Distance between achieved_goal (current orientation) and goal.
"""
distance = goal_distance(achieved_goal, desired_goal)
return (distance < self.orientation_threshold).astype(np.float32)
def compute_reward(self, achieved_goal, desired_goal, info):
"""Compute reward.
Chose between dense and sparse.
"""
if self.reward_type == "sparse":
return self.is_success(achieved_goal, desired_goal) - 1
else:
distance = goal_distance(achieved_goal, desired_goal)
return -distance
def get_current_object_state(self):
"""Get position and rotation of the block."""
try:
object_position, object_orientation = self.physics_client.getBasePositionAndOrientation(self.object_id)
object_velocity = self.physics_client.getBaseVelocity(self.object_id)
object_linear_velocity = object_velocity[0]
object_angular_velocity = object_velocity[1]
except AttributeError:
object_position = [0.] * 3
object_orientation = pybullet.getQuaternionFromEuler([0.] * 3)
object_linear_velocity = [0.] * 3
object_angular_velocity = [0.] * 3
if self.orientation_type == "6D":
object_orientation = quaternion_to_continuous6D(object_orientation)
return np.array(object_position).copy(), np.array(object_orientation).copy(), \
np.array(object_linear_velocity).copy(), np.array(object_angular_velocity).copy()
def render(self, mode="human", close=False):
if mode == "rgb_array":
# Camera defaults
camera_view_matrix = pybullet.computeViewMatrix(cameraEyePosition=[0.4, -0.35, 0.5],
cameraTargetPosition=[0., 0., 0.3],
cameraUpVector=[-1., 0., -1.])
camera_projection_matrix = pybullet.computeProjectionMatrixFOV(fov=45., aspect=1., nearVal=0.1,
farVal=1.1)
img = self.physics_client.getCameraImage(width=512, height=512,
viewMatrix=camera_view_matrix,
projectionMatrix=camera_projection_matrix)
return img[2]
else:
pass
| 42.242521
| 120
| 0.561142
| 4,234
| 39,539
| 4.976618
| 0.099197
| 0.007546
| 0.037113
| 0.007973
| 0.872479
| 0.852689
| 0.849556
| 0.845665
| 0.844004
| 0.832898
| 0
| 0.029098
| 0.355067
| 39,539
| 935
| 121
| 42.287701
| 0.797216
| 0.13445
| 0
| 0.821678
| 0
| 0
| 0.033131
| 0.000655
| 0
| 0
| 0
| 0
| 0.01049
| 1
| 0.050699
| false
| 0.003497
| 0.015734
| 0
| 0.117133
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e2a0da095a16b2a158beb4395c1a7143b1f34ed2
| 47
|
py
|
Python
|
project_starter/__init__.py
|
flunka/project_starter
|
bf16e289b98c07e9053797640a1bd540e312a9a2
|
[
"MIT"
] | null | null | null |
project_starter/__init__.py
|
flunka/project_starter
|
bf16e289b98c07e9053797640a1bd540e312a9a2
|
[
"MIT"
] | null | null | null |
project_starter/__init__.py
|
flunka/project_starter
|
bf16e289b98c07e9053797640a1bd540e312a9a2
|
[
"MIT"
] | null | null | null |
from . import Repo
from . import BitbucketRepo
| 15.666667
| 27
| 0.787234
| 6
| 47
| 6.166667
| 0.666667
| 0.540541
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170213
| 47
| 2
| 28
| 23.5
| 0.948718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
2c594d92e5f212a5b5807607963389e7dfb37fae
| 22,341
|
py
|
Python
|
devilry/devilry_import_v2database/tests/test_modelimporters/test_staticfeedbackimporter.py
|
devilry/devilry-django
|
9ae28e462dfa4cfee966ebacbca04ade9627e715
|
[
"BSD-3-Clause"
] | 29
|
2015-01-18T22:56:23.000Z
|
2020-11-10T21:28:27.000Z
|
devilry/devilry_import_v2database/tests/test_modelimporters/test_staticfeedbackimporter.py
|
devilry/devilry-django
|
9ae28e462dfa4cfee966ebacbca04ade9627e715
|
[
"BSD-3-Clause"
] | 786
|
2015-01-06T16:10:18.000Z
|
2022-03-16T11:10:50.000Z
|
devilry/devilry_import_v2database/tests/test_modelimporters/test_staticfeedbackimporter.py
|
devilry/devilry-django
|
9ae28e462dfa4cfee966ebacbca04ade9627e715
|
[
"BSD-3-Clause"
] | 15
|
2015-04-06T06:18:43.000Z
|
2021-02-24T12:28:30.000Z
|
import os
import tempfile
import unittest
import shutil
from django import test
from django.conf import settings
from devilry.utils import datetimeutils
from model_bakery import baker
from devilry.devilry_comment.models import CommentFile
from devilry.devilry_group.models import FeedbackSet, GroupComment
from devilry.devilry_import_v2database.modelimporters.delivery_feedback_importers \
import StaticFeedbackImporter, DeliveryImporter
from .importer_testcase_mixin import ImporterTestCaseMixin
@unittest.skip('Not relevant anymore, keep for history.')
class TestStaticFeedbackImporterImporter(ImporterTestCaseMixin, test.TestCase):
    """Tests for ``StaticFeedbackImporter``: a v2 StaticFeedback dump becomes a
    GroupComment (plus FeedbackSet grading data and CommentFiles) in v3.

    Skipped: not relevant anymore, kept for history.
    """

    def setUp(self):
        # Temporary directory standing in for the Devilry V2 media root.
        self.v2_media_root_temp_dir = tempfile.mkdtemp()

    def tearDown(self):
        super(TestStaticFeedbackImporterImporter, self).tearDown()
        shutil.rmtree(self.v2_media_root_temp_dir)

    def _create_staticfeedback_dict(self, feedback_set, file_info_dict=None, examiner_user_id=None):
        """Build a v2 'core.staticfeedback' dump entry targeting *feedback_set*."""
        return {
            'pk': 1,
            'model': 'core.staticfeedback',
            'fields': {
                'is_passing_grade': True,
                'grade': '2/4',
                'saved_by': examiner_user_id,
                'delivery': 1,
                'points': 2,
                'files': file_info_dict or {},
                'deadline_id': feedback_set.id,
                'save_timestamp': '2017-05-15T11:04:46.817',
                'rendered_view': '<p>Quo tempore facilis eos suscipit eum doloremque libero'
                                 ' veniam nisi?</p>\n<p>Magnam mollitia alias consequatur nisi'
                                 ' nam error dolor laboriosam aperiam? Nihil eligendi voluptatem,'
                                 ' eveniet iure officiis amet laborum debitis nisi in, '
                                 'molestias similique vero quos beatae obcaecati neque laudantium '
                                 'suscipit rerum repudiandae, facilis doloribus autem molestias '
                                 'asperiores perferendis est delectus alias porro laboriosam culpa, '
                                 'iusto ut aliquid et? Id iusto dolor consequatur necessitatibus explicabo '
                                 'repellendus, suscipit nisi non.</p>\n<p>Quas natus id nulla pariatur'
                                 ' similique ducimus mollitia ea tenetur veniam fugiat, rerum temporibus '
                                 'tempore eaque nemo at, nihil dolores ad ducimus delectus quasi nesciunt '
                                 'illo, aspernatur ullam officia aperiam officiis harum repellat pariatur '
                                 'quaerat deserunt sint. Debitis nam deserunt autem voluptas? Debitis libero'
                                 ' beatae deserunt et ullam expedita aliquid inventore autem nam veniam, '
                                 'dolore rem ea voluptatibus placeat explicabo.</p>'
            }
        }

    def _make_examiner_group_and_feedbackset(self):
        """Create an examiner user registered on a fresh group with a feedback set.

        Returns:
            tuple: ``(examiner_user, group, feedback_set)``.
        """
        examiner_user = baker.make(settings.AUTH_USER_MODEL)
        group = baker.make('core.AssignmentGroup')
        baker.make('core.Examiner',
                   assignmentgroup=group,
                   relatedexaminer__user=examiner_user,
                   relatedexaminer__period=group.parentnode.parentnode)
        feedback_set = baker.make('devilry_group.FeedbackSet', group=group)
        return examiner_user, group, feedback_set

    def _import_staticfeedback(self, data_dict):
        """Write *data_dict* as a v2 dump and run StaticFeedbackImporter on it."""
        self.create_v2dump(model_name='core.staticfeedback', data=data_dict)
        StaticFeedbackImporter(input_root=self.temp_root_dir).import_models()

    def test_importer(self):
        examiner_user, _, feedback_set = self._make_examiner_group_and_feedbackset()
        self._import_staticfeedback(self._create_staticfeedback_dict(
            feedback_set=feedback_set, examiner_user_id=examiner_user.id))
        self.assertEqual(FeedbackSet.objects.count(), 1)
        self.assertEqual(GroupComment.objects.count(), 1)

    def test_importer_feedback_set(self):
        examiner_user, _, feedback_set = self._make_examiner_group_and_feedbackset()
        self._import_staticfeedback(self._create_staticfeedback_dict(
            feedback_set=feedback_set, examiner_user_id=examiner_user.id))
        comment = GroupComment.objects.first()
        self.assertEqual(comment.feedback_set, feedback_set)

    def test_importer_user(self):
        examiner_user, _, feedback_set = self._make_examiner_group_and_feedbackset()
        self._import_staticfeedback(self._create_staticfeedback_dict(
            feedback_set=feedback_set, examiner_user_id=examiner_user.id))
        comment = GroupComment.objects.first()
        self.assertEqual(comment.user, examiner_user)

    def test_importer_user_role(self):
        examiner_user, _, feedback_set = self._make_examiner_group_and_feedbackset()
        self._import_staticfeedback(self._create_staticfeedback_dict(
            feedback_set=feedback_set, examiner_user_id=examiner_user.id))
        comment = GroupComment.objects.first()
        self.assertEqual(comment.user_role, GroupComment.USER_ROLE_EXAMINER)

    def test_importer_text(self):
        examiner_user, _, feedback_set = self._make_examiner_group_and_feedbackset()
        staticfeedback_data_dict = self._create_staticfeedback_dict(
            feedback_set=feedback_set, examiner_user_id=examiner_user.id)
        self._import_staticfeedback(staticfeedback_data_dict)
        comment = GroupComment.objects.first()
        self.assertEqual(comment.text, staticfeedback_data_dict['fields']['rendered_view'])

    def test_importer_comment_type(self):
        examiner_user, _, feedback_set = self._make_examiner_group_and_feedbackset()
        self._import_staticfeedback(self._create_staticfeedback_dict(
            feedback_set=feedback_set, examiner_user_id=examiner_user.id))
        comment = GroupComment.objects.first()
        self.assertEqual(comment.comment_type, GroupComment.COMMENT_TYPE_GROUPCOMMENT)

    def test_importer_comment_is_part_of_grading(self):
        examiner_user, _, feedback_set = self._make_examiner_group_and_feedbackset()
        self._import_staticfeedback(self._create_staticfeedback_dict(
            feedback_set=feedback_set, examiner_user_id=examiner_user.id))
        comment = GroupComment.objects.first()
        self.assertTrue(comment.part_of_grading)

    def test_importer_published_datetime(self):
        examiner_user, _, feedback_set = self._make_examiner_group_and_feedbackset()
        staticfeedback_data_dict = self._create_staticfeedback_dict(
            feedback_set=feedback_set, examiner_user_id=examiner_user.id)
        self._import_staticfeedback(staticfeedback_data_dict)
        comment = GroupComment.objects.first()
        self.assertEqual(
            comment.published_datetime,
            datetimeutils.from_isoformat(staticfeedback_data_dict['fields']['save_timestamp'])
        )

    def test_importer_feedback_set_grading_published_datetime(self):
        examiner_user, _, feedback_set = self._make_examiner_group_and_feedbackset()
        staticfeedback_data_dict = self._create_staticfeedback_dict(
            feedback_set=feedback_set, examiner_user_id=examiner_user.id)
        self._import_staticfeedback(staticfeedback_data_dict)
        imported_feedback_set = GroupComment.objects.first().feedback_set
        self.assertEqual(
            imported_feedback_set.grading_published_datetime,
            datetimeutils.from_isoformat(staticfeedback_data_dict['fields']['save_timestamp']))

    def test_importer_feedback_set_grading_points(self):
        examiner_user, _, feedback_set = self._make_examiner_group_and_feedbackset()
        staticfeedback_data_dict = self._create_staticfeedback_dict(
            feedback_set=feedback_set, examiner_user_id=examiner_user.id)
        self._import_staticfeedback(staticfeedback_data_dict)
        imported_feedback_set = GroupComment.objects.first().feedback_set
        self.assertEqual(imported_feedback_set.grading_points,
                         staticfeedback_data_dict['fields']['points'])

    def test_importer_feedback_set_grading_published_by(self):
        examiner_user, _, feedback_set = self._make_examiner_group_and_feedbackset()
        self._import_staticfeedback(self._create_staticfeedback_dict(
            feedback_set=feedback_set, examiner_user_id=examiner_user.id))
        imported_feedback_set = GroupComment.objects.first().feedback_set
        self.assertEqual(imported_feedback_set.grading_published_by, examiner_user)

    def test_importer_comment_file_attributes(self):
        with self.settings(DEVILRY_V2_MEDIA_ROOT=self.v2_media_root_temp_dir):
            examiner_user, _, feedback_set = self._make_examiner_group_and_feedbackset()
            self._import_staticfeedback(self._create_staticfeedback_dict(
                feedback_set=feedback_set,
                examiner_user_id=examiner_user.id,
                file_info_dict={
                    '1': {
                        'id': 1,
                        'filename': 'test.py',
                        'relative_file_path': 'test.py',
                    }
                }))
            self.assertEqual(CommentFile.objects.count(), 1)
            comment_file = CommentFile.objects.first()
            self.assertEqual(comment_file.filename, 'test.py')
            self.assertEqual(comment_file.mimetype, 'text/x-python')
@unittest.skip('Not relevant anymore, keep for history.')
class TestDeliveryAndStaticFeedbackImporterImporter(ImporterTestCaseMixin, test.TestCase):
    """
    Tests to make sure StaticFeedbacks are created with an auto incremented sequence number that should
    start at the Delivery meta data's max_id.

    We need to do this because deliveries and feedbacks are two different models in Devilry V2, but they will both
    be created as GroupComments in Devilry V3. So if we kept the primary keys for both Delivery and
    StaticFeedback, these would eventually crash when creating the GroupComments.

    The GroupComments for a Delivery will keep the primary key from Devilry V2, and the GroupComments for feedbacks
    will get an auto incremented sequence number starting at the Delivery meta data max_id.

    Each test will be a kind of simulation, and represents the order in which the importers for
    Delivery and StaticFeedback must be run.

    Skipped: not relevant anymore, kept for history.
    """

    def _create_model_meta_for_delivery(self):
        """Meta data for the Delivery dump; ``max_id`` drives feedback comment ids."""
        return {
            'model_class_name': 'Delivery',
            'max_id': 143,
            'app_label': 'core'
        }

    def _create_delivery_dict(self, feedback_set, candidate_id=None):
        """Build a v2 'core.delivery' dump entry targeting *feedback_set*."""
        return {
            'pk': 3,
            'model': 'core.delivery',
            'fields': {
                'delivery_type': 0,
                'alias_delivery': None,
                'successful': True,
                'number': 1,
                'delivered_by': candidate_id,
                'last_feedback': 3,
                'deadline': feedback_set.id,
                'copy_of': None,
                'time_of_delivery': '2016-04-10T07:04:00'
            },
        }

    def _create_staticfeedback_dict(self, feedback_set, examiner_user_id=None):
        """Build a v2 'core.staticfeedback' dump entry targeting *feedback_set*."""
        return {
            'pk': 1,
            'model': 'core.staticfeedback',
            'fields': {
                'is_passing_grade': True,
                'grade': '2/4',
                'saved_by': examiner_user_id,
                'delivery': 1,
                'points': 2,
                'files': {},
                'deadline_id': feedback_set.id,
                'save_timestamp': '2017-05-15T11:04:46.817',
                'rendered_view': '<p>Quo tempore facilis eos suscipit eum doloremque libero'
                                 ' veniam nisi?</p>\n<p>Magnam mollitia alias consequatur nisi'
                                 ' nam error dolor laboriosam aperiam? Nihil eligendi voluptatem,'
                                 ' eveniet iure officiis amet laborum debitis nisi in, '
                                 'molestias similique vero quos beatae obcaecati neque laudantium '
                                 'suscipit rerum repudiandae, facilis doloribus autem molestias '
                                 'asperiores perferendis est delectus alias porro laboriosam culpa, '
                                 'iusto ut aliquid et? Id iusto dolor consequatur necessitatibus explicabo '
                                 'repellendus, suscipit nisi non.</p>\n<p>Quas natus id nulla pariatur'
                                 ' similique ducimus mollitia ea tenetur veniam fugiat, rerum temporibus '
                                 'tempore eaque nemo at, nihil dolores ad ducimus delectus quasi nesciunt '
                                 'illo, aspernatur ullam officia aperiam officiis harum repellat pariatur '
                                 'quaerat deserunt sint. Debitis nam deserunt autem voluptas? Debitis libero'
                                 ' beatae deserunt et ullam expedita aliquid inventore autem nam veniam, '
                                 'dolore rem ea voluptatibus placeat explicabo.</p>'
            }
        }

    def _make_users_candidate_and_feedbackset(self):
        """Create a student (candidate) and an examiner on a fresh group.

        Returns:
            tuple: ``(examiner_user, candidate, feedback_set)``.
        """
        student_user = baker.make(settings.AUTH_USER_MODEL)
        examiner_user = baker.make(settings.AUTH_USER_MODEL)
        group = baker.make('core.AssignmentGroup')
        baker.make('core.Examiner',
                   assignmentgroup=group,
                   relatedexaminer__user=examiner_user,
                   relatedexaminer__period=group.parentnode.parentnode)
        candidate = baker.make('core.Candidate',
                               assignment_group=group,
                               relatedstudent__user=student_user,
                               relatedstudent__period=group.parentnode.parentnode)
        feedback_set = baker.make('devilry_group.FeedbackSet', group=group)
        return examiner_user, candidate, feedback_set

    def test_importer(self):
        examiner_user, candidate, feedback_set = self._make_users_candidate_and_feedbackset()
        # Delivery first, then feedback: the order the importers must run in.
        self.create_v2dump(
            model_name='core.delivery',
            data=self._create_delivery_dict(
                feedback_set=feedback_set,
                candidate_id=candidate.id)
        )
        DeliveryImporter(input_root=self.temp_root_dir).import_models()
        self.create_v2dump(
            model_name='core.staticfeedback',
            data=self._create_staticfeedback_dict(
                feedback_set=feedback_set,
                examiner_user_id=examiner_user.id)
        )
        StaticFeedbackImporter(input_root=self.temp_root_dir).import_models()
        self.assertEqual(GroupComment.objects.count(), 2)

    def test_importer_feedback_comments_id_starts_at_max_id(self):
        examiner_user, candidate, feedback_set = self._make_users_candidate_and_feedbackset()
        model_meta = self._create_model_meta_for_delivery()
        self.create_v2dump(
            model_name='core.delivery',
            data=self._create_delivery_dict(
                feedback_set=feedback_set,
                candidate_id=candidate.id),
            model_meta=model_meta
        )
        DeliveryImporter(input_root=self.temp_root_dir).import_models()
        self.create_v2dump(
            model_name='core.staticfeedback',
            data=self._create_staticfeedback_dict(
                feedback_set=feedback_set,
                examiner_user_id=examiner_user.id)
        )
        StaticFeedbackImporter(input_root=self.temp_root_dir).import_models()
        self.assertEqual(GroupComment.objects.count(), 2)
        # The delivery keeps its v2 pk; the feedback comment starts after max_id.
        delivery_comment = GroupComment.objects.filter(user_role=GroupComment.USER_ROLE_STUDENT).first()
        feedback_comment = GroupComment.objects.filter(user_role=GroupComment.USER_ROLE_EXAMINER).first()
        self.assertEqual(delivery_comment.pk, 3)
        self.assertEqual(delivery_comment.id, 3)
        self.assertEqual(feedback_comment.pk, model_meta['max_id'] + 1)
        self.assertEqual(feedback_comment.id, model_meta['max_id'] + 1)
| 51.006849
| 115
| 0.644868
| 2,279
| 22,341
| 6.004388
| 0.128126
| 0.05437
| 0.051447
| 0.032301
| 0.82929
| 0.815332
| 0.800862
| 0.791654
| 0.791654
| 0.762862
| 0
| 0.006217
| 0.280023
| 22,341
| 437
| 116
| 51.12357
| 0.844514
| 0.034018
| 0
| 0.703518
| 0
| 0
| 0.175591
| 0.01839
| 0
| 0
| 0
| 0
| 0.052764
| 1
| 0.050251
| false
| 0.005025
| 0.115578
| 0.01005
| 0.180905
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2c6c5a6045da193b0f7d3c515d3a7fb928719823
| 157,673
|
py
|
Python
|
napalm_yang/models/openconfig/network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 64
|
2016-10-20T15:47:18.000Z
|
2021-11-11T11:57:32.000Z
|
napalm_yang/models/openconfig/network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 126
|
2016-10-05T10:36:14.000Z
|
2019-05-15T08:43:23.000Z
|
napalm_yang/models/openconfig/network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 63
|
2016-11-07T15:23:08.000Z
|
2021-09-22T14:41:16.000Z
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
# Under Python 3 the PY2 ``__builtin__`` module and the ``long`` type no longer
# exist; alias them so the auto-generated code below can use PY2 names unchanged.
if six.PY3:
    import builtins as __builtin__

    long = int
elif six.PY2:
    import __builtin__
class counters(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/mpls/signaling-protocols/rsvp-te/interface-attributes/interface/state/counters. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Interface specific RSVP statistics and counters
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__in_path_messages",
"__in_path_error_messages",
"__in_path_tear_messages",
"__in_reservation_messages",
"__in_reservation_error_messages",
"__in_reservation_tear_messages",
"__in_hello_messages",
"__in_srefresh_messages",
"__in_ack_messages",
"__out_path_messages",
"__out_path_error_messages",
"__out_path_tear_messages",
"__out_reservation_messages",
"__out_reservation_error_messages",
"__out_reservation_tear_messages",
"__out_hello_messages",
"__out_srefresh_messages",
"__out_ack_messages",
)
_yang_name = "counters"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
    """Initialize the RSVP-TE per-interface counters container.

    Every leaf in this container is an identical read-only yang:counter64
    (uint64 message counter); the original generated code repeated the same
    18-line ``YANGDynClass`` construction 18 times, so a local factory is
    used instead. The constructed objects are identical to the originals.

    Accepts at most one positional argument: an object exposing the same
    pyangbind elements, whose changed values are copied into this instance
    via the corresponding ``_set_*`` methods (honouring the optional
    ``load`` keyword argument).

    Raises:
        TypeError: more than one positional argument was supplied.
        ValueError: the supplied object lacks one of the expected elements.
    """
    self._path_helper = False
    self._extmethods = False

    def _counter64_leaf(yang_name):
        # Build one read-only yang:counter64 leaf (range 0..2**64-1).
        return YANGDynClass(
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..18446744073709551615"]},
                int_size=64,
            ),
            is_leaf=True,
            yang_name=yang_name,
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter64",
            is_config=False,
        )

    # Direct attribute assignments keep Python's name mangling
    # (_counters__<leaf>) identical to the original generated code.
    self.__in_path_messages = _counter64_leaf("in-path-messages")
    self.__in_path_error_messages = _counter64_leaf("in-path-error-messages")
    self.__in_path_tear_messages = _counter64_leaf("in-path-tear-messages")
    self.__in_reservation_messages = _counter64_leaf("in-reservation-messages")
    self.__in_reservation_error_messages = _counter64_leaf(
        "in-reservation-error-messages"
    )
    self.__in_reservation_tear_messages = _counter64_leaf(
        "in-reservation-tear-messages"
    )
    self.__in_hello_messages = _counter64_leaf("in-hello-messages")
    self.__in_srefresh_messages = _counter64_leaf("in-srefresh-messages")
    self.__in_ack_messages = _counter64_leaf("in-ack-messages")
    self.__out_path_messages = _counter64_leaf("out-path-messages")
    self.__out_path_error_messages = _counter64_leaf("out-path-error-messages")
    self.__out_path_tear_messages = _counter64_leaf("out-path-tear-messages")
    self.__out_reservation_messages = _counter64_leaf("out-reservation-messages")
    self.__out_reservation_error_messages = _counter64_leaf(
        "out-reservation-error-messages"
    )
    self.__out_reservation_tear_messages = _counter64_leaf(
        "out-reservation-tear-messages"
    )
    self.__out_hello_messages = _counter64_leaf("out-hello-messages")
    self.__out_srefresh_messages = _counter64_leaf("out-srefresh-messages")
    self.__out_ack_messages = _counter64_leaf("out-ack-messages")

    load = kwargs.pop("load", None)
    if args:
        if len(args) > 1:
            raise TypeError("cannot create a YANG container with >1 argument")
        # Validate that the supplied object carries every expected element
        # before copying anything.
        all_attr = True
        for e in self._pyangbind_elements:
            if not hasattr(args[0], e):
                all_attr = False
                break
        if not all_attr:
            raise ValueError("Supplied object did not have the correct attributes")
        # Copy only the elements the source object actually changed.
        for e in self._pyangbind_elements:
            nobj = getattr(args[0], e)
            if nobj._changed() is False:
                continue
            setmethod = getattr(self, "_set_%s" % e)
            if load is None:
                setmethod(getattr(args[0], e))
            else:
                setmethod(getattr(args[0], e), load=load)
def _path(self):
    """Return this container's data-tree path as a list of element names.

    When the node is attached to a parent, the path is derived from the
    parent's path plus this node's YANG name; otherwise the absolute
    schema-default path of the container is returned.
    """
    if hasattr(self, "_parent"):
        return self._parent._path() + [self._yang_name]
    return [
        "network-instances",
        "network-instance",
        "mpls",
        "signaling-protocols",
        "rsvp-te",
        "interface-attributes",
        "interface",
        "state",
        "counters",
    ]
def _get_in_path_messages(self):
    """
    Getter method for in_path_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_path_messages (yang:counter64)

    YANG Description: Number of received RSVP Path messages
    """
    return self.__in_path_messages

def _set_in_path_messages(self, v, load=False):
    """
    Setter method for in_path_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_path_messages (yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_in_path_messages is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_in_path_messages() directly.

    YANG Description: Number of received RSVP Path messages
    """
    # Unwrap a value that was itself produced by a YANGDynClass wrapper.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap, enforcing the yang:counter64 range 0..2**64-1.
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..18446744073709551615"]},
                int_size=64,
            ),
            is_leaf=True,
            yang_name="in-path-messages",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter64",
            is_config=False,
        )
    except (TypeError, ValueError):
        raise ValueError(
            {
                "error-string": """in_path_messages must be of a type compatible with yang:counter64""",
                "defined-type": "yang:counter64",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-path-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
            }
        )
    self.__in_path_messages = t
    if hasattr(self, "_set"):
        self._set()

def _unset_in_path_messages(self):
    # Replace the leaf with a fresh default-constructed counter instance.
    self.__in_path_messages = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..18446744073709551615"]},
            int_size=64,
        ),
        is_leaf=True,
        yang_name="in-path-messages",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter64",
        is_config=False,
    )
def _get_in_path_error_messages(self):
    """
    Getter method for in_path_error_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_path_error_messages (yang:counter64)

    YANG Description: Number of received RSVP Path Error messages
    """
    return self.__in_path_error_messages

def _set_in_path_error_messages(self, v, load=False):
    """
    Setter method for in_path_error_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_path_error_messages (yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_in_path_error_messages is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_in_path_error_messages() directly.

    YANG Description: Number of received RSVP Path Error messages
    """
    # Unwrap a value that was itself produced by a YANGDynClass wrapper.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap, enforcing the yang:counter64 range 0..2**64-1.
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..18446744073709551615"]},
                int_size=64,
            ),
            is_leaf=True,
            yang_name="in-path-error-messages",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter64",
            is_config=False,
        )
    except (TypeError, ValueError):
        raise ValueError(
            {
                "error-string": """in_path_error_messages must be of a type compatible with yang:counter64""",
                "defined-type": "yang:counter64",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-path-error-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
            }
        )
    self.__in_path_error_messages = t
    if hasattr(self, "_set"):
        self._set()

def _unset_in_path_error_messages(self):
    # Replace the leaf with a fresh default-constructed counter instance.
    self.__in_path_error_messages = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..18446744073709551615"]},
            int_size=64,
        ),
        is_leaf=True,
        yang_name="in-path-error-messages",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter64",
        is_config=False,
    )
def _get_in_path_tear_messages(self):
    """
    Getter method for in_path_tear_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_path_tear_messages (yang:counter64)

    YANG Description: Number of received RSVP Path Tear messages
    """
    return self.__in_path_tear_messages

def _set_in_path_tear_messages(self, v, load=False):
    """
    Setter method for in_path_tear_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_path_tear_messages (yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_in_path_tear_messages is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_in_path_tear_messages() directly.

    YANG Description: Number of received RSVP Path Tear messages
    """
    # Unwrap a value that was itself produced by a YANGDynClass wrapper.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap, enforcing the yang:counter64 range 0..2**64-1.
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..18446744073709551615"]},
                int_size=64,
            ),
            is_leaf=True,
            yang_name="in-path-tear-messages",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter64",
            is_config=False,
        )
    except (TypeError, ValueError):
        raise ValueError(
            {
                "error-string": """in_path_tear_messages must be of a type compatible with yang:counter64""",
                "defined-type": "yang:counter64",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-path-tear-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
            }
        )
    self.__in_path_tear_messages = t
    if hasattr(self, "_set"):
        self._set()

def _unset_in_path_tear_messages(self):
    # Replace the leaf with a fresh default-constructed counter instance.
    self.__in_path_tear_messages = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..18446744073709551615"]},
            int_size=64,
        ),
        is_leaf=True,
        yang_name="in-path-tear-messages",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter64",
        is_config=False,
    )
def _get_in_reservation_messages(self):
    """
    Getter method for in_reservation_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_reservation_messages (yang:counter64)

    YANG Description: Number of received RSVP Resv messages
    """
    return self.__in_reservation_messages

def _set_in_reservation_messages(self, v, load=False):
    """
    Setter method for in_reservation_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_reservation_messages (yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_in_reservation_messages is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_in_reservation_messages() directly.

    YANG Description: Number of received RSVP Resv messages
    """
    # Unwrap a value that was itself produced by a YANGDynClass wrapper.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap, enforcing the yang:counter64 range 0..2**64-1.
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..18446744073709551615"]},
                int_size=64,
            ),
            is_leaf=True,
            yang_name="in-reservation-messages",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter64",
            is_config=False,
        )
    except (TypeError, ValueError):
        raise ValueError(
            {
                "error-string": """in_reservation_messages must be of a type compatible with yang:counter64""",
                "defined-type": "yang:counter64",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-reservation-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
            }
        )
    self.__in_reservation_messages = t
    if hasattr(self, "_set"):
        self._set()

def _unset_in_reservation_messages(self):
    # Replace the leaf with a fresh default-constructed counter instance.
    self.__in_reservation_messages = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..18446744073709551615"]},
            int_size=64,
        ),
        is_leaf=True,
        yang_name="in-reservation-messages",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter64",
        is_config=False,
    )
def _get_in_reservation_error_messages(self):
    """
    Getter method for in_reservation_error_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_reservation_error_messages (yang:counter64)

    YANG Description: Number of received RSVP Resv Error messages
    """
    return self.__in_reservation_error_messages

def _set_in_reservation_error_messages(self, v, load=False):
    """
    Setter method for in_reservation_error_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_reservation_error_messages (yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_in_reservation_error_messages is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_in_reservation_error_messages() directly.

    YANG Description: Number of received RSVP Resv Error messages
    """
    # Unwrap a value that was itself produced by a YANGDynClass wrapper.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap, enforcing the yang:counter64 range 0..2**64-1.
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..18446744073709551615"]},
                int_size=64,
            ),
            is_leaf=True,
            yang_name="in-reservation-error-messages",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter64",
            is_config=False,
        )
    except (TypeError, ValueError):
        raise ValueError(
            {
                "error-string": """in_reservation_error_messages must be of a type compatible with yang:counter64""",
                "defined-type": "yang:counter64",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-reservation-error-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
            }
        )
    self.__in_reservation_error_messages = t
    if hasattr(self, "_set"):
        self._set()

def _unset_in_reservation_error_messages(self):
    # Replace the leaf with a fresh default-constructed counter instance.
    self.__in_reservation_error_messages = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..18446744073709551615"]},
            int_size=64,
        ),
        is_leaf=True,
        yang_name="in-reservation-error-messages",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter64",
        is_config=False,
    )
def _get_in_reservation_tear_messages(self):
    """
    Getter method for in_reservation_tear_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_reservation_tear_messages (yang:counter64)

    YANG Description: Number of received RSVP Resv Tear messages
    """
    return self.__in_reservation_tear_messages

def _set_in_reservation_tear_messages(self, v, load=False):
    """
    Setter method for in_reservation_tear_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_reservation_tear_messages (yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_in_reservation_tear_messages is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_in_reservation_tear_messages() directly.

    YANG Description: Number of received RSVP Resv Tear messages
    """
    # Unwrap a value that was itself produced by a YANGDynClass wrapper.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap, enforcing the yang:counter64 range 0..2**64-1.
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..18446744073709551615"]},
                int_size=64,
            ),
            is_leaf=True,
            yang_name="in-reservation-tear-messages",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter64",
            is_config=False,
        )
    except (TypeError, ValueError):
        raise ValueError(
            {
                "error-string": """in_reservation_tear_messages must be of a type compatible with yang:counter64""",
                "defined-type": "yang:counter64",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-reservation-tear-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
            }
        )
    self.__in_reservation_tear_messages = t
    if hasattr(self, "_set"):
        self._set()

def _unset_in_reservation_tear_messages(self):
    # Replace the leaf with a fresh default-constructed counter instance.
    self.__in_reservation_tear_messages = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..18446744073709551615"]},
            int_size=64,
        ),
        is_leaf=True,
        yang_name="in-reservation-tear-messages",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter64",
        is_config=False,
    )
def _get_in_hello_messages(self):
    """
    Getter method for in_hello_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_hello_messages (yang:counter64)

    YANG Description: Number of received RSVP hello messages
    """
    return self.__in_hello_messages

def _set_in_hello_messages(self, v, load=False):
    """
    Setter method for in_hello_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_hello_messages (yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_in_hello_messages is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_in_hello_messages() directly.

    YANG Description: Number of received RSVP hello messages
    """
    # Unwrap a value that was itself produced by a YANGDynClass wrapper.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap, enforcing the yang:counter64 range 0..2**64-1.
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..18446744073709551615"]},
                int_size=64,
            ),
            is_leaf=True,
            yang_name="in-hello-messages",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter64",
            is_config=False,
        )
    except (TypeError, ValueError):
        raise ValueError(
            {
                "error-string": """in_hello_messages must be of a type compatible with yang:counter64""",
                "defined-type": "yang:counter64",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-hello-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
            }
        )
    self.__in_hello_messages = t
    if hasattr(self, "_set"):
        self._set()

def _unset_in_hello_messages(self):
    # Replace the leaf with a fresh default-constructed counter instance.
    self.__in_hello_messages = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..18446744073709551615"]},
            int_size=64,
        ),
        is_leaf=True,
        yang_name="in-hello-messages",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter64",
        is_config=False,
    )
def _get_in_srefresh_messages(self):
    """
    Getter method for in_srefresh_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_srefresh_messages (yang:counter64)

    YANG Description: Number of received RSVP summary refresh messages
    """
    return self.__in_srefresh_messages

def _set_in_srefresh_messages(self, v, load=False):
    """
    Setter method for in_srefresh_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_srefresh_messages (yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_in_srefresh_messages is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_in_srefresh_messages() directly.

    YANG Description: Number of received RSVP summary refresh messages
    """
    # Unwrap a value that was itself produced by a YANGDynClass wrapper.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap, enforcing the yang:counter64 range 0..2**64-1.
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..18446744073709551615"]},
                int_size=64,
            ),
            is_leaf=True,
            yang_name="in-srefresh-messages",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter64",
            is_config=False,
        )
    except (TypeError, ValueError):
        raise ValueError(
            {
                "error-string": """in_srefresh_messages must be of a type compatible with yang:counter64""",
                "defined-type": "yang:counter64",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-srefresh-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
            }
        )
    self.__in_srefresh_messages = t
    if hasattr(self, "_set"):
        self._set()

def _unset_in_srefresh_messages(self):
    # Replace the leaf with a fresh default-constructed counter instance.
    self.__in_srefresh_messages = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..18446744073709551615"]},
            int_size=64,
        ),
        is_leaf=True,
        yang_name="in-srefresh-messages",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter64",
        is_config=False,
    )
def _get_in_ack_messages(self):
"""
Getter method for in_ack_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_ack_messages (yang:counter64)
YANG Description: Number of received RSVP refresh reduction ack
messages
"""
return self.__in_ack_messages
def _set_in_ack_messages(self, v, load=False):
"""
Setter method for in_ack_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_ack_messages (yang:counter64)
If this variable is read-only (config: false) in the
source YANG file, then _set_in_ack_messages is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_in_ack_messages() directly.
YANG Description: Number of received RSVP refresh reduction ack
messages
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-ack-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """in_ack_messages must be of a type compatible with yang:counter64""",
"defined-type": "yang:counter64",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-ack-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
}
)
self.__in_ack_messages = t
if hasattr(self, "_set"):
self._set()
def _unset_in_ack_messages(self):
    """Reset in_ack_messages to a fresh, unset counter64 leaf instance."""
    self.__in_ack_messages = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..18446744073709551615"]},
            int_size=64,
        ),
        is_leaf=True,
        yang_name="in-ack-messages",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter64",
        is_config=False,
    )
def _get_out_path_messages(self):
    """Return the out_path_messages leaf (number of sent RSVP PATH messages).

    Maps the YANG path /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_path_messages (yang:counter64).
    """
    leaf = self.__out_path_messages
    return leaf
def _set_out_path_messages(self, v, load=False):
    """
    Setter method for out_path_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_path_messages (yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_out_path_messages is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_out_path_messages() directly.
    YANG Description: Number of sent RSVP PATH messages
    """
    # NOTE(review): `load` is accepted for signature compatibility with the
    # generated setter convention but is not referenced in this body.
    # If the incoming value carries a pyangbind union-type hook, let it
    # normalise itself before coercion.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap v in a dynamic leaf class constrained to the uint64 range of
        # yang:counter64; incompatible input raises TypeError/ValueError.
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..18446744073709551615"]},
                int_size=64,
            ),
            is_leaf=True,
            yang_name="out-path-messages",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter64",
            is_config=False,
        )
    except (TypeError, ValueError):
        # Re-raise as a structured ValueError describing the expected type.
        raise ValueError(
            {
                "error-string": """out_path_messages must be of a type compatible with yang:counter64""",
                "defined-type": "yang:counter64",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-path-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
            }
        )
    # Store the coerced leaf and notify the object via its change hook,
    # if one is present.
    self.__out_path_messages = t
    if hasattr(self, "_set"):
        self._set()
def _unset_out_path_messages(self):
    """Reset out_path_messages to a fresh, unset counter64 leaf instance."""
    self.__out_path_messages = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..18446744073709551615"]},
            int_size=64,
        ),
        is_leaf=True,
        yang_name="out-path-messages",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter64",
        is_config=False,
    )
def _get_out_path_error_messages(self):
    """Return the out_path_error_messages leaf (number of sent RSVP Path Error messages).

    Maps the YANG path /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_path_error_messages (yang:counter64).
    """
    leaf = self.__out_path_error_messages
    return leaf
def _set_out_path_error_messages(self, v, load=False):
    """
    Setter method for out_path_error_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_path_error_messages (yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_out_path_error_messages is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_out_path_error_messages() directly.
    YANG Description: Number of sent RSVP Path Error messages
    """
    # NOTE(review): `load` is accepted for signature compatibility with the
    # generated setter convention but is not referenced in this body.
    # If the incoming value carries a pyangbind union-type hook, let it
    # normalise itself before coercion.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap v in a dynamic leaf class constrained to the uint64 range of
        # yang:counter64; incompatible input raises TypeError/ValueError.
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..18446744073709551615"]},
                int_size=64,
            ),
            is_leaf=True,
            yang_name="out-path-error-messages",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter64",
            is_config=False,
        )
    except (TypeError, ValueError):
        # Re-raise as a structured ValueError describing the expected type.
        raise ValueError(
            {
                "error-string": """out_path_error_messages must be of a type compatible with yang:counter64""",
                "defined-type": "yang:counter64",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-path-error-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
            }
        )
    # Store the coerced leaf and notify the object via its change hook,
    # if one is present.
    self.__out_path_error_messages = t
    if hasattr(self, "_set"):
        self._set()
def _unset_out_path_error_messages(self):
    """Reset out_path_error_messages to a fresh, unset counter64 leaf instance."""
    self.__out_path_error_messages = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..18446744073709551615"]},
            int_size=64,
        ),
        is_leaf=True,
        yang_name="out-path-error-messages",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter64",
        is_config=False,
    )
def _get_out_path_tear_messages(self):
    """Return the out_path_tear_messages leaf (number of sent RSVP Path Tear messages).

    Maps the YANG path /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_path_tear_messages (yang:counter64).
    """
    leaf = self.__out_path_tear_messages
    return leaf
def _set_out_path_tear_messages(self, v, load=False):
    """
    Setter method for out_path_tear_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_path_tear_messages (yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_out_path_tear_messages is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_out_path_tear_messages() directly.
    YANG Description: Number of sent RSVP Path Tear messages
    """
    # NOTE(review): `load` is accepted for signature compatibility with the
    # generated setter convention but is not referenced in this body.
    # If the incoming value carries a pyangbind union-type hook, let it
    # normalise itself before coercion.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap v in a dynamic leaf class constrained to the uint64 range of
        # yang:counter64; incompatible input raises TypeError/ValueError.
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..18446744073709551615"]},
                int_size=64,
            ),
            is_leaf=True,
            yang_name="out-path-tear-messages",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter64",
            is_config=False,
        )
    except (TypeError, ValueError):
        # Re-raise as a structured ValueError describing the expected type.
        raise ValueError(
            {
                "error-string": """out_path_tear_messages must be of a type compatible with yang:counter64""",
                "defined-type": "yang:counter64",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-path-tear-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
            }
        )
    # Store the coerced leaf and notify the object via its change hook,
    # if one is present.
    self.__out_path_tear_messages = t
    if hasattr(self, "_set"):
        self._set()
def _unset_out_path_tear_messages(self):
    """Reset out_path_tear_messages to a fresh, unset counter64 leaf instance."""
    self.__out_path_tear_messages = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..18446744073709551615"]},
            int_size=64,
        ),
        is_leaf=True,
        yang_name="out-path-tear-messages",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter64",
        is_config=False,
    )
def _get_out_reservation_messages(self):
    """Return the out_reservation_messages leaf (number of sent RSVP Resv messages).

    Maps the YANG path /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_reservation_messages (yang:counter64).
    """
    leaf = self.__out_reservation_messages
    return leaf
def _set_out_reservation_messages(self, v, load=False):
    """
    Setter method for out_reservation_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_reservation_messages (yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_out_reservation_messages is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_out_reservation_messages() directly.
    YANG Description: Number of sent RSVP Resv messages
    """
    # NOTE(review): `load` is accepted for signature compatibility with the
    # generated setter convention but is not referenced in this body.
    # If the incoming value carries a pyangbind union-type hook, let it
    # normalise itself before coercion.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap v in a dynamic leaf class constrained to the uint64 range of
        # yang:counter64; incompatible input raises TypeError/ValueError.
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..18446744073709551615"]},
                int_size=64,
            ),
            is_leaf=True,
            yang_name="out-reservation-messages",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter64",
            is_config=False,
        )
    except (TypeError, ValueError):
        # Re-raise as a structured ValueError describing the expected type.
        raise ValueError(
            {
                "error-string": """out_reservation_messages must be of a type compatible with yang:counter64""",
                "defined-type": "yang:counter64",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-reservation-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
            }
        )
    # Store the coerced leaf and notify the object via its change hook,
    # if one is present.
    self.__out_reservation_messages = t
    if hasattr(self, "_set"):
        self._set()
def _unset_out_reservation_messages(self):
    """Reset out_reservation_messages to a fresh, unset counter64 leaf instance."""
    self.__out_reservation_messages = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..18446744073709551615"]},
            int_size=64,
        ),
        is_leaf=True,
        yang_name="out-reservation-messages",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter64",
        is_config=False,
    )
def _get_out_reservation_error_messages(self):
    """Return the out_reservation_error_messages leaf (number of sent RSVP Resv Error messages).

    Maps the YANG path /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_reservation_error_messages (yang:counter64).
    """
    leaf = self.__out_reservation_error_messages
    return leaf
def _set_out_reservation_error_messages(self, v, load=False):
    """
    Setter method for out_reservation_error_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_reservation_error_messages (yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_out_reservation_error_messages is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_out_reservation_error_messages() directly.
    YANG Description: Number of sent RSVP Resv Error messages
    """
    # NOTE(review): `load` is accepted for signature compatibility with the
    # generated setter convention but is not referenced in this body.
    # If the incoming value carries a pyangbind union-type hook, let it
    # normalise itself before coercion.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap v in a dynamic leaf class constrained to the uint64 range of
        # yang:counter64; incompatible input raises TypeError/ValueError.
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..18446744073709551615"]},
                int_size=64,
            ),
            is_leaf=True,
            yang_name="out-reservation-error-messages",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter64",
            is_config=False,
        )
    except (TypeError, ValueError):
        # Re-raise as a structured ValueError describing the expected type.
        raise ValueError(
            {
                "error-string": """out_reservation_error_messages must be of a type compatible with yang:counter64""",
                "defined-type": "yang:counter64",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-reservation-error-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
            }
        )
    # Store the coerced leaf and notify the object via its change hook,
    # if one is present.
    self.__out_reservation_error_messages = t
    if hasattr(self, "_set"):
        self._set()
def _unset_out_reservation_error_messages(self):
    """Reset out_reservation_error_messages to a fresh, unset counter64 leaf instance."""
    self.__out_reservation_error_messages = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..18446744073709551615"]},
            int_size=64,
        ),
        is_leaf=True,
        yang_name="out-reservation-error-messages",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter64",
        is_config=False,
    )
def _get_out_reservation_tear_messages(self):
    """Return the out_reservation_tear_messages leaf (number of sent RSVP Resv Tear messages).

    Maps the YANG path /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_reservation_tear_messages (yang:counter64).
    """
    leaf = self.__out_reservation_tear_messages
    return leaf
def _set_out_reservation_tear_messages(self, v, load=False):
    """
    Setter method for out_reservation_tear_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_reservation_tear_messages (yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_out_reservation_tear_messages is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_out_reservation_tear_messages() directly.
    YANG Description: Number of sent RSVP Resv Tear messages
    """
    # NOTE(review): `load` is accepted for signature compatibility with the
    # generated setter convention but is not referenced in this body.
    # If the incoming value carries a pyangbind union-type hook, let it
    # normalise itself before coercion.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap v in a dynamic leaf class constrained to the uint64 range of
        # yang:counter64; incompatible input raises TypeError/ValueError.
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..18446744073709551615"]},
                int_size=64,
            ),
            is_leaf=True,
            yang_name="out-reservation-tear-messages",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter64",
            is_config=False,
        )
    except (TypeError, ValueError):
        # Re-raise as a structured ValueError describing the expected type.
        raise ValueError(
            {
                "error-string": """out_reservation_tear_messages must be of a type compatible with yang:counter64""",
                "defined-type": "yang:counter64",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-reservation-tear-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
            }
        )
    # Store the coerced leaf and notify the object via its change hook,
    # if one is present.
    self.__out_reservation_tear_messages = t
    if hasattr(self, "_set"):
        self._set()
def _unset_out_reservation_tear_messages(self):
    """Reset out_reservation_tear_messages to a fresh, unset counter64 leaf instance."""
    self.__out_reservation_tear_messages = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..18446744073709551615"]},
            int_size=64,
        ),
        is_leaf=True,
        yang_name="out-reservation-tear-messages",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter64",
        is_config=False,
    )
def _get_out_hello_messages(self):
    """Return the out_hello_messages leaf (number of sent RSVP hello messages).

    Maps the YANG path /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_hello_messages (yang:counter64).
    """
    leaf = self.__out_hello_messages
    return leaf
def _set_out_hello_messages(self, v, load=False):
    """
    Setter method for out_hello_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_hello_messages (yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_out_hello_messages is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_out_hello_messages() directly.
    YANG Description: Number of sent RSVP hello messages
    """
    # NOTE(review): `load` is accepted for signature compatibility with the
    # generated setter convention but is not referenced in this body.
    # If the incoming value carries a pyangbind union-type hook, let it
    # normalise itself before coercion.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap v in a dynamic leaf class constrained to the uint64 range of
        # yang:counter64; incompatible input raises TypeError/ValueError.
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..18446744073709551615"]},
                int_size=64,
            ),
            is_leaf=True,
            yang_name="out-hello-messages",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter64",
            is_config=False,
        )
    except (TypeError, ValueError):
        # Re-raise as a structured ValueError describing the expected type.
        raise ValueError(
            {
                "error-string": """out_hello_messages must be of a type compatible with yang:counter64""",
                "defined-type": "yang:counter64",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-hello-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
            }
        )
    # Store the coerced leaf and notify the object via its change hook,
    # if one is present.
    self.__out_hello_messages = t
    if hasattr(self, "_set"):
        self._set()
def _unset_out_hello_messages(self):
    """Reset out_hello_messages to a fresh, unset counter64 leaf instance."""
    self.__out_hello_messages = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..18446744073709551615"]},
            int_size=64,
        ),
        is_leaf=True,
        yang_name="out-hello-messages",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter64",
        is_config=False,
    )
def _get_out_srefresh_messages(self):
    """Return the out_srefresh_messages leaf (number of sent RSVP summary refresh messages).

    Maps the YANG path /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_srefresh_messages (yang:counter64).
    """
    leaf = self.__out_srefresh_messages
    return leaf
def _set_out_srefresh_messages(self, v, load=False):
    """
    Setter method for out_srefresh_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_srefresh_messages (yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_out_srefresh_messages is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_out_srefresh_messages() directly.
    YANG Description: Number of sent RSVP summary refresh messages
    """
    # NOTE(review): `load` is accepted for signature compatibility with the
    # generated setter convention but is not referenced in this body.
    # If the incoming value carries a pyangbind union-type hook, let it
    # normalise itself before coercion.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap v in a dynamic leaf class constrained to the uint64 range of
        # yang:counter64; incompatible input raises TypeError/ValueError.
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..18446744073709551615"]},
                int_size=64,
            ),
            is_leaf=True,
            yang_name="out-srefresh-messages",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter64",
            is_config=False,
        )
    except (TypeError, ValueError):
        # Re-raise as a structured ValueError describing the expected type.
        raise ValueError(
            {
                "error-string": """out_srefresh_messages must be of a type compatible with yang:counter64""",
                "defined-type": "yang:counter64",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-srefresh-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
            }
        )
    # Store the coerced leaf and notify the object via its change hook,
    # if one is present.
    self.__out_srefresh_messages = t
    if hasattr(self, "_set"):
        self._set()
def _unset_out_srefresh_messages(self):
    """Reset out_srefresh_messages to a fresh, unset counter64 leaf instance."""
    self.__out_srefresh_messages = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..18446744073709551615"]},
            int_size=64,
        ),
        is_leaf=True,
        yang_name="out-srefresh-messages",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter64",
        is_config=False,
    )
def _get_out_ack_messages(self):
    """Return the out_ack_messages leaf (number of sent RSVP refresh reduction ack messages).

    Maps the YANG path /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_ack_messages (yang:counter64).
    """
    leaf = self.__out_ack_messages
    return leaf
def _set_out_ack_messages(self, v, load=False):
    """
    Setter method for out_ack_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_ack_messages (yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_out_ack_messages is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_out_ack_messages() directly.
    YANG Description: Number of sent RSVP refresh reduction ack messages
    """
    # NOTE(review): `load` is accepted for signature compatibility with the
    # generated setter convention but is not referenced in this body.
    # If the incoming value carries a pyangbind union-type hook, let it
    # normalise itself before coercion.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap v in a dynamic leaf class constrained to the uint64 range of
        # yang:counter64; incompatible input raises TypeError/ValueError.
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..18446744073709551615"]},
                int_size=64,
            ),
            is_leaf=True,
            yang_name="out-ack-messages",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter64",
            is_config=False,
        )
    except (TypeError, ValueError):
        # Re-raise as a structured ValueError describing the expected type.
        raise ValueError(
            {
                "error-string": """out_ack_messages must be of a type compatible with yang:counter64""",
                "defined-type": "yang:counter64",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-ack-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
            }
        )
    # Store the coerced leaf and notify the object via its change hook,
    # if one is present.
    self.__out_ack_messages = t
    if hasattr(self, "_set"):
        self._set()
def _unset_out_ack_messages(self):
    """Reset out_ack_messages to a fresh, unset counter64 leaf instance."""
    self.__out_ack_messages = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..18446744073709551615"]},
            int_size=64,
        ),
        is_leaf=True,
        yang_name="out-ack-messages",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter64",
        is_config=False,
    )
# Public read-only accessors: each state counter is exposed as a property
# backed only by its getter (these leaves are config false, so no public
# setter is attached; backends use the private _set_* methods).
in_path_messages = __builtin__.property(_get_in_path_messages)
in_path_error_messages = __builtin__.property(_get_in_path_error_messages)
in_path_tear_messages = __builtin__.property(_get_in_path_tear_messages)
in_reservation_messages = __builtin__.property(_get_in_reservation_messages)
in_reservation_error_messages = __builtin__.property(
    _get_in_reservation_error_messages
)
in_reservation_tear_messages = __builtin__.property(
    _get_in_reservation_tear_messages
)
in_hello_messages = __builtin__.property(_get_in_hello_messages)
in_srefresh_messages = __builtin__.property(_get_in_srefresh_messages)
in_ack_messages = __builtin__.property(_get_in_ack_messages)
out_path_messages = __builtin__.property(_get_out_path_messages)
out_path_error_messages = __builtin__.property(_get_out_path_error_messages)
out_path_tear_messages = __builtin__.property(_get_out_path_tear_messages)
out_reservation_messages = __builtin__.property(_get_out_reservation_messages)
out_reservation_error_messages = __builtin__.property(
    _get_out_reservation_error_messages
)
out_reservation_tear_messages = __builtin__.property(
    _get_out_reservation_tear_messages
)
out_hello_messages = __builtin__.property(_get_out_hello_messages)
out_srefresh_messages = __builtin__.property(_get_out_srefresh_messages)
out_ack_messages = __builtin__.property(_get_out_ack_messages)
# Ordered registry of this container's leaf elements, keyed by their
# Python-safe names.  NOTE(review): presumably consumed by pyangbind's
# generic serialisation/iteration machinery in PybindBase -- confirm.
_pyangbind_elements = OrderedDict(
    [
        ("in_path_messages", in_path_messages),
        ("in_path_error_messages", in_path_error_messages),
        ("in_path_tear_messages", in_path_tear_messages),
        ("in_reservation_messages", in_reservation_messages),
        ("in_reservation_error_messages", in_reservation_error_messages),
        ("in_reservation_tear_messages", in_reservation_tear_messages),
        ("in_hello_messages", in_hello_messages),
        ("in_srefresh_messages", in_srefresh_messages),
        ("in_ack_messages", in_ack_messages),
        ("out_path_messages", out_path_messages),
        ("out_path_error_messages", out_path_error_messages),
        ("out_path_tear_messages", out_path_tear_messages),
        ("out_reservation_messages", out_reservation_messages),
        ("out_reservation_error_messages", out_reservation_error_messages),
        ("out_reservation_tear_messages", out_reservation_tear_messages),
        ("out_hello_messages", out_hello_messages),
        ("out_srefresh_messages", out_srefresh_messages),
        ("out_ack_messages", out_ack_messages),
    ]
)
class counters(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/mpls/signaling-protocols/rsvp-te/interface-attributes/interface/state/counters. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Interface specific RSVP statistics and counters
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__in_path_messages",
"__in_path_error_messages",
"__in_path_tear_messages",
"__in_reservation_messages",
"__in_reservation_error_messages",
"__in_reservation_tear_messages",
"__in_hello_messages",
"__in_srefresh_messages",
"__in_ack_messages",
"__out_path_messages",
"__out_path_error_messages",
"__out_path_tear_messages",
"__out_reservation_messages",
"__out_reservation_error_messages",
"__out_reservation_tear_messages",
"__out_hello_messages",
"__out_srefresh_messages",
"__out_ack_messages",
)
_yang_name = "counters"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__in_path_messages = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-path-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
self.__in_path_error_messages = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-path-error-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
self.__in_path_tear_messages = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-path-tear-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
self.__in_reservation_messages = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-reservation-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
self.__in_reservation_error_messages = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-reservation-error-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
self.__in_reservation_tear_messages = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-reservation-tear-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
self.__in_hello_messages = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-hello-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
self.__in_srefresh_messages = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-srefresh-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
self.__in_ack_messages = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-ack-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
self.__out_path_messages = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="out-path-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
self.__out_path_error_messages = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="out-path-error-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
self.__out_path_tear_messages = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="out-path-tear-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
self.__out_reservation_messages = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="out-reservation-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
self.__out_reservation_error_messages = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="out-reservation-error-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
self.__out_reservation_tear_messages = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="out-reservation-tear-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
self.__out_hello_messages = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="out-hello-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
self.__out_srefresh_messages = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="out-srefresh-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
self.__out_ack_messages = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="out-ack-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"mpls",
"signaling-protocols",
"rsvp-te",
"interface-attributes",
"interface",
"state",
"counters",
]
def _get_in_path_messages(self):
"""
Getter method for in_path_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_path_messages (yang:counter64)
YANG Description: Number of received RSVP Path messages
"""
return self.__in_path_messages
def _set_in_path_messages(self, v, load=False):
"""
Setter method for in_path_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_path_messages (yang:counter64)
If this variable is read-only (config: false) in the
source YANG file, then _set_in_path_messages is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_in_path_messages() directly.
YANG Description: Number of received RSVP Path messages
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-path-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """in_path_messages must be of a type compatible with yang:counter64""",
"defined-type": "yang:counter64",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-path-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
}
)
self.__in_path_messages = t
if hasattr(self, "_set"):
self._set()
def _unset_in_path_messages(self):
self.__in_path_messages = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-path-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
def _get_in_path_error_messages(self):
"""
Getter method for in_path_error_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_path_error_messages (yang:counter64)
YANG Description: Number of received RSVP Path Error messages
"""
return self.__in_path_error_messages
def _set_in_path_error_messages(self, v, load=False):
"""
Setter method for in_path_error_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_path_error_messages (yang:counter64)
If this variable is read-only (config: false) in the
source YANG file, then _set_in_path_error_messages is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_in_path_error_messages() directly.
YANG Description: Number of received RSVP Path Error messages
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-path-error-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """in_path_error_messages must be of a type compatible with yang:counter64""",
"defined-type": "yang:counter64",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-path-error-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
}
)
self.__in_path_error_messages = t
if hasattr(self, "_set"):
self._set()
def _unset_in_path_error_messages(self):
self.__in_path_error_messages = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-path-error-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
def _get_in_path_tear_messages(self):
"""
Getter method for in_path_tear_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_path_tear_messages (yang:counter64)
YANG Description: Number of received RSVP Path Tear messages
"""
return self.__in_path_tear_messages
def _set_in_path_tear_messages(self, v, load=False):
"""
Setter method for in_path_tear_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_path_tear_messages (yang:counter64)
If this variable is read-only (config: false) in the
source YANG file, then _set_in_path_tear_messages is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_in_path_tear_messages() directly.
YANG Description: Number of received RSVP Path Tear messages
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-path-tear-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """in_path_tear_messages must be of a type compatible with yang:counter64""",
"defined-type": "yang:counter64",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-path-tear-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
}
)
self.__in_path_tear_messages = t
if hasattr(self, "_set"):
self._set()
def _unset_in_path_tear_messages(self):
self.__in_path_tear_messages = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-path-tear-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
def _get_in_reservation_messages(self):
"""
Getter method for in_reservation_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_reservation_messages (yang:counter64)
YANG Description: Number of received RSVP Resv messages
"""
return self.__in_reservation_messages
def _set_in_reservation_messages(self, v, load=False):
"""
Setter method for in_reservation_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_reservation_messages (yang:counter64)
If this variable is read-only (config: false) in the
source YANG file, then _set_in_reservation_messages is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_in_reservation_messages() directly.
YANG Description: Number of received RSVP Resv messages
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-reservation-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """in_reservation_messages must be of a type compatible with yang:counter64""",
"defined-type": "yang:counter64",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-reservation-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
}
)
self.__in_reservation_messages = t
if hasattr(self, "_set"):
self._set()
def _unset_in_reservation_messages(self):
self.__in_reservation_messages = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-reservation-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
def _get_in_reservation_error_messages(self):
"""
Getter method for in_reservation_error_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_reservation_error_messages (yang:counter64)
YANG Description: Number of received RSVP Resv Error messages
"""
return self.__in_reservation_error_messages
def _set_in_reservation_error_messages(self, v, load=False):
"""
Setter method for in_reservation_error_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_reservation_error_messages (yang:counter64)
If this variable is read-only (config: false) in the
source YANG file, then _set_in_reservation_error_messages is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_in_reservation_error_messages() directly.
YANG Description: Number of received RSVP Resv Error messages
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-reservation-error-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """in_reservation_error_messages must be of a type compatible with yang:counter64""",
"defined-type": "yang:counter64",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-reservation-error-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
}
)
self.__in_reservation_error_messages = t
if hasattr(self, "_set"):
self._set()
def _unset_in_reservation_error_messages(self):
self.__in_reservation_error_messages = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-reservation-error-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
def _get_in_reservation_tear_messages(self):
"""
Getter method for in_reservation_tear_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_reservation_tear_messages (yang:counter64)
YANG Description: Number of received RSVP Resv Tear messages
"""
return self.__in_reservation_tear_messages
def _set_in_reservation_tear_messages(self, v, load=False):
"""
Setter method for in_reservation_tear_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_reservation_tear_messages (yang:counter64)
If this variable is read-only (config: false) in the
source YANG file, then _set_in_reservation_tear_messages is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_in_reservation_tear_messages() directly.
YANG Description: Number of received RSVP Resv Tear messages
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-reservation-tear-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """in_reservation_tear_messages must be of a type compatible with yang:counter64""",
"defined-type": "yang:counter64",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-reservation-tear-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
}
)
self.__in_reservation_tear_messages = t
if hasattr(self, "_set"):
self._set()
def _unset_in_reservation_tear_messages(self):
self.__in_reservation_tear_messages = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-reservation-tear-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
def _get_in_hello_messages(self):
"""
Getter method for in_hello_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_hello_messages (yang:counter64)
YANG Description: Number of received RSVP hello messages
"""
return self.__in_hello_messages
def _set_in_hello_messages(self, v, load=False):
"""
Setter method for in_hello_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_hello_messages (yang:counter64)
If this variable is read-only (config: false) in the
source YANG file, then _set_in_hello_messages is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_in_hello_messages() directly.
YANG Description: Number of received RSVP hello messages
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-hello-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """in_hello_messages must be of a type compatible with yang:counter64""",
"defined-type": "yang:counter64",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-hello-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
}
)
self.__in_hello_messages = t
if hasattr(self, "_set"):
self._set()
def _unset_in_hello_messages(self):
self.__in_hello_messages = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-hello-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
def _get_in_srefresh_messages(self):
"""
Getter method for in_srefresh_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_srefresh_messages (yang:counter64)
YANG Description: Number of received RSVP summary refresh messages
"""
return self.__in_srefresh_messages
def _set_in_srefresh_messages(self, v, load=False):
"""
Setter method for in_srefresh_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_srefresh_messages (yang:counter64)
If this variable is read-only (config: false) in the
source YANG file, then _set_in_srefresh_messages is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_in_srefresh_messages() directly.
YANG Description: Number of received RSVP summary refresh messages
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-srefresh-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """in_srefresh_messages must be of a type compatible with yang:counter64""",
"defined-type": "yang:counter64",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-srefresh-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
}
)
self.__in_srefresh_messages = t
if hasattr(self, "_set"):
self._set()
def _unset_in_srefresh_messages(self):
self.__in_srefresh_messages = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-srefresh-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
def _get_in_ack_messages(self):
"""
Getter method for in_ack_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_ack_messages (yang:counter64)
YANG Description: Number of received RSVP refresh reduction ack
messages
"""
return self.__in_ack_messages
def _set_in_ack_messages(self, v, load=False):
"""
Setter method for in_ack_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/in_ack_messages (yang:counter64)
If this variable is read-only (config: false) in the
source YANG file, then _set_in_ack_messages is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_in_ack_messages() directly.
YANG Description: Number of received RSVP refresh reduction ack
messages
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-ack-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """in_ack_messages must be of a type compatible with yang:counter64""",
"defined-type": "yang:counter64",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-ack-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
}
)
self.__in_ack_messages = t
if hasattr(self, "_set"):
self._set()
def _unset_in_ack_messages(self):
self.__in_ack_messages = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-ack-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
def _get_out_path_messages(self):
"""
Getter method for out_path_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_path_messages (yang:counter64)
YANG Description: Number of sent RSVP PATH messages
"""
return self.__out_path_messages
def _set_out_path_messages(self, v, load=False):
"""
Setter method for out_path_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_path_messages (yang:counter64)
If this variable is read-only (config: false) in the
source YANG file, then _set_out_path_messages is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_out_path_messages() directly.
YANG Description: Number of sent RSVP PATH messages
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="out-path-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """out_path_messages must be of a type compatible with yang:counter64""",
"defined-type": "yang:counter64",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-path-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
}
)
self.__out_path_messages = t
if hasattr(self, "_set"):
self._set()
def _unset_out_path_messages(self):
self.__out_path_messages = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="out-path-messages",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
def _get_out_path_error_messages(self):
"""
Getter method for out_path_error_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_path_error_messages (yang:counter64)
YANG Description: Number of sent RSVP Path Error messages
"""
return self.__out_path_error_messages
def _set_out_path_error_messages(self, v, load=False):
    """
    Setter method for out_path_error_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_path_error_messages (yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_out_path_error_messages is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_out_path_error_messages() directly.
    YANG Description: Number of sent RSVP Path Error messages
    """
    # pyangbind-generated setter: unwrap union-typed values, then validate
    # by wrapping the value in a uint64-range-restricted YANGDynClass.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..18446744073709551615"]},
                int_size=64,
            ),
            is_leaf=True,
            yang_name="out-path-error-messages",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter64",
            is_config=False,
        )
    except (TypeError, ValueError):
        # Incompatible value: report the expected YANG type to the caller.
        raise ValueError(
            {
                "error-string": """out_path_error_messages must be of a type compatible with yang:counter64""",
                "defined-type": "yang:counter64",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-path-error-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
            }
        )
    self.__out_path_error_messages = t
    if hasattr(self, "_set"):
        self._set()
def _unset_out_path_error_messages(self):
    """Reset out_path_error_messages to a fresh, value-less counter64 leaf."""
    # pyangbind-generated: rebuild the backing YANGDynClass with no value.
    self.__out_path_error_messages = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..18446744073709551615"]},
            int_size=64,
        ),
        is_leaf=True,
        yang_name="out-path-error-messages",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter64",
        is_config=False,
    )
def _get_out_path_tear_messages(self):
    """
    Getter method for out_path_tear_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_path_tear_messages (yang:counter64)
    YANG Description: Number of sent RSVP Path Tear messages
    """
    # Name-mangled backing value maintained by _set_/_unset_out_path_tear_messages.
    return self.__out_path_tear_messages
def _set_out_path_tear_messages(self, v, load=False):
    """
    Setter method for out_path_tear_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_path_tear_messages (yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_out_path_tear_messages is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_out_path_tear_messages() directly.
    YANG Description: Number of sent RSVP Path Tear messages
    """
    # pyangbind-generated setter: unwrap union-typed values, then validate
    # by wrapping the value in a uint64-range-restricted YANGDynClass.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..18446744073709551615"]},
                int_size=64,
            ),
            is_leaf=True,
            yang_name="out-path-tear-messages",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter64",
            is_config=False,
        )
    except (TypeError, ValueError):
        # Incompatible value: report the expected YANG type to the caller.
        raise ValueError(
            {
                "error-string": """out_path_tear_messages must be of a type compatible with yang:counter64""",
                "defined-type": "yang:counter64",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-path-tear-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
            }
        )
    self.__out_path_tear_messages = t
    if hasattr(self, "_set"):
        self._set()
def _unset_out_path_tear_messages(self):
    """Reset out_path_tear_messages to a fresh, value-less counter64 leaf."""
    # pyangbind-generated: rebuild the backing YANGDynClass with no value.
    self.__out_path_tear_messages = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..18446744073709551615"]},
            int_size=64,
        ),
        is_leaf=True,
        yang_name="out-path-tear-messages",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter64",
        is_config=False,
    )
def _get_out_reservation_messages(self):
    """
    Getter method for out_reservation_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_reservation_messages (yang:counter64)
    YANG Description: Number of sent RSVP Resv messages
    """
    # Name-mangled backing value maintained by _set_/_unset_out_reservation_messages.
    return self.__out_reservation_messages
def _set_out_reservation_messages(self, v, load=False):
    """
    Setter method for out_reservation_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_reservation_messages (yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_out_reservation_messages is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_out_reservation_messages() directly.
    YANG Description: Number of sent RSVP Resv messages
    """
    # pyangbind-generated setter: unwrap union-typed values, then validate
    # by wrapping the value in a uint64-range-restricted YANGDynClass.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..18446744073709551615"]},
                int_size=64,
            ),
            is_leaf=True,
            yang_name="out-reservation-messages",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter64",
            is_config=False,
        )
    except (TypeError, ValueError):
        # Incompatible value: report the expected YANG type to the caller.
        raise ValueError(
            {
                "error-string": """out_reservation_messages must be of a type compatible with yang:counter64""",
                "defined-type": "yang:counter64",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-reservation-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
            }
        )
    self.__out_reservation_messages = t
    if hasattr(self, "_set"):
        self._set()
def _unset_out_reservation_messages(self):
    """Reset out_reservation_messages to a fresh, value-less counter64 leaf."""
    # pyangbind-generated: rebuild the backing YANGDynClass with no value.
    self.__out_reservation_messages = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..18446744073709551615"]},
            int_size=64,
        ),
        is_leaf=True,
        yang_name="out-reservation-messages",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter64",
        is_config=False,
    )
def _get_out_reservation_error_messages(self):
    """
    Getter method for out_reservation_error_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_reservation_error_messages (yang:counter64)
    YANG Description: Number of sent RSVP Resv Error messages
    """
    # Name-mangled backing value maintained by the matching _set_/_unset_ methods.
    return self.__out_reservation_error_messages
def _set_out_reservation_error_messages(self, v, load=False):
    """
    Setter method for out_reservation_error_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_reservation_error_messages (yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_out_reservation_error_messages is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_out_reservation_error_messages() directly.
    YANG Description: Number of sent RSVP Resv Error messages
    """
    # pyangbind-generated setter: unwrap union-typed values, then validate
    # by wrapping the value in a uint64-range-restricted YANGDynClass.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..18446744073709551615"]},
                int_size=64,
            ),
            is_leaf=True,
            yang_name="out-reservation-error-messages",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter64",
            is_config=False,
        )
    except (TypeError, ValueError):
        # Incompatible value: report the expected YANG type to the caller.
        raise ValueError(
            {
                "error-string": """out_reservation_error_messages must be of a type compatible with yang:counter64""",
                "defined-type": "yang:counter64",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-reservation-error-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
            }
        )
    self.__out_reservation_error_messages = t
    if hasattr(self, "_set"):
        self._set()
def _unset_out_reservation_error_messages(self):
    """Reset out_reservation_error_messages to a fresh, value-less counter64 leaf."""
    # pyangbind-generated: rebuild the backing YANGDynClass with no value.
    self.__out_reservation_error_messages = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..18446744073709551615"]},
            int_size=64,
        ),
        is_leaf=True,
        yang_name="out-reservation-error-messages",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter64",
        is_config=False,
    )
def _get_out_reservation_tear_messages(self):
    """
    Getter method for out_reservation_tear_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_reservation_tear_messages (yang:counter64)
    YANG Description: Number of sent RSVP Resv Tear messages
    """
    # Name-mangled backing value maintained by the matching _set_/_unset_ methods.
    return self.__out_reservation_tear_messages
def _set_out_reservation_tear_messages(self, v, load=False):
    """
    Setter method for out_reservation_tear_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_reservation_tear_messages (yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_out_reservation_tear_messages is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_out_reservation_tear_messages() directly.
    YANG Description: Number of sent RSVP Resv Tear messages
    """
    # pyangbind-generated setter: unwrap union-typed values, then validate
    # by wrapping the value in a uint64-range-restricted YANGDynClass.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..18446744073709551615"]},
                int_size=64,
            ),
            is_leaf=True,
            yang_name="out-reservation-tear-messages",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter64",
            is_config=False,
        )
    except (TypeError, ValueError):
        # Incompatible value: report the expected YANG type to the caller.
        raise ValueError(
            {
                "error-string": """out_reservation_tear_messages must be of a type compatible with yang:counter64""",
                "defined-type": "yang:counter64",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-reservation-tear-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
            }
        )
    self.__out_reservation_tear_messages = t
    if hasattr(self, "_set"):
        self._set()
def _unset_out_reservation_tear_messages(self):
    """Reset out_reservation_tear_messages to a fresh, value-less counter64 leaf."""
    # pyangbind-generated: rebuild the backing YANGDynClass with no value.
    self.__out_reservation_tear_messages = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..18446744073709551615"]},
            int_size=64,
        ),
        is_leaf=True,
        yang_name="out-reservation-tear-messages",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter64",
        is_config=False,
    )
def _get_out_hello_messages(self):
    """
    Getter method for out_hello_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_hello_messages (yang:counter64)
    YANG Description: Number of sent RSVP hello messages
    """
    # Name-mangled backing value maintained by _set_/_unset_out_hello_messages.
    return self.__out_hello_messages
def _set_out_hello_messages(self, v, load=False):
    """
    Setter method for out_hello_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_hello_messages (yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_out_hello_messages is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_out_hello_messages() directly.
    YANG Description: Number of sent RSVP hello messages
    """
    # pyangbind-generated setter: unwrap union-typed values, then validate
    # by wrapping the value in a uint64-range-restricted YANGDynClass.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..18446744073709551615"]},
                int_size=64,
            ),
            is_leaf=True,
            yang_name="out-hello-messages",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter64",
            is_config=False,
        )
    except (TypeError, ValueError):
        # Incompatible value: report the expected YANG type to the caller.
        raise ValueError(
            {
                "error-string": """out_hello_messages must be of a type compatible with yang:counter64""",
                "defined-type": "yang:counter64",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-hello-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
            }
        )
    self.__out_hello_messages = t
    if hasattr(self, "_set"):
        self._set()
def _unset_out_hello_messages(self):
    """Reset out_hello_messages to a fresh, value-less counter64 leaf."""
    # pyangbind-generated: rebuild the backing YANGDynClass with no value.
    self.__out_hello_messages = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..18446744073709551615"]},
            int_size=64,
        ),
        is_leaf=True,
        yang_name="out-hello-messages",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter64",
        is_config=False,
    )
def _get_out_srefresh_messages(self):
    """
    Getter method for out_srefresh_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_srefresh_messages (yang:counter64)
    YANG Description: Number of sent RSVP summary refresh messages
    """
    # Name-mangled backing value maintained by _set_/_unset_out_srefresh_messages.
    return self.__out_srefresh_messages
def _set_out_srefresh_messages(self, v, load=False):
    """
    Setter method for out_srefresh_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_srefresh_messages (yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_out_srefresh_messages is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_out_srefresh_messages() directly.
    YANG Description: Number of sent RSVP summary refresh messages
    """
    # pyangbind-generated setter: unwrap union-typed values, then validate
    # by wrapping the value in a uint64-range-restricted YANGDynClass.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..18446744073709551615"]},
                int_size=64,
            ),
            is_leaf=True,
            yang_name="out-srefresh-messages",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter64",
            is_config=False,
        )
    except (TypeError, ValueError):
        # Incompatible value: report the expected YANG type to the caller.
        raise ValueError(
            {
                "error-string": """out_srefresh_messages must be of a type compatible with yang:counter64""",
                "defined-type": "yang:counter64",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-srefresh-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
            }
        )
    self.__out_srefresh_messages = t
    if hasattr(self, "_set"):
        self._set()
def _unset_out_srefresh_messages(self):
    """Reset out_srefresh_messages to a fresh, value-less counter64 leaf."""
    # pyangbind-generated: rebuild the backing YANGDynClass with no value.
    self.__out_srefresh_messages = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..18446744073709551615"]},
            int_size=64,
        ),
        is_leaf=True,
        yang_name="out-srefresh-messages",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter64",
        is_config=False,
    )
def _get_out_ack_messages(self):
    """
    Getter method for out_ack_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_ack_messages (yang:counter64)
    YANG Description: Number of sent RSVP refresh reduction ack messages
    """
    # Name-mangled backing value maintained by _set_/_unset_out_ack_messages.
    return self.__out_ack_messages
def _set_out_ack_messages(self, v, load=False):
    """
    Setter method for out_ack_messages, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/state/counters/out_ack_messages (yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_out_ack_messages is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_out_ack_messages() directly.
    YANG Description: Number of sent RSVP refresh reduction ack messages
    """
    # pyangbind-generated setter: unwrap union-typed values, then validate
    # by wrapping the value in a uint64-range-restricted YANGDynClass.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..18446744073709551615"]},
                int_size=64,
            ),
            is_leaf=True,
            yang_name="out-ack-messages",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter64",
            is_config=False,
        )
    except (TypeError, ValueError):
        # Incompatible value: report the expected YANG type to the caller.
        raise ValueError(
            {
                "error-string": """out_ack_messages must be of a type compatible with yang:counter64""",
                "defined-type": "yang:counter64",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-ack-messages", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
            }
        )
    self.__out_ack_messages = t
    if hasattr(self, "_set"):
        self._set()
def _unset_out_ack_messages(self):
    """Reset out_ack_messages to a fresh, value-less counter64 leaf."""
    # pyangbind-generated: rebuild the backing YANGDynClass with no value.
    self.__out_ack_messages = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..18446744073709551615"]},
            int_size=64,
        ),
        is_leaf=True,
        yang_name="out-ack-messages",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter64",
        is_config=False,
    )
# These counters are state data (config: false), so only read-only
# properties backed by the getters are exposed; the _set_* methods
# stay private and are called by backends directly.
in_path_messages = __builtin__.property(_get_in_path_messages)
in_path_error_messages = __builtin__.property(_get_in_path_error_messages)
in_path_tear_messages = __builtin__.property(_get_in_path_tear_messages)
in_reservation_messages = __builtin__.property(_get_in_reservation_messages)
in_reservation_error_messages = __builtin__.property(
    _get_in_reservation_error_messages
)
in_reservation_tear_messages = __builtin__.property(
    _get_in_reservation_tear_messages
)
in_hello_messages = __builtin__.property(_get_in_hello_messages)
in_srefresh_messages = __builtin__.property(_get_in_srefresh_messages)
in_ack_messages = __builtin__.property(_get_in_ack_messages)
out_path_messages = __builtin__.property(_get_out_path_messages)
out_path_error_messages = __builtin__.property(_get_out_path_error_messages)
out_path_tear_messages = __builtin__.property(_get_out_path_tear_messages)
out_reservation_messages = __builtin__.property(_get_out_reservation_messages)
out_reservation_error_messages = __builtin__.property(
    _get_out_reservation_error_messages
)
out_reservation_tear_messages = __builtin__.property(
    _get_out_reservation_tear_messages
)
out_hello_messages = __builtin__.property(_get_out_hello_messages)
out_srefresh_messages = __builtin__.property(_get_out_srefresh_messages)
out_ack_messages = __builtin__.property(_get_out_ack_messages)
# Registry of this container's child leaves, in the order defined above;
# used by pyangbind's serialisation/traversal helpers.
_pyangbind_elements = OrderedDict(
    [
        ("in_path_messages", in_path_messages),
        ("in_path_error_messages", in_path_error_messages),
        ("in_path_tear_messages", in_path_tear_messages),
        ("in_reservation_messages", in_reservation_messages),
        ("in_reservation_error_messages", in_reservation_error_messages),
        ("in_reservation_tear_messages", in_reservation_tear_messages),
        ("in_hello_messages", in_hello_messages),
        ("in_srefresh_messages", in_srefresh_messages),
        ("in_ack_messages", in_ack_messages),
        ("out_path_messages", out_path_messages),
        ("out_path_error_messages", out_path_error_messages),
        ("out_path_tear_messages", out_path_tear_messages),
        ("out_reservation_messages", out_reservation_messages),
        ("out_reservation_error_messages", out_reservation_error_messages),
        ("out_reservation_tear_messages", out_reservation_tear_messages),
        ("out_hello_messages", out_hello_messages),
        ("out_srefresh_messages", out_srefresh_messages),
        ("out_ack_messages", out_ack_messages),
    ]
)
| 45.636179
| 471
| 0.620829
| 16,422
| 157,673
| 5.652296
| 0.011874
| 0.059145
| 0.04374
| 0.04876
| 0.99596
| 0.993622
| 0.993622
| 0.993622
| 0.993622
| 0.993622
| 0
| 0.034608
| 0.284189
| 157,673
| 3,454
| 472
| 45.649392
| 0.787815
| 0.193648
| 0
| 0.908696
| 0
| 0.013043
| 0.29077
| 0.148563
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04058
| false
| 0
| 0.005435
| 0
| 0.077174
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2c88e11b498c86608a077788e91de61bd3391106
| 58,981
|
py
|
Python
|
auto_process_ngs/test/commands/test_publish_qc_cmd.py
|
fls-bioinformatics-core/auto_process_ngs
|
1f07a08e14f118e6a61d3f37130515efc6049dd7
|
[
"AFL-3.0"
] | 5
|
2017-01-31T21:37:09.000Z
|
2022-03-17T19:26:29.000Z
|
auto_process_ngs/test/commands/test_publish_qc_cmd.py
|
fls-bioinformatics-core/auto_process_ngs
|
1f07a08e14f118e6a61d3f37130515efc6049dd7
|
[
"AFL-3.0"
] | 294
|
2015-08-14T09:00:30.000Z
|
2022-03-18T10:17:05.000Z
|
auto_process_ngs/test/commands/test_publish_qc_cmd.py
|
fls-bioinformatics-core/auto_process_ngs
|
1f07a08e14f118e6a61d3f37130515efc6049dd7
|
[
"AFL-3.0"
] | 7
|
2017-11-23T07:52:21.000Z
|
2020-07-15T10:12:05.000Z
|
#######################################################################
# Tests for publish_qc_cmd.py module
#######################################################################
import unittest
import tempfile
import shutil
import os
from auto_process_ngs.settings import Settings
from auto_process_ngs.auto_processor import AutoProcess
from auto_process_ngs.mock import MockAnalysisDirFactory
from auto_process_ngs.mock import MockAnalysisProject
from auto_process_ngs.mock import UpdateAnalysisDir
from auto_process_ngs.mock import UpdateAnalysisProject
from auto_process_ngs.mock import MockMultiQC
from auto_process_ngs.commands.publish_qc_cmd import publish_qc
# Set to False to keep the temporary test output directories on disk
# (useful for post-mortem inspection of failing tests).
REMOVE_TEST_OUTPUTS = True
class TestAutoProcessPublishQc(unittest.TestCase):
"""
Tests for AutoProcess.publish_qc
"""
def setUp(self):
    """Create a scratch working area and settings for each test."""
    # Scratch directory that all test artefacts live under
    self.dirn = tempfile.mkdtemp(suffix='TestAutoProcessPublishQc')
    # Settings instance with a short polling interval so the unit
    # tests run quickly
    settings_ini = os.path.join(self.dirn, "auto_process.ini")
    with open(settings_ini, 'w') as fp:
        fp.write("""[general]
poll_interval = 0.5
""")
    self.settings = Settings(settings_ini)
    # Temporary 'bin' directory for mock executables
    self.bin = os.path.join(self.dirn, "bin")
    os.mkdir(self.bin)
    # Remember the original cwd and PATH so tearDown can restore them
    self.pwd = os.getcwd()
    self.path = os.environ['PATH']
    # Run each test from inside the scratch directory
    os.chdir(self.dirn)
    # Placeholders for test objects
    self.ap = None
def tearDown(self):
    """Restore state and remove the scratch working area."""
    # Drop the autoprocessor instance, if one was created
    if self.ap is not None:
        del self.ap
    # Go back to where the test run started from
    os.chdir(self.pwd)
    # Clean up outputs unless they are being kept for debugging
    if REMOVE_TEST_OUTPUTS:
        shutil.rmtree(self.dirn)
def test_publish_qc_metadata_missing(self):
    """publish_qc: raise exception if metadata not set
    """
    # Mock analysis dir with only a datestamp in the metadata
    mockdir = MockAnalysisDirFactory.bcl2fastq2(
        '160621_M00879_0087_000000000-AGEW9',
        'miseq',
        metadata={ "instrument_datestamp": "160621" },
        top_dir=self.dirn)
    mockdir.create(no_project_dirs=True)
    ap = AutoProcess(mockdir.dirn, settings=self.settings)
    # Mock publication area
    pub_dir = os.path.join(self.dirn, 'QC')
    os.mkdir(pub_dir)
    # Publishing should fail because required metadata is missing
    self.assertRaises(Exception,
                      publish_qc,
                      ap,
                      location=pub_dir)
def test_publish_qc_processing_qc(self):
    """publish_qc: processing QC report only
    """
    # Mock analysis dir with full metadata but no project dirs
    mockdir = MockAnalysisDirFactory.bcl2fastq2(
        '160621_K00879_0087_000000000-AGEW9',
        'hiseq',
        metadata={ "run_number": 87,
                   "source": "local",
                   "instrument_datestamp": "160621" },
        top_dir=self.dirn)
    mockdir.create(no_project_dirs=True)
    ap = AutoProcess(mockdir.dirn, settings=self.settings)
    # Only a processing QC report is present
    UpdateAnalysisDir(ap).add_processing_report()
    # Mock publication area
    pub_dir = os.path.join(self.dirn, 'QC')
    os.mkdir(pub_dir)
    # Publish QC
    publish_qc(ap, location=pub_dir)
    # Verify the published files
    run_dir = "160621_K00879_0087_000000000-AGEW9_analysis"
    for name in ("index.html", "processing_qc.html"):
        target = os.path.join(pub_dir, run_dir, name)
        self.assertTrue(os.path.exists(target), "Missing %s" % target)
def test_publish_qc_multiple_processing_qc(self):
    """publish_qc: handle multiple processing QC reports
    """
    # Mock analysis dir with full metadata but no project dirs
    mockdir = MockAnalysisDirFactory.bcl2fastq2(
        '160621_K00879_0087_000000000-AGEW9',
        'hiseq',
        metadata={ "run_number": 87,
                   "source": "local",
                   "instrument_datestamp": "160621" },
        top_dir=self.dirn)
    mockdir.create(no_project_dirs=True)
    ap = AutoProcess(mockdir.dirn, settings=self.settings)
    # Two processing reports with non-standard names
    for report in ("processing_qc_v1.html", "processing_qc_v2.html"):
        UpdateAnalysisDir(ap).add_processing_report(report)
    # Mock publication area
    pub_dir = os.path.join(self.dirn, 'QC')
    os.mkdir(pub_dir)
    # Publish QC
    publish_qc(ap, location=pub_dir)
    # Both reports (plus the index) should be published
    run_dir = "160621_K00879_0087_000000000-AGEW9_analysis"
    for name in ("index.html",
                 "processing_qc_v1.html",
                 "processing_qc_v2.html"):
        target = os.path.join(pub_dir, run_dir, name)
        self.assertTrue(os.path.exists(target), "Missing %s" % target)
def test_publish_qc_barcode_analysis(self):
    """publish_qc: barcode analysis outputs
    """
    # Mock analysis dir with full metadata but no project dirs
    mockdir = MockAnalysisDirFactory.bcl2fastq2(
        '160621_K00879_0087_000000000-AGEW9',
        'hiseq',
        metadata={ "run_number": 87,
                   "source": "local",
                   "instrument_datestamp": "160621" },
        top_dir=self.dirn)
    mockdir.create(no_project_dirs=True)
    ap = AutoProcess(mockdir.dirn, settings=self.settings)
    # Processing report plus barcode analysis outputs
    UpdateAnalysisDir(ap).add_processing_report()
    UpdateAnalysisDir(ap).add_barcode_analysis()
    # Mock publication area
    pub_dir = os.path.join(self.dirn, 'QC')
    os.mkdir(pub_dir)
    # Publish QC
    publish_qc(ap, location=pub_dir)
    # Top-level reports plus the barcode analysis files
    checks = ["index.html", "processing_qc.html"]
    for name in ("barcodes.report", "barcodes.xls", "barcodes.html"):
        checks.append(os.path.join("barcodes", name))
    run_dir = "160621_K00879_0087_000000000-AGEW9_analysis"
    for name in checks:
        target = os.path.join(pub_dir, run_dir, name)
        self.assertTrue(os.path.exists(target), "Missing %s" % target)
def test_publish_qc_multiple_barcode_analyses(self):
    """publish_qc: handle multiple barcode analyses outputs
    """
    # Mock analysis dir with full metadata but no project dirs
    mockdir = MockAnalysisDirFactory.bcl2fastq2(
        '160621_K00879_0087_000000000-AGEW9',
        'hiseq',
        metadata={ "run_number": 87,
                   "source": "local",
                   "instrument_datestamp": "160621" },
        top_dir=self.dirn)
    mockdir.create(no_project_dirs=True)
    ap = AutoProcess(mockdir.dirn, settings=self.settings)
    # Processing report plus two barcode analysis directories
    UpdateAnalysisDir(ap).add_processing_report()
    for subdir in ("barcode_analysis_v1", "barcode_analysis_v2"):
        UpdateAnalysisDir(ap).add_barcode_analysis(subdir)
    # Mock publication area
    pub_dir = os.path.join(self.dirn, 'QC')
    os.mkdir(pub_dir)
    # Publish QC
    publish_qc(ap, location=pub_dir)
    # Top-level reports plus every barcode file in both directories
    checks = ["index.html", "processing_qc.html"]
    for subdir in ("barcode_analysis_v1", "barcode_analysis_v2"):
        for name in ("barcodes.report", "barcodes.xls", "barcodes.html"):
            checks.append(os.path.join(subdir, name))
    run_dir = "160621_K00879_0087_000000000-AGEW9_analysis"
    for name in checks:
        target = os.path.join(pub_dir, run_dir, name)
        self.assertTrue(os.path.exists(target), "Missing %s" % target)
def test_publish_qc_with_projects_no_multiqc(self):
    """publish_qc: projects with QC outputs (no MultiQC)
    """
    # Mock analysis dir including project directories
    mockdir = MockAnalysisDirFactory.bcl2fastq2(
        '160621_K00879_0087_000000000-AGEW9',
        'hiseq',
        metadata={ "run_number": 87,
                   "source": "local",
                   "instrument_datestamp": "160621" },
        top_dir=self.dirn)
    mockdir.create()
    ap = AutoProcess(mockdir.dirn, settings=self.settings)
    # Processing report plus per-project QC (MultiQC disabled)
    UpdateAnalysisDir(ap).add_processing_report()
    for project in ap.get_analysis_projects():
        UpdateAnalysisProject(project).add_qc_outputs(
            include_multiqc=False)
    # Mock publication area
    pub_dir = os.path.join(self.dirn, 'QC')
    os.mkdir(pub_dir)
    # Publish
    publish_qc(ap, location=pub_dir)
    run_dir = "160621_K00879_0087_000000000-AGEW9_analysis"
    # Processing QC report must have been copied over
    self.assertTrue(
        os.path.exists(
            os.path.join(pub_dir, run_dir, "processing_qc.html")))
    # Top-level reports plus per-project QC artefacts
    checks = ["index.html", "processing_qc.html"]
    for project in ap.get_analysis_projects():
        project_qc = "qc_report.%s.%s" % (project.name,
                                          project.info.run)
        checks.append(project_qc)
        checks.append("%s.zip" % project_qc)
        checks.append(os.path.join(project_qc, "qc_report.html"))
        checks.append(os.path.join(project_qc, "qc"))
    for name in checks:
        target = os.path.join(pub_dir, run_dir, name)
        self.assertTrue(os.path.exists(target), "Missing %s" % target)
def test_publish_qc_with_projects(self):
    """publish_qc: projects with all QC outputs
    """
    # Build a mock auto-process analysis directory
    mockdir = MockAnalysisDirFactory.bcl2fastq2(
        '160621_K00879_0087_000000000-AGEW9',
        'hiseq',
        metadata={ "run_number": 87,
                   "source": "local",
                   "instrument_datestamp": "160621" },
        top_dir=self.dirn)
    mockdir.create()
    ap = AutoProcess(mockdir.dirn, settings=self.settings)
    # Attach processing report and full QC for each project
    UpdateAnalysisDir(ap).add_processing_report()
    for project in ap.get_analysis_projects():
        UpdateAnalysisProject(project).add_qc_outputs()
    # Create a destination area and publish
    publication_dir = os.path.join(self.dirn,'QC')
    os.mkdir(publication_dir)
    publish_qc(ap,location=publication_dir)
    # Assemble the expected outputs and verify each exists
    expected = ["index.html",
                "processing_qc.html"]
    for project in ap.get_analysis_projects():
        # Standard per-project QC artefacts
        prefix = "qc_report.%s.%s" % (project.name,
                                      project.info.run)
        expected.extend([prefix,
                         "%s.zip" % prefix,
                         os.path.join(prefix,"qc_report.html"),
                         os.path.join(prefix,"qc")])
    for item in expected:
        target = os.path.join(
            publication_dir,
            "160621_K00879_0087_000000000-AGEW9_analysis",
            item)
        self.assertTrue(os.path.exists(target),"Missing %s" % target)
def test_publish_qc_with_projects_old_zip_names(self):
    """publish_qc: projects with all QC outputs (old-style ZIP names)
    """
    # Build a mock auto-process analysis directory
    mockdir = MockAnalysisDirFactory.bcl2fastq2(
        '160621_K00879_0087_000000000-AGEW9',
        'hiseq',
        metadata={ "run_number": 87,
                   "source": "local",
                   "instrument_datestamp": "160621" },
        top_dir=self.dirn)
    mockdir.create()
    ap = AutoProcess(mockdir.dirn, settings=self.settings)
    # Attach processing report; QC uses legacy ZIP naming
    UpdateAnalysisDir(ap).add_processing_report()
    for project in ap.get_analysis_projects():
        UpdateAnalysisProject(project).add_qc_outputs(legacy_zip_name=True)
    # Create a destination area and publish
    publication_dir = os.path.join(self.dirn,'QC')
    os.mkdir(publication_dir)
    publish_qc(ap,location=publication_dir)
    # Assemble expected outputs; old-style names embed the
    # analysis directory basename rather than the run id
    expected = ["index.html",
                "processing_qc.html"]
    for project in ap.get_analysis_projects():
        prefix = "qc_report.%s.%s" % (project.name,
                                      os.path.basename(ap.analysis_dir))
        expected.extend([prefix,
                         "%s.zip" % prefix,
                         os.path.join(prefix,"qc_report.html"),
                         os.path.join(prefix,"qc")])
    for item in expected:
        target = os.path.join(
            publication_dir,
            "160621_K00879_0087_000000000-AGEW9_analysis",
            item)
        self.assertTrue(os.path.exists(target),"Missing %s" % target)
def test_publish_qc_with_projects_no_reports(self):
    """publish_qc: projects with all QC outputs but no reports
    """
    # Build a mock auto-process analysis directory
    mockdir = MockAnalysisDirFactory.bcl2fastq2(
        '160621_K00879_0087_000000000-AGEW9',
        'hiseq',
        metadata={ "run_number": 87,
                   "source": "local",
                   "instrument_datestamp": "160621" },
        top_dir=self.dirn)
    mockdir.create()
    ap = AutoProcess(mockdir.dirn, settings=self.settings)
    # Attach processing report and full QC for each project
    UpdateAnalysisDir(ap).add_processing_report()
    for project in ap.get_analysis_projects():
        UpdateAnalysisProject(project).add_qc_outputs()
    # Strip out the generated QC reports so publish_qc
    # has to regenerate them
    for project in ap.get_analysis_projects():
        qc_reports = ["qc_report.%s.%s.zip" % (project.name,
                                               project.info.run),
                      "qc_report.html",
                      "multiqc_report.html"]
        for f in qc_reports:
            os.remove(os.path.join(project.dirn,f))
    # Put a mock multiqc executable on the PATH
    MockMultiQC.create(os.path.join(self.bin,"multiqc"))
    os.environ['PATH'] = "%s:%s" % (self.bin,
                                    os.environ['PATH'])
    # Create a destination area and publish
    publication_dir = os.path.join(self.dirn,'QC')
    os.mkdir(publication_dir)
    publish_qc(ap,location=publication_dir)
    # Assemble the expected outputs and verify each exists
    expected = ["index.html",
                "processing_qc.html"]
    for project in ap.get_analysis_projects():
        # Standard per-project QC artefacts
        prefix = "qc_report.%s.%s" % (project.name,
                                      project.info.run)
        expected.extend([prefix,
                         "%s.zip" % prefix,
                         os.path.join(prefix,"qc_report.html"),
                         os.path.join(prefix,"qc")])
    for item in expected:
        target = os.path.join(
            publication_dir,
            "160621_K00879_0087_000000000-AGEW9_analysis",
            item)
        self.assertTrue(os.path.exists(target),"Missing %s" % target)
def test_publish_qc_with_projects_with_multiple_fastq_sets(self):
    """publish_qc: projects with multiple Fastq sets
    """
    # Make an auto-process directory
    mockdir = MockAnalysisDirFactory.bcl2fastq2(
        '160621_K00879_0087_000000000-AGEW9',
        'hiseq',
        metadata={ "run_number": 87,
                   "source": "local",
                   "instrument_datestamp": "160621" },
        top_dir=self.dirn)
    mockdir.create()
    ap = AutoProcess(mockdir.dirn,
                     settings=self.settings)
    # Add processing report and QC outputs
    UpdateAnalysisDir(ap).add_processing_report()
    for project in ap.get_analysis_projects():
        UpdateAnalysisProject(project).add_qc_outputs()
    # Add additional fastq set for first project
    multi_fastqs_project = ap.get_analysis_projects()[0]
    UpdateAnalysisProject(multi_fastqs_project).add_fastq_set(
        "fastqs.extra",
        ("Alt1.r1.fastq.gz","Alt2.r1.fastq.gz"))
    UpdateAnalysisProject(multi_fastqs_project).add_qc_outputs(
        fastq_set="fastqs.extra",
        qc_dir="qc.extra")
    # Make a mock publication area
    publication_dir = os.path.join(self.dirn,'QC')
    os.mkdir(publication_dir)
    # Publish
    publish_qc(ap,location=publication_dir)
    # Check outputs
    outputs = ["index.html",
               "processing_qc.html"]
    for project in ap.get_analysis_projects():
        # Standard QC outputs
        project_qc = "qc_report.%s.%s" % (project.name,
                                          project.info.run)
        outputs.append(project_qc)
        outputs.append("%s.zip" % project_qc)
        outputs.append(os.path.join(project_qc,"qc_report.html"))
        outputs.append(os.path.join(project_qc,"qc"))
    # Additional QC for second fastq set in first project
    # NB previously this was (redundantly) appended once per
    # project inside the loop above, and picked up the loop
    # variable's run id; it belongs outside the loop and should
    # use the multi-fastq project's own run id
    project_qc = "qc.extra_report.%s.%s" % (multi_fastqs_project.name,
                                            multi_fastqs_project.info.run)
    outputs.append(project_qc)
    outputs.append("%s.zip" % project_qc)
    outputs.append(os.path.join(project_qc,"qc.extra_report.html"))
    outputs.append(os.path.join(project_qc,"qc.extra"))
    # Verify everything was published
    for item in outputs:
        f = os.path.join(publication_dir,
                         "160621_K00879_0087_000000000-AGEW9_analysis",
                         item)
        self.assertTrue(os.path.exists(f),"Missing %s" % f)
def test_publish_qc_with_project_missing_qc(self):
    """publish_qc: raises exception if project has missing QC
    """
    # Build a mock auto-process analysis directory
    mockdir = MockAnalysisDirFactory.bcl2fastq2(
        '160621_K00879_0087_000000000-AGEW9',
        'hiseq',
        metadata={ "run_number": 87,
                   "source": "local",
                   "instrument_datestamp": "160621" },
        top_dir=self.dirn)
    mockdir.create()
    ap = AutoProcess(mockdir.dirn, settings=self.settings)
    # Attach processing report
    UpdateAnalysisDir(ap).add_processing_report()
    # Only give QC outputs to all but the first project,
    # leaving one project with QC missing
    for project in ap.get_analysis_projects()[1:]:
        UpdateAnalysisProject(project).add_qc_outputs()
    # Create a destination area
    publication_dir = os.path.join(self.dirn,'QC')
    os.mkdir(publication_dir)
    # Publication should fail on the missing QC
    self.assertRaises(Exception,
                      publish_qc,
                      ap,
                      location=publication_dir)
def test_publish_qc_ignore_project_missing_qc(self):
    """publish_qc: skip project with missing QC
    """
    # Build a mock auto-process analysis directory
    mockdir = MockAnalysisDirFactory.bcl2fastq2(
        '160621_K00879_0087_000000000-AGEW9',
        'hiseq',
        metadata={ "run_number": 87,
                   "source": "local",
                   "instrument_datestamp": "160621" },
        top_dir=self.dirn)
    mockdir.create()
    ap = AutoProcess(mockdir.dirn, settings=self.settings)
    # Attach processing report
    UpdateAnalysisDir(ap).add_processing_report()
    # Only give QC outputs to all but the first project;
    # remember which one was left without QC
    all_projects = ap.get_analysis_projects()
    missing_project = all_projects[0]
    projects = all_projects[1:]
    for project in projects:
        UpdateAnalysisProject(project).add_qc_outputs()
    # Create a destination area and publish, ignoring
    # the project with missing QC
    publication_dir = os.path.join(self.dirn,'QC')
    os.mkdir(publication_dir)
    publish_qc(ap,location=publication_dir,
               ignore_missing_qc=True)
    # Assemble the expected outputs and verify each exists
    expected = ["index.html",
                "processing_qc.html"]
    for project in projects:
        prefix = "qc_report.%s.%s" % (project.name,
                                      project.info.run)
        expected.extend([prefix,
                         "%s.zip" % prefix,
                         os.path.join(prefix,"qc_report.html"),
                         os.path.join(prefix,"qc")])
    for item in expected:
        target = os.path.join(
            publication_dir,
            "160621_K00879_0087_000000000-AGEW9_analysis",
            item)
        self.assertTrue(os.path.exists(target),"Missing %s" % target)
    # The skipped project must not have been copied over
    self.assertFalse(os.path.exists(
        os.path.join(publication_dir,
                     "160621_K00879_0087_000000000-AGEW9_analysis",
                     "qc_report.%s.%s" % (missing_project.name,
                                          os.path.basename(
                                              ap.analysis_dir)))))
def test_publish_qc_ignore_additional_project_dir(self):
    """publish_qc: ignore project-like directory not in projects.info
    """
    # Build a mock auto-process analysis directory
    mockdir = MockAnalysisDirFactory.bcl2fastq2(
        '160621_K00879_0087_000000000-AGEW9',
        'hiseq',
        metadata={ "run_number": 87,
                   "source": "local",
                   "instrument_datestamp": "160621" },
        top_dir=self.dirn)
    mockdir.create()
    ap = AutoProcess(mockdir.dirn, settings=self.settings)
    # Attach processing report and QC for every real project
    UpdateAnalysisDir(ap).add_processing_report()
    projects = ap.get_analysis_projects()
    for project in projects:
        UpdateAnalysisProject(project).add_qc_outputs()
    # Drop in a directory that looks like a project but is
    # not listed in projects.info
    project_like = MockAnalysisProject('additional_dir',
                                       ('random1_R1.fastq',
                                        'random2_R1.fastq',))
    project_like.create(top_dir=mockdir.dirn)
    # Create a destination area and publish
    publication_dir = os.path.join(self.dirn,'QC')
    os.mkdir(publication_dir)
    publish_qc(ap,location=publication_dir)
    # Assemble the expected outputs and verify each exists
    expected = ["index.html",
                "processing_qc.html"]
    for project in projects:
        prefix = "qc_report.%s.%s" % (project.name,
                                      project.info.run)
        expected.extend([prefix,
                         "%s.zip" % prefix,
                         os.path.join(prefix,"qc_report.html"),
                         os.path.join(prefix,"qc")])
    for item in expected:
        target = os.path.join(
            publication_dir,
            "160621_K00879_0087_000000000-AGEW9_analysis",
            item)
        self.assertTrue(os.path.exists(target),"Missing %s" % target)
def test_publish_qc_subset_of_projects(self):
    """publish_qc: only publish subset of projects
    """
    # Build a mock auto-process analysis directory
    mockdir = MockAnalysisDirFactory.bcl2fastq2(
        '160621_K00879_0087_000000000-AGEW9',
        'hiseq',
        metadata={ "run_number": 87,
                   "source": "local",
                   "instrument_datestamp": "160621" },
        top_dir=self.dirn)
    mockdir.create()
    ap = AutoProcess(mockdir.dirn, settings=self.settings)
    # Attach processing report
    UpdateAnalysisDir(ap).add_processing_report()
    # Split projects into the subset we expect to publish
    # (matched by the "AB*" pattern below) and the rest
    all_projects = ap.get_analysis_projects()
    missing_projects = all_projects[1:]
    projects = all_projects[0:1]
    # Every project gets QC outputs regardless
    for project in all_projects:
        UpdateAnalysisProject(project).add_qc_outputs()
    # Create a destination area and publish only "AB*"
    publication_dir = os.path.join(self.dirn,'QC')
    os.mkdir(publication_dir)
    publish_qc(ap,location=publication_dir,
               projects="AB*")
    # Assemble the expected outputs and verify each exists
    expected = ["index.html",
                "processing_qc.html"]
    for project in projects:
        prefix = "qc_report.%s.%s" % (project.name,
                                      project.info.run)
        expected.extend([prefix,
                         "%s.zip" % prefix,
                         os.path.join(prefix,"qc_report.html"),
                         os.path.join(prefix,"qc")])
    for item in expected:
        target = os.path.join(
            publication_dir,
            "160621_K00879_0087_000000000-AGEW9_analysis",
            item)
        self.assertTrue(os.path.exists(target),"Missing %s" % target)
    # Unmatched projects must not have been copied
    for project in missing_projects:
        self.assertFalse(os.path.exists(
            os.path.join(publication_dir,
                         "160621_K00879_0087_000000000-AGEW9_analysis",
                         "qc_report.%s.%s" % (project.name,
                                              project.info.run))),
                         "%s exists in final dir, but shouldn't" %
                         project.name)
def test_publish_qc_with_icell8_outputs(self):
    """publish_qc: project with ICELL8 QC outputs
    """
    # Build a mock auto-process analysis directory
    mockdir = MockAnalysisDirFactory.bcl2fastq2(
        '160621_K00879_0087_000000000-AGEW9',
        'hiseq',
        metadata={ "run_number": 87,
                   "source": "local",
                   "instrument_datestamp": "160621" },
        top_dir=self.dirn)
    mockdir.create()
    ap = AutoProcess(mockdir.dirn, settings=self.settings)
    # Attach processing report plus single-cell QC
    UpdateAnalysisDir(ap).add_processing_report()
    projects = ap.get_analysis_projects()
    for project in projects:
        UpdateAnalysisProject(project).add_qc_outputs(
            protocol="singlecell")
    # Give one project ICELL8 processing outputs
    icell8_project = projects[0]
    UpdateAnalysisProject(icell8_project).add_icell8_outputs()
    # Create a destination area and publish
    publication_dir = os.path.join(self.dirn,'QC')
    os.mkdir(publication_dir)
    publish_qc(ap,location=publication_dir)
    # Standard QC outputs should all be present
    expected = ["index.html",
                "processing_qc.html"]
    for project in ap.get_analysis_projects():
        prefix = "qc_report.%s.%s" % (project.name,
                                      project.info.run)
        expected.extend([prefix,
                         "%s.zip" % prefix,
                         os.path.join(prefix,"qc_report.html"),
                         os.path.join(prefix,"qc")])
    for item in expected:
        target = os.path.join(
            publication_dir,
            "160621_K00879_0087_000000000-AGEW9_analysis",
            item)
        self.assertTrue(os.path.exists(target),"Missing %s" % target)
    # ICELL8 outputs should NOT have been published
    # (non-legacy mode skips them)
    icell8_dir = "icell8_processing.%s.%s" % (icell8_project.name,
                                              os.path.basename(
                                                  ap.analysis_dir))
    unexpected = [icell8_dir,
                  "%s.zip" % icell8_dir,
                  os.path.join(icell8_dir,"icell8_processing_data"),
                  os.path.join(icell8_dir,"icell8_processing.html"),
                  os.path.join(icell8_dir,"stats")]
    for item in unexpected:
        target = os.path.join(
            publication_dir,
            "160621_K00879_0087_000000000-AGEW9_analysis",
            item)
        self.assertFalse(os.path.exists(target),"Found %s" % target)
def test_publish_qc_with_icell8_outputs_legacy_mode(self):
    """publish_qc: project with ICELL8 QC outputs (legacy mode)
    """
    # Build a mock auto-process analysis directory
    mockdir = MockAnalysisDirFactory.bcl2fastq2(
        '160621_K00879_0087_000000000-AGEW9',
        'hiseq',
        metadata={ "run_number": 87,
                   "source": "local",
                   "instrument_datestamp": "160621" },
        top_dir=self.dirn)
    mockdir.create()
    ap = AutoProcess(mockdir.dirn, settings=self.settings)
    # Attach processing report plus single-cell QC
    UpdateAnalysisDir(ap).add_processing_report()
    projects = ap.get_analysis_projects()
    for project in projects:
        UpdateAnalysisProject(project).add_qc_outputs(
            protocol="singlecell")
    # Give one project ICELL8 processing outputs
    icell8_project = projects[0]
    UpdateAnalysisProject(icell8_project).add_icell8_outputs()
    # Create a destination area and publish in legacy mode
    # (ICELL8 outputs are copied in this mode)
    publication_dir = os.path.join(self.dirn,'QC')
    os.mkdir(publication_dir)
    publish_qc(ap,location=publication_dir,legacy=True)
    # Assemble the expected outputs
    expected = ["index.html",
                "processing_qc.html"]
    for project in ap.get_analysis_projects():
        prefix = "qc_report.%s.%s" % (project.name,
                                      project.info.run)
        expected.extend([prefix,
                         "%s.zip" % prefix,
                         os.path.join(prefix,"qc_report.html"),
                         os.path.join(prefix,"qc")])
    # ICELL8 artefacts should be published too
    icell8_dir = "icell8_processing.%s.%s" % (icell8_project.name,
                                              os.path.basename(
                                                  ap.analysis_dir))
    expected.extend([icell8_dir,
                     "%s.zip" % icell8_dir,
                     os.path.join(icell8_dir,"icell8_processing_data"),
                     os.path.join(icell8_dir,"icell8_processing.html"),
                     os.path.join(icell8_dir,"stats")])
    # Verify everything exists
    for item in expected:
        target = os.path.join(
            publication_dir,
            "160621_K00879_0087_000000000-AGEW9_analysis",
            item)
        self.assertTrue(os.path.exists(target),"Missing %s" % target)
def test_publish_qc_with_cellranger_qc(self):
    """publish_qc: publish cellranger QC output (whole flowcell)
    """
    # Build a mock auto-process analysis directory
    # (no project subdirectories)
    mockdir = MockAnalysisDirFactory.bcl2fastq2(
        '160621_K00879_0087_000000000-AGEW9',
        'hiseq',
        metadata={ "run_number": 87,
                   "source": "local",
                   "instrument_datestamp": "160621" },
        top_dir=self.dirn)
    mockdir.create(no_project_dirs=True)
    ap = AutoProcess(mockdir.dirn, settings=self.settings)
    # Attach processing and cellranger QC reports
    UpdateAnalysisDir(ap).add_processing_report()
    UpdateAnalysisDir(ap).add_cellranger_qc_output()
    # Create a destination area and publish
    publication_dir = os.path.join(self.dirn,'QC')
    os.mkdir(publication_dir)
    publish_qc(ap,location=publication_dir)
    # Verify the expected outputs exist
    expected = ["index.html",
                "processing_qc.html",
                "cellranger_qc_summary.html"]
    for item in expected:
        target = os.path.join(
            publication_dir,
            "160621_K00879_0087_000000000-AGEW9_analysis",
            item)
        self.assertTrue(os.path.exists(target),"Missing %s" % target)
def test_publish_qc_with_cellranger_qc_lanes_subset(self):
    """publish_qc: publish cellranger QC output (subset of lanes)
    """
    # Build a mock auto-process analysis directory
    # (no project subdirectories)
    mockdir = MockAnalysisDirFactory.bcl2fastq2(
        '160621_K00879_0087_000000000-AGEW9',
        'hiseq',
        metadata={ "run_number": 87,
                   "source": "local",
                   "instrument_datestamp": "160621" },
        top_dir=self.dirn)
    mockdir.create(no_project_dirs=True)
    ap = AutoProcess(mockdir.dirn, settings=self.settings)
    # Attach processing report and a cellranger QC report
    # restricted to lanes 4 and 5
    UpdateAnalysisDir(ap).add_processing_report()
    UpdateAnalysisDir(ap).add_cellranger_qc_output(lanes="45")
    # Create a destination area and publish
    publication_dir = os.path.join(self.dirn,'QC')
    os.mkdir(publication_dir)
    publish_qc(ap,location=publication_dir)
    # Verify the expected outputs exist
    expected = ["index.html",
                "processing_qc.html",
                "cellranger_qc_summary_45.html"]
    for item in expected:
        target = os.path.join(
            publication_dir,
            "160621_K00879_0087_000000000-AGEW9_analysis",
            item)
        self.assertTrue(os.path.exists(target),"Missing %s" % target)
def test_publish_qc_with_cellranger_qc_multiple_lanes_subsets(self):
    """publish_qc: publish cellranger QC output (multiple subsets of lanes)
    """
    # Build a mock auto-process analysis directory
    # (no project subdirectories)
    mockdir = MockAnalysisDirFactory.bcl2fastq2(
        '160621_K00879_0087_000000000-AGEW9',
        'hiseq',
        metadata={ "run_number": 87,
                   "source": "local",
                   "instrument_datestamp": "160621" },
        top_dir=self.dirn)
    mockdir.create(no_project_dirs=True)
    ap = AutoProcess(mockdir.dirn, settings=self.settings)
    # Attach processing report plus two cellranger QC
    # reports for different lane subsets
    UpdateAnalysisDir(ap).add_processing_report()
    UpdateAnalysisDir(ap).add_cellranger_qc_output(lanes="45")
    UpdateAnalysisDir(ap).add_cellranger_qc_output(lanes="78")
    # Create a destination area and publish
    publication_dir = os.path.join(self.dirn,'QC')
    os.mkdir(publication_dir)
    publish_qc(ap,location=publication_dir)
    # Verify the expected outputs exist
    expected = ["index.html",
                "processing_qc.html",
                "cellranger_qc_summary_45.html",
                "cellranger_qc_summary_78.html"]
    for item in expected:
        target = os.path.join(
            publication_dir,
            "160621_K00879_0087_000000000-AGEW9_analysis",
            item)
        self.assertTrue(os.path.exists(target),"Missing %s" % target)
def test_publish_qc_with_multiple_10x_mkfastq_qc(self):
    """publish_qc: publish 10xGenomics mkfastq QC output (multiple packages)
    """
    # Build a mock auto-process analysis directory
    # (no project subdirectories)
    mockdir = MockAnalysisDirFactory.bcl2fastq2(
        '160621_K00879_0087_000000000-AGEW9',
        'hiseq',
        metadata={ "run_number": 87,
                   "source": "local",
                   "instrument_datestamp": "160621" },
        top_dir=self.dirn)
    mockdir.create(no_project_dirs=True)
    ap = AutoProcess(mockdir.dirn, settings=self.settings)
    # Attach processing report plus mkfastq QC outputs
    # from several 10x packages, one lane subset each
    UpdateAnalysisDir(ap).add_processing_report()
    updater = UpdateAnalysisDir(ap)
    for pkg,lanes in (("cellranger-arc","12"),
                      ("spaceranger","3"),
                      ("cellranger-atac","45"),
                      ("cellranger","78")):
        UpdateAnalysisDir(ap).add_10x_mkfastq_qc_output(pkg,
                                                        lanes=lanes)
    # Create a destination area and publish
    publication_dir = os.path.join(self.dirn,'QC')
    os.mkdir(publication_dir)
    publish_qc(ap,location=publication_dir)
    # Verify the expected outputs exist
    expected = ["index.html",
                "processing_qc.html",
                "cellranger-arc_qc_summary_12.html",
                "spaceranger_qc_summary_3.html",
                "cellranger-atac_qc_summary_45.html",
                "cellranger_qc_summary_78.html"]
    for item in expected:
        target = os.path.join(
            publication_dir,
            "160621_K00879_0087_000000000-AGEW9_analysis",
            item)
        self.assertTrue(os.path.exists(target),"Missing %s" % target)
def test_publish_qc_with_cellranger_count(self):
    """publish_qc: project with cellranger count output
    """
    # Make an auto-process directory
    mockdir = MockAnalysisDirFactory.bcl2fastq2(
        '160621_K00879_0087_000000000-AGEW9',
        'hiseq',
        metadata={ "run_number": 87,
                   "source": "local",
                   "instrument_datestamp": "160621" },
        top_dir=self.dirn)
    mockdir.create()
    ap = AutoProcess(mockdir.dirn,
                     settings=self.settings)
    # Add processing and cellranger QC reports
    UpdateAnalysisDir(ap).add_processing_report()
    UpdateAnalysisDir(ap).add_cellranger_qc_output()
    # Add QC outputs
    projects = ap.get_analysis_projects()
    for project in projects:
        UpdateAnalysisProject(project).add_qc_outputs(
            protocol="singlecell")
    # Add cellranger count output for one project
    tenxgenomics_project = projects[0]
    UpdateAnalysisProject(tenxgenomics_project).add_cellranger_count_outputs()
    # Make a mock publication area
    publication_dir = os.path.join(self.dirn,'QC')
    os.mkdir(publication_dir)
    # Publish
    publish_qc(ap,location=publication_dir)
    # Check outputs
    outputs = ["index.html",
               "processing_qc.html",
               "cellranger_qc_summary.html"]
    for project in ap.get_analysis_projects():
        # Standard QC outputs
        project_qc = "qc_report.%s.%s" % (project.name,
                                          project.info.run)
        outputs.append(project_qc)
        outputs.append("%s.zip" % project_qc)
        outputs.append(os.path.join(project_qc,"qc_report.html"))
        outputs.append(os.path.join(project_qc,"qc"))
    # NB cellranger count outputs shouldn't be present
    # Do checks
    # (previously this loop was duplicated verbatim; the
    # redundant second copy has been removed)
    for item in outputs:
        f = os.path.join(publication_dir,
                         "160621_K00879_0087_000000000-AGEW9_analysis",
                         item)
        self.assertTrue(os.path.exists(f),"Missing %s" % f)
def test_publish_qc_with_cellranger_count_legacy_mode(self):
    """publish_qc: project with cellranger count output (legacy mode)
    """
    # Make an auto-process directory
    mockdir = MockAnalysisDirFactory.bcl2fastq2(
        '160621_K00879_0087_000000000-AGEW9',
        'hiseq',
        metadata={ "run_number": 87,
                   "source": "local",
                   "instrument_datestamp": "160621" },
        top_dir=self.dirn)
    mockdir.create()
    ap = AutoProcess(mockdir.dirn,
                     settings=self.settings)
    # Add processing and cellranger QC reports
    UpdateAnalysisDir(ap).add_processing_report()
    UpdateAnalysisDir(ap).add_cellranger_qc_output()
    # Add QC outputs
    projects = ap.get_analysis_projects()
    for project in projects:
        UpdateAnalysisProject(project).add_qc_outputs(
            protocol="singlecell")
    # Add cellranger count output for one project
    tenxgenomics_project = projects[0]
    UpdateAnalysisProject(tenxgenomics_project).add_cellranger_count_outputs(legacy=True)
    # Make a mock publication area
    publication_dir = os.path.join(self.dirn,'QC')
    os.mkdir(publication_dir)
    # Publish
    publish_qc(ap,location=publication_dir,legacy=True)
    # Check outputs
    outputs = ["index.html",
               "processing_qc.html",
               "cellranger_qc_summary.html"]
    for project in ap.get_analysis_projects():
        # Standard QC outputs
        project_qc = "qc_report.%s.%s" % (project.name,
                                          project.info.run)
        outputs.append(project_qc)
        outputs.append("%s.zip" % project_qc)
        outputs.append(os.path.join(project_qc,"qc_report.html"))
        outputs.append(os.path.join(project_qc,"qc"))
    # Cellranger count outputs (published in legacy mode)
    cellranger_count_dir = "cellranger_count_report.%s.%s" % (
        tenxgenomics_project.name,
        os.path.basename(ap.analysis_dir))
    outputs.append(cellranger_count_dir)
    outputs.append("%s.zip" % cellranger_count_dir)
    outputs.append(os.path.join(cellranger_count_dir,
                                "cellranger_count_report.html"))
    outputs.append(os.path.join(cellranger_count_dir,"cellranger_count"))
    for sample in tenxgenomics_project.samples:
        outputs.append(os.path.join(cellranger_count_dir,
                                    "cellranger_count",
                                    sample.name,
                                    "outs",
                                    "web_summary.html"))
    # Do checks
    # (previously this loop was duplicated verbatim; the
    # redundant second copy has been removed)
    for item in outputs:
        f = os.path.join(publication_dir,
                         "160621_K00879_0087_000000000-AGEW9_analysis",
                         item)
        self.assertTrue(os.path.exists(f),"Missing %s" % f)
def test_publish_qc_use_hierarchy(self):
    """publish_qc: publish using YEAR/PLATFORM hierarchy
    """
    # Build a mock auto-process analysis directory
    mockdir = MockAnalysisDirFactory.bcl2fastq2(
        '160621_K00879_0087_000000000-AGEW9',
        'hiseq',
        metadata={ "run_number": 87,
                   "source": "local",
                   "instrument_datestamp": "160621" },
        top_dir=self.dirn)
    mockdir.create()
    ap = AutoProcess(mockdir.dirn, settings=self.settings)
    # Attach processing report and full QC for each project
    UpdateAnalysisDir(ap).add_processing_report()
    for project in ap.get_analysis_projects():
        UpdateAnalysisProject(project).add_qc_outputs()
    # Create a destination area and publish using the
    # YEAR/PLATFORM directory hierarchy
    publication_dir = os.path.join(self.dirn,'QC')
    os.mkdir(publication_dir)
    publish_qc(ap,location=publication_dir,
               use_hierarchy=True)
    # The hierarchy directory (2016/hiseq) must exist
    final_dir = os.path.join(publication_dir,
                             "2016",
                             "hiseq")
    self.assertTrue(os.path.exists(final_dir))
    # Assemble expected outputs and verify each exists
    # under the hierarchy
    expected = ["index.html",
                "processing_qc.html"]
    for project in ap.get_analysis_projects():
        prefix = "qc_report.%s.%s" % (project.name,
                                      project.info.run)
        expected.extend([prefix,
                         "%s.zip" % prefix,
                         os.path.join(prefix,"qc_report.html"),
                         os.path.join(prefix,"qc")])
    for item in expected:
        target = os.path.join(
            final_dir,
            "160621_K00879_0087_000000000-AGEW9_analysis",
            item)
        self.assertTrue(os.path.exists(target),"Missing %s" % target)
def test_publish_qc_exclude_zip_files(self):
    """publish_qc: exclude ZIP files from publication
    """
    # Make an auto-process directory
    mockdir = MockAnalysisDirFactory.bcl2fastq2(
        '160621_K00879_0087_000000000-AGEW9',
        'hiseq',
        metadata={ "run_number": 87,
                   "source": "local",
                   "instrument_datestamp": "160621" },
        top_dir=self.dirn)
    mockdir.create()
    ap = AutoProcess(mockdir.dirn,
                     settings=self.settings)
    # Add processing report and QC outputs
    UpdateAnalysisDir(ap).add_processing_report()
    projects = ap.get_analysis_projects()
    for project in projects:
        UpdateAnalysisProject(project).add_qc_outputs()
    # Add ICELL8 report for one project
    icell8_project = projects[0]
    UpdateAnalysisProject(icell8_project).add_icell8_outputs()
    # Add cellranger count output for one project
    tenxgenomics_project = projects[-1]
    UpdateAnalysisProject(tenxgenomics_project).add_cellranger_count_outputs()
    # Make a mock publication area
    publication_dir = os.path.join(self.dirn,'QC')
    os.mkdir(publication_dir)
    # Publish with ZIP files excluded
    publish_qc(ap,location=publication_dir,
               exclude_zip_files=True)
    # Check outputs
    outputs = ["index.html",
               "processing_qc.html"]
    zip_files = []
    for project in ap.get_analysis_projects():
        # Standard QC outputs
        project_qc = "qc_report.%s.%s" % (project.name,
                                          project.info.run)
        outputs.append(project_qc)
        outputs.append(os.path.join(project_qc,"qc_report.html"))
        outputs.append(os.path.join(project_qc,"qc"))
        zip_files.append("%s.zip" % project_qc)
    # NB ICELL8 and cellranger count outputs shouldn't be present
    # Do checks
    # (previously this loop was duplicated verbatim; the
    # redundant second copy has been removed)
    for item in outputs:
        f = os.path.join(publication_dir,
                         "160621_K00879_0087_000000000-AGEW9_analysis",
                         item)
        self.assertTrue(os.path.exists(f),"Missing %s" % f)
    # Check the ZIP files were excluded
    for zip_file in zip_files:
        self.assertFalse(os.path.exists(
            os.path.join(publication_dir,
                         "160621_K00879_0087_000000000-AGEW9_analysis",
                         zip_file)),
                         "ZIP file '%s' exists, but shouldn't" %
                         zip_file)
def test_publish_qc_exclude_zip_files_legacy_mode(self):
    """publish_qc: exclude ZIP files from publication (legacy mode)
    """
    # Build a mock auto-process analysis directory
    mockdir = MockAnalysisDirFactory.bcl2fastq2(
        '160621_K00879_0087_000000000-AGEW9',
        'hiseq',
        metadata={ "run_number": 87,
                   "source": "local",
                   "instrument_datestamp": "160621" },
        top_dir=self.dirn)
    mockdir.create()
    ap = AutoProcess(mockdir.dirn, settings=self.settings)
    # Attach processing report and full QC for each project
    UpdateAnalysisDir(ap).add_processing_report()
    projects = ap.get_analysis_projects()
    for project in projects:
        UpdateAnalysisProject(project).add_qc_outputs()
    # First project also gets ICELL8 outputs
    icell8_project = projects[0]
    UpdateAnalysisProject(icell8_project).add_icell8_outputs()
    # Last project also gets legacy cellranger count outputs
    tenxgenomics_project = projects[-1]
    UpdateAnalysisProject(tenxgenomics_project).add_cellranger_count_outputs(legacy=True)
    # Create a destination area; publish in legacy mode
    # with ZIP files excluded
    publication_dir = os.path.join(self.dirn,'QC')
    os.mkdir(publication_dir)
    publish_qc(ap,location=publication_dir,
               exclude_zip_files=True,
               legacy=True)
    # Assemble expected outputs and the ZIPs that must
    # have been excluded
    expected = ["index.html",
                "processing_qc.html"]
    excluded_zips = []
    for project in ap.get_analysis_projects():
        # Standard per-project QC artefacts
        prefix = "qc_report.%s.%s" % (project.name,
                                      project.info.run)
        expected.extend([prefix,
                         os.path.join(prefix,"qc_report.html"),
                         os.path.join(prefix,"qc")])
        excluded_zips.append("%s.zip" % prefix)
    # ICELL8 artefacts (legacy mode publishes them)
    icell8_dir = "icell8_processing.%s.%s" % (icell8_project.name,
                                              os.path.basename(
                                                  ap.analysis_dir))
    expected.extend([icell8_dir,
                     os.path.join(icell8_dir,"icell8_processing_data"),
                     os.path.join(icell8_dir,"icell8_processing.html"),
                     os.path.join(icell8_dir,"stats")])
    excluded_zips.append("%s.zip" % icell8_dir)
    # Cellranger count artefacts (legacy mode publishes them)
    cellranger_count_dir = "cellranger_count_report.%s.%s" % (
        tenxgenomics_project.name,
        os.path.basename(ap.analysis_dir))
    expected.extend([cellranger_count_dir,
                     os.path.join(cellranger_count_dir,
                                  "cellranger_count_report.html"),
                     os.path.join(cellranger_count_dir,
                                  "cellranger_count")])
    for sample in tenxgenomics_project.samples:
        expected.append(os.path.join(cellranger_count_dir,
                                     "cellranger_count",
                                     sample.name,
                                     "outs",
                                     "web_summary.html"))
    excluded_zips.append("%s.zip" % cellranger_count_dir)
    # Verify the expected outputs exist
    for item in expected:
        target = os.path.join(
            publication_dir,
            "160621_K00879_0087_000000000-AGEW9_analysis",
            item)
        self.assertTrue(os.path.exists(target),"Missing %s" % target)
    # Verify no ZIP file was copied over
    for zip_file in excluded_zips:
        self.assertFalse(os.path.exists(
            os.path.join(publication_dir,
                         "160621_K00879_0087_000000000-AGEW9_analysis",
                         zip_file)),
                         "ZIP file '%s' exists, but shouldn't" %
                         zip_file)
def test_publish_qc_missing_destination(self):
    """publish_qc: raise exception if destination doesn't exist
    """
    # Build a mock auto-process analysis directory
    mockdir = MockAnalysisDirFactory.bcl2fastq2(
        '160621_K00879_0087_000000000-AGEW9',
        'hiseq',
        metadata={ "run_number": 87,
                   "source": "local",
                   "instrument_datestamp": "160621" },
        top_dir=self.dirn)
    mockdir.create()
    ap = AutoProcess(mockdir.dirn, settings=self.settings)
    # Attach processing report and full QC for each project
    UpdateAnalysisDir(ap).add_processing_report()
    for project in ap.get_analysis_projects():
        UpdateAnalysisProject(project).add_qc_outputs()
    # Point at a publication area that was never created
    publication_dir = os.path.join(self.dirn,'QC')
    self.assertFalse(os.path.exists(publication_dir))
    # Publication should fail, and must not create the
    # destination as a side effect
    self.assertRaises(Exception,
                      publish_qc,
                      ap,
                      location=publication_dir)
    self.assertFalse(os.path.exists(publication_dir))
def test_publish_qc_handles_four_digit_year_in_datestamp(self):
"""publish_qc: handle 4-digit year in datestamp using YEAR/PLATFORM hierarchy
"""
# Make an auto-process directory with 4-digit year datestamp
mockdir = MockAnalysisDirFactory.bcl2fastq2(
'20160621_K00879_0087_000000000-AGEW9',
'hiseq',
metadata={ "run_number": 87,
"source": "local",
"instrument_datestamp": "20160621" },
top_dir=self.dirn)
mockdir.create()
ap = AutoProcess(mockdir.dirn,
settings=self.settings)
# Add processing report and QC outputs
UpdateAnalysisDir(ap).add_processing_report()
for project in ap.get_analysis_projects():
UpdateAnalysisProject(project).add_qc_outputs()
# Make a mock publication area
publication_dir = os.path.join(self.dirn,'QC')
os.mkdir(publication_dir)
# Publish
publish_qc(ap,location=publication_dir,
use_hierarchy=True)
# Check outputs
final_dir = os.path.join(publication_dir,
"2016",
"hiseq")
self.assertTrue(os.path.exists(final_dir))
outputs = ["index.html",
"processing_qc.html"]
for project in ap.get_analysis_projects():
# Standard QC outputs
project_qc = "qc_report.%s.%s" % (project.name,
project.info.run)
outputs.append(project_qc)
outputs.append("%s.zip" % project_qc)
outputs.append(os.path.join(project_qc,"qc_report.html"))
outputs.append(os.path.join(project_qc,"qc"))
for item in outputs:
f = os.path.join(final_dir,
"20160621_K00879_0087_000000000-AGEW9_analysis",
item)
self.assertTrue(os.path.exists(f),"Missing %s" % f)
def test_publish_qc_with_projects_legacy_mode(self):
"""publish_qc: projects with all QC outputs (legacy mode)
"""
# Make an auto-process directory
mockdir = MockAnalysisDirFactory.bcl2fastq2(
'160621_K00879_0087_000000000-AGEW9',
'hiseq',
metadata={ "run_number": 87,
"source": "local",
"instrument_datestamp": "160621" },
top_dir=self.dirn)
mockdir.create()
ap = AutoProcess(mockdir.dirn,
settings=self.settings)
# Add processing report and QC outputs
UpdateAnalysisDir(ap).add_processing_report()
for project in ap.get_analysis_projects():
UpdateAnalysisProject(project).add_qc_outputs()
# Make a mock publication area
publication_dir = os.path.join(self.dirn,'QC')
os.mkdir(publication_dir)
# Publish
publish_qc(ap,location=publication_dir,legacy=True)
# Check outputs
outputs = ["index.html",
"processing_qc.html"]
for project in ap.get_analysis_projects():
# Standard QC outputs
project_qc = "qc_report.%s.%s" % (project.name,
project.info.run)
outputs.append(project_qc)
outputs.append("%s.zip" % project_qc)
outputs.append(os.path.join(project_qc,"qc_report.html"))
outputs.append(os.path.join(project_qc,"qc"))
# MultiQC output
outputs.append("multiqc_report.%s.html" % project.name)
for item in outputs:
f = os.path.join(publication_dir,
"160621_K00879_0087_000000000-AGEW9_analysis",
item)
self.assertTrue(os.path.exists(f),"Missing %s" % f)
| 44.547583
| 93
| 0.56313
| 6,049
| 58,981
| 5.278724
| 0.042982
| 0.032508
| 0.040086
| 0.045849
| 0.907519
| 0.888165
| 0.877642
| 0.852682
| 0.834362
| 0.828599
| 0
| 0.050797
| 0.341466
| 58,981
| 1,323
| 94
| 44.581255
| 0.771298
| 0.119547
| 0
| 0.85098
| 0
| 0
| 0.141182
| 0.061723
| 0
| 0
| 0
| 0
| 0.040196
| 1
| 0.029412
| false
| 0
| 0.011765
| 0
| 0.042157
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e2dce0f147027c2e4e64afd841859fb1d3874263
| 67,183
|
py
|
Python
|
speechcorpusy/presets/zr19/zr19_items_voice.py
|
tarepan/corpuspy
|
d5c07fc1f12ee2fb2f2ea00419728c39d5b354e4
|
[
"MIT"
] | 1
|
2022-01-10T13:58:56.000Z
|
2022-01-10T13:58:56.000Z
|
speechcorpusy/presets/zr19/zr19_items_voice.py
|
tarepan/speechcorpusy
|
d5c07fc1f12ee2fb2f2ea00419728c39d5b354e4
|
[
"MIT"
] | null | null | null |
speechcorpusy/presets/zr19/zr19_items_voice.py
|
tarepan/speechcorpusy
|
d5c07fc1f12ee2fb2f2ea00419728c39d5b354e4
|
[
"MIT"
] | null | null | null |
from typing import List
utterances_voice: List[str] = ['V001_0001595577', 'V001_0001866840', 'V001_0007444691', 'V001_0007474540', 'V001_0009509470', 'V001_0013388284', 'V001_0015024396', 'V001_0015237745', 'V001_0015448446', 'V001_0023671762', 'V001_0027607622', 'V001_0042979613', 'V001_0044037376', 'V001_0050336874', 'V001_0056720896', 'V001_0057841840', 'V001_0066512081', 'V001_0066979256', 'V001_0067148519', 'V001_0076470331', 'V001_0079628035', 'V001_0081091349', 'V001_0082424587', 'V001_0090264421', 'V001_0090466478', 'V001_0102160029', 'V001_0102424554', 'V001_0103204922', 'V001_0113074863', 'V001_0122702237', 'V001_0130962843', 'V001_0137028902', 'V001_0142064545', 'V001_0147340524', 'V001_0150356835', 'V001_0155527503', 'V001_0164084310', 'V001_0165487895', 'V001_0166667503', 'V001_0174503071', 'V001_0174811571', 'V001_0175375526', 'V001_0184421310', 'V001_0187759645', 'V001_0189456059', 'V001_0195290783', 'V001_0197388910', 'V001_0203037851', 'V001_0203350329', 'V001_0204913653', 'V001_0205791134', 'V001_0217494114', 'V001_0218475522', 'V001_0223743417', 'V001_0225505877', 'V001_0229884875', 'V001_0233680765', 'V001_0235253106', 'V001_0242047039', 'V001_0242268674', 'V001_0243557067', 'V001_0248677296', 'V001_0249585391', 'V001_0250767462', 'V001_0254467696', 'V001_0256553266', 'V001_0263250234', 'V001_0270622643', 'V001_0281264757', 'V001_0282430278', 'V001_0282775118', 'V001_0286385147', 'V001_0287445499', 'V001_0290110102', 'V001_0297854108', 'V001_0299206178', 'V001_0301052210', 'V001_0301850200', 'V001_0305251796', 'V001_0315686710', 'V001_0316133702', 'V001_0317340607', 'V001_0318975729', 'V001_0323872500', 'V001_0330655004', 'V001_0331818368', 'V001_0332143419', 'V001_0334575358', 'V001_0337583034', 'V001_0338734154', 'V001_0338903819', 'V001_0342933341', 'V001_0345340158', 'V001_0359230989', 'V001_0360196535', 'V001_0365689819', 'V001_0368521196', 'V001_0370051977', 'V001_0370783349', 'V001_0373331421', 'V001_0376475355', 'V001_0380022446', 'V001_0385451422', 
'V001_0387490903', 'V001_0388374362', 'V001_0389080064', 'V001_0390246106', 'V001_0396480621', 'V001_0407441502', 'V001_0410326924', 'V001_0422377661', 'V001_0425622106', 'V001_0429663090', 'V001_0431723770', 'V001_0432309827', 'V001_0435520955', 'V001_0439506844', 'V001_0440758804', 'V001_0443194183', 'V001_0443618782', 'V001_0446014451', 'V001_0453797137', 'V001_0454693738', 'V001_0457403470', 'V001_0464833869', 'V001_0465308759', 'V001_0471326928', 'V001_0471505765', 'V001_0474307499', 'V001_0478287100', 'V001_0484713271', 'V001_0486288353', 'V001_0493645630', 'V001_0496918598', 'V001_0497662549', 'V001_0501671768', 'V001_0501897074', 'V001_0504709122', 'V001_0506130464', 'V001_0513319803', 'V001_0514324215', 'V001_0518577001', 'V001_0522929669', 'V001_0524721581', 'V001_0524964564', 'V001_0534442179', 'V001_0554070015', 'V001_0561838490', 'V001_0569577945', 'V001_0579626256', 'V001_0585893794', 'V001_0589306806', 'V001_0589664000', 'V001_0590151855', 'V001_0591776454', 'V001_0595077042', 'V001_0601808311', 'V001_0603471861', 'V001_0605326797', 'V001_0606679255', 'V001_0607862540', 'V001_0613438024', 'V001_0614608762', 'V001_0615303209', 'V001_0617108393', 'V001_0618457634', 'V001_0621590541', 'V001_0622310476', 'V001_0626608512', 'V001_0630814381', 'V001_0635218511', 'V001_0641090057', 'V001_0646788952', 'V001_0649283978', 'V001_0655284234', 'V001_0660827439', 'V001_0672830622', 'V001_0672887485', 'V001_0683757500', 'V001_0686024443', 'V001_0689017760', 'V001_0691550756', 'V001_0707858005', 'V001_0710575182', 'V001_0713101078', 'V001_0723226760', 'V001_0724241984', 'V001_0729380762', 'V001_0731650821', 'V001_0735213352', 'V001_0738039735', 'V001_0745050372', 'V001_0746128950', 'V001_0752840717', 'V001_0757361483', 'V001_0760060410', 'V001_0762042172', 'V001_0765891496', 'V001_0772741603', 'V001_0775065458', 'V001_0787370297', 'V001_0791810509', 'V001_0793735645', 'V001_0794353633', 'V001_0796454336', 'V001_0802261053', 'V001_0802690584', 'V001_0804144017', 
'V001_0806589268', 'V001_0813877990', 'V001_0817110407', 'V001_0817833592', 'V001_0819807995', 'V001_0819843395', 'V001_0820664325', 'V001_0822167947', 'V001_0823125580', 'V001_0834905464', 'V001_0839204047', 'V001_0843225240', 'V001_0844622030', 'V001_0847629801', 'V001_0851494693', 'V001_0856757131', 'V001_0864410956', 'V001_0864424259', 'V001_0865569881', 'V001_0865807276', 'V001_0868874084', 'V001_0869977387', 'V001_0880351352', 'V001_0881457952', 'V001_0885292946', 'V001_0885334010', 'V001_0891577441', 'V001_0892743242', 'V001_0893170349', 'V001_0897531699', 'V001_0905471576', 'V001_0915724006', 'V001_0917895079', 'V001_0921140492', 'V001_0924426691', 'V001_0924575384', 'V001_0928372245', 'V001_0944401513', 'V001_0944754032', 'V001_0948445747', 'V001_0949863468', 'V001_0955944686', 'V001_0970201967', 'V001_0976457124', 'V001_0977650049', 'V001_0985782879', 'V001_0989813156', 'V001_0994369173', 'V001_0995418107', 'V001_0998977512', 'V001_1000617835', 'V001_1001530848', 'V001_1010346691', 'V001_1012447178', 'V001_1014575758', 'V001_1014961990', 'V001_1036103882', 'V001_1036416284', 'V001_1043534355', 'V001_1053935052', 'V001_1055499477', 'V001_1061192918', 'V001_1079599785', 'V001_1080736895', 'V001_1091145361', 'V001_1093567940', 'V001_1095991604', 'V001_1099160059', 'V001_1102969332', 'V001_1107043308', 'V001_1107206129', 'V001_1109105032', 'V001_1113844859', 'V001_1116406951', 'V001_1118365677', 'V001_1120383217', 'V001_1120992059', 'V001_1122394794', 'V001_1127547788', 'V001_1145465734', 'V001_1152771818', 'V001_1162355114', 'V001_1168577557', 'V001_1172895858', 'V001_1174536841', 'V001_1178266391', 'V001_1181001025', 'V001_1181589202', 'V001_1182248031', 'V001_1184474909', 'V001_1185862648', 'V001_1189549524', 'V001_1193874368', 'V001_1209405512', 'V001_1212784022', 'V001_1213571064', 'V001_1213802328', 'V001_1222035630', 'V001_1222724842', 'V001_1226192853', 'V001_1241866685', 'V001_1242262643', 'V001_1243258905', 'V001_1245061105', 'V001_1246984097', 
'V001_1249972158', 'V001_1253594703', 'V001_1255631875', 'V001_1255975184', 'V001_1258912176', 'V001_1263590383', 'V001_1278606148', 'V001_1284057493', 'V001_1290361888', 'V001_1291424376', 'V001_1301964061', 'V001_1303989345', 'V001_1306362927', 'V001_1308765401', 'V001_1309767435', 'V001_1315400837', 'V001_1316043617', 'V001_1317300519', 'V001_1317525694', 'V001_1318613369', 'V001_1339396446', 'V001_1345469369', 'V001_1346458608', 'V001_1355629422', 'V001_1364591848', 'V001_1365690136', 'V001_1378878854', 'V001_1380467913', 'V001_1384693041', 'V001_1385985043', 'V001_1394782236', 'V001_1396761240', 'V001_1397819159', 'V001_1402075747', 'V001_1402170554', 'V001_1402675255', 'V001_1403019722', 'V001_1403327838', 'V001_1404984904', 'V001_1405644694', 'V001_1414321626', 'V001_1416965467', 'V001_1417404052', 'V001_1418740442', 'V001_1420944476', 'V001_1421087667', 'V001_1426379457', 'V001_1437100785', 'V001_1440032772', 'V001_1441743051', 'V001_1447818130', 'V001_1457766131', 'V001_1458854586', 'V001_1464929765', 'V001_1465868729', 'V001_1466936857', 'V001_1468662963', 'V001_1471852574', 'V001_1474337957', 'V001_1478231810', 'V001_1479192943', 'V001_1480170717', 'V001_1482371585', 'V001_1485676218', 'V001_1489373140', 'V001_1489951582', 'V001_1494670325', 'V001_1503274995', 'V001_1504357110', 'V001_1507779709', 'V001_1519041762', 'V001_1519865617', 'V001_1526247976', 'V001_1527744190', 'V001_1527941372', 'V001_1532821448', 'V001_1540605454', 'V001_1543355947', 'V001_1543674908', 'V001_1552201519', 'V001_1558162966', 'V001_1562043191', 'V001_1566634287', 'V001_1574939213', 'V001_1576947828', 'V001_1580112419', 'V001_1595051534', 'V001_1595747436', 'V001_1598328507', 'V001_1598433572', 'V001_1607097829', 'V001_1608282859', 'V001_1615567440', 'V001_1615766461', 'V001_1616121797', 'V001_1619058935', 'V001_1620017108', 'V001_1626053114', 'V001_1628915371', 'V001_1643999384', 'V001_1649050418', 'V001_1666168798', 'V001_1666595741', 'V001_1674631006', 'V001_1680391864', 
'V001_1681540512', 'V001_1688550276', 'V001_1690458177', 'V001_1694475421', 'V001_1696518298', 'V001_1702789653', 'V001_1709139520', 'V001_1713235908', 'V001_1725107896', 'V001_1726572568', 'V001_1729681071', 'V001_1731254777', 'V001_1738027122', 'V001_1744571103', 'V001_1752965697', 'V001_1756383089', 'V001_1759683169', 'V001_1765366119', 'V001_1778403456', 'V001_1779372660', 'V001_1780036223', 'V001_1787976667', 'V001_1802603014', 'V001_1803871055', 'V001_1813301413', 'V001_1814041133', 'V001_1821644151', 'V001_1824619273', 'V001_1827134409', 'V001_1834203511', 'V001_1844499086', 'V001_1848942941', 'V001_1849442375', 'V001_1849488313', 'V001_1852557757', 'V001_1861871603', 'V001_1863456858', 'V001_1864232598', 'V001_1867021789', 'V001_1868820827', 'V001_1881536428', 'V001_1883118701', 'V001_1890312342', 'V001_1891707238', 'V001_1893150910', 'V001_1895964439', 'V001_1900657035', 'V001_1903528862', 'V001_1903856481', 'V001_1911991358', 'V001_1913632097', 'V001_1916686802', 'V001_1919276011', 'V001_1922835504', 'V001_1923569760', 'V001_1941025171', 'V001_1942650635', 'V001_1945306029', 'V001_1952053734', 'V001_1953250263', 'V001_1959792110', 'V001_1959897087', 'V001_1987118982', 'V001_1994793066', 'V001_1999807740', 'V001_2000406525', 'V001_2003743532', 'V001_2006215017', 'V001_2007903874', 'V001_2010017309', 'V001_2017255715', 'V001_2024585086', 'V001_2027925760', 'V001_2050389747', 'V001_2051101210', 'V001_2053789847', 'V001_2060404385', 'V001_2062845034', 'V001_2066726644', 'V001_2070323030', 'V001_2077062784', 'V001_2078911253', 'V001_2079309765', 'V001_2086591050', 'V001_2093049844', 'V001_2093302742', 'V001_2096757921', 'V001_2103669410', 'V001_2117583577', 'V001_2121756508', 'V001_2137391920', 'V001_2148295010', 'V001_2154966125', 'V001_2157031148', 'V001_2158656140', 'V001_2159006255', 'V001_2161306710', 'V001_2163593377', 'V001_2164485115', 'V001_2164871863', 'V001_2174291477', 'V001_2175006017', 'V001_2179109326', 'V001_2191720562', 'V001_2192708585', 
'V001_2196268478', 'V001_2210638863', 'V001_2210792981', 'V001_2220212593', 'V001_2230522989', 'V001_2235353164', 'V001_2238504703', 'V001_2239163757', 'V001_2253447906', 'V001_2255544531', 'V001_2263769833', 'V001_2269132111', 'V001_2269482139', 'V001_2271650664', 'V001_2273698242', 'V001_2284055746', 'V001_2291550538', 'V001_2294261976', 'V001_2295056546', 'V001_2296774013', 'V001_2301143286', 'V001_2303447299', 'V001_2304110445', 'V001_2305602765', 'V001_2315884461', 'V001_2318569948', 'V001_2318974574', 'V001_2321601147', 'V001_2322472599', 'V001_2331157149', 'V001_2338915574', 'V001_2339568249', 'V001_2345176574', 'V001_2348413507', 'V001_2349169008', 'V001_2352435635', 'V001_2353854430', 'V001_2354050721', 'V001_2361211123', 'V001_2365637534', 'V001_2374641997', 'V001_2375916973', 'V001_2382717516', 'V001_2384284688', 'V001_2387380094', 'V001_2418029924', 'V001_2419208170', 'V001_2420803729', 'V001_2425732744', 'V001_2425958949', 'V001_2427275751', 'V001_2430139494', 'V001_2431773895', 'V001_2436171635', 'V001_2445532880', 'V001_2450202892', 'V001_2465882731', 'V001_2466065707', 'V001_2468503854', 'V001_2470264805', 'V001_2471827570', 'V001_2478085673', 'V001_2478113430', 'V001_2480550714', 'V001_2484066096', 'V001_2486694050', 'V001_2503019196', 'V001_2504665644', 'V001_2510228131', 'V001_2512065112', 'V001_2513304327', 'V001_2524804713', 'V001_2526793987', 'V001_2530102345', 'V001_2531790894', 'V001_2531900742', 'V001_2532318566', 'V001_2542515701', 'V001_2543823007', 'V001_2544217993', 'V001_2551672400', 'V001_2555765582', 'V001_2562033918', 'V001_2571308698', 'V001_2571648428', 'V001_2573995607', 'V001_2574015618', 'V001_2577993517', 'V001_2581998363', 'V001_2585856352', 'V001_2588064628', 'V001_2590553018', 'V001_2591579247', 'V001_2597999904', 'V001_2598275636', 'V001_2599531301', 'V001_2601828347', 'V001_2606482222', 'V001_2619286544', 'V001_2621423371', 'V001_2624757692', 'V001_2631131874', 'V001_2633603642', 'V001_2635794405', 'V001_2636762893', 
'V001_2642551485', 'V001_2667488300', 'V001_2668601564', 'V001_2668747272', 'V001_2675053669', 'V001_2676388556', 'V001_2681494142', 'V001_2682698985', 'V001_2684589315', 'V001_2686994652', 'V001_2689223445', 'V001_2693584783', 'V001_2703426768', 'V001_2710374106', 'V001_2718008236', 'V001_2722106466', 'V001_2722262865', 'V001_2725979106', 'V001_2733137174', 'V001_2733959806', 'V001_2737981193', 'V001_2738662781', 'V001_2746690861', 'V001_2746960744', 'V001_2752177955', 'V001_2754634889', 'V001_2764481950', 'V001_2766337521', 'V001_2775380862', 'V001_2782264414', 'V001_2789068397', 'V001_2792026620', 'V001_2795286271', 'V001_2795299044', 'V001_2804655007', 'V001_2805330041', 'V001_2814775052', 'V001_2817465453', 'V001_2820665568', 'V001_2822705958', 'V001_2824544572', 'V001_2829218647', 'V001_2836260237', 'V001_2837311767', 'V001_2848357983', 'V001_2852459483', 'V001_2853058925', 'V001_2853691968', 'V001_2854013458', 'V001_2861497824', 'V001_2864319996', 'V001_2873702492', 'V001_2876825014', 'V001_2880127678', 'V001_2883711290', 'V001_2903744350', 'V001_2908398034', 'V001_2912379675', 'V001_2913105747', 'V001_2936174871', 'V001_2950536470', 'V001_2952985819', 'V001_2953220968', 'V001_2954989064', 'V001_2956662172', 'V001_2963473103', 'V001_2965220019', 'V001_2968146949', 'V001_2968372488', 'V001_2968576684', 'V001_2970806417', 'V001_2975395372', 'V001_2983307977', 'V001_2987130046', 'V001_2988241961', 'V001_2990029684', 'V001_2993311702', 'V001_3000815821', 'V001_3010709005', 'V001_3021768464', 'V001_3022437538', 'V001_3034153592', 'V001_3040826029', 'V001_3044874208', 'V001_3049187244', 'V001_3061029125', 'V001_3061190038', 'V001_3061860788', 'V001_3065240666', 'V001_3067089596', 'V001_3075006266', 'V001_3075592813', 'V001_3076254852', 'V001_3085535718', 'V001_3088439980', 'V001_3088868402', 'V001_3090182841', 'V001_3093995123', 'V001_3105885499', 'V001_3106781777', 'V001_3108356792', 'V001_3113984620', 'V001_3115314541', 'V001_3123508231', 'V001_3127941645', 
'V001_3131019261', 'V001_3133535195', 'V001_3134107283', 'V001_3140332964', 'V001_3144664947', 'V001_3144783778', 'V001_3149258927', 'V001_3155309099', 'V001_3158975468', 'V001_3161643839', 'V001_3166531399', 'V001_3174816960', 'V001_3177411581', 'V001_3195187774', 'V001_3196860310', 'V001_3198584074', 'V001_3200036066', 'V001_3206854193', 'V001_3218809027', 'V001_3219679220', 'V001_3220671386', 'V001_3221615035', 'V001_3234704601', 'V001_3242033515', 'V001_3244670899', 'V001_3246323844', 'V001_3246520385', 'V001_3254087620', 'V001_3255805392', 'V001_3258229764', 'V001_3262144730', 'V001_3268296032', 'V001_3268758627', 'V001_3273596352', 'V001_3276246151', 'V001_3283379962', 'V001_3291089740', 'V001_3302205706', 'V001_3315841451', 'V001_3319975309', 'V001_3324057110', 'V001_3335759378', 'V001_3338175019', 'V001_3339935365', 'V001_3344089625', 'V001_3348827323', 'V001_3358267826', 'V001_3358318607', 'V001_3360891828', 'V001_3362000176', 'V001_3369178674', 'V001_3371410156', 'V001_3371615214', 'V001_3376346158', 'V001_3378555711', 'V001_3380228455', 'V001_3386514750', 'V001_3399518181', 'V001_3402351874', 'V001_3416181300', 'V001_3416877769', 'V001_3417570918', 'V001_3423850707', 'V001_3434886459', 'V001_3435331083', 'V001_3446897960', 'V001_3447400670', 'V001_3452632075', 'V001_3458230176', 'V001_3465066135', 'V001_3477881348', 'V001_3478932658', 'V001_3480673906', 'V001_3490141953', 'V001_3497952635', 'V001_3501160689', 'V001_3507075876', 'V001_3514668973', 'V001_3515378294', 'V001_3518568053', 'V001_3526321290', 'V001_3532631130', 'V001_3533528268', 'V001_3533990767', 'V001_3535826231', 'V001_3541545904', 'V001_3546219688', 'V001_3546325716', 'V001_3546408301', 'V001_3549641825', 'V001_3551330567', 'V001_3551838178', 'V001_3552652864', 'V001_3566949606', 'V001_3571529813', 'V001_3573315047', 'V001_3581470516', 'V001_3583496504', 'V001_3584497669', 'V001_3598964354', 'V001_3621686905', 'V001_3642938334', 'V001_3650188603', 'V001_3653273080', 'V001_3654620530', 
'V001_3657146942', 'V001_3657809719', 'V001_3657957083', 'V001_3659108503', 'V001_3662477920', 'V001_3663781862', 'V001_3664558271', 'V001_3666418768', 'V001_3668394355', 'V001_3669716563', 'V001_3677092329', 'V001_3678538692', 'V001_3683106063', 'V001_3685992413', 'V001_3689082562', 'V001_3695698065', 'V001_3699801589', 'V001_3701176650', 'V001_3713686866', 'V001_3720670192', 'V001_3722783960', 'V001_3728098639', 'V001_3734812627', 'V001_3737562111', 'V001_3742819560', 'V001_3747067579', 'V001_3755108033', 'V001_3756094442', 'V001_3768564882', 'V001_3777417644', 'V001_3785716288', 'V001_3787349820', 'V001_3788505902', 'V001_3790468536', 'V001_3799431069', 'V001_3816720384', 'V001_3824010597', 'V001_3836650344', 'V001_3839916182', 'V001_3840193135', 'V001_3849129465', 'V001_3852067735', 'V001_3853236201', 'V001_3864193492', 'V001_3874198739', 'V001_3876059880', 'V001_3876600641', 'V001_3877107690', 'V001_3887132983', 'V001_3890707702', 'V001_3896053382', 'V001_3900779960', 'V001_3907933089', 'V001_3908472331', 'V001_3915643578', 'V001_3918293282', 'V001_3932359358', 'V001_3940704981', 'V001_3947965845', 'V001_3951200581', 'V001_3967796026', 'V001_3978432895', 'V001_3985155168', 'V001_3986267930', 'V001_3987203426', 'V001_3990723360', 'V001_3997155080', 'V001_4002462105', 'V001_4007760477', 'V001_4007841806', 'V001_4008924114', 'V001_4012023942', 'V001_4017392730', 'V001_4017407109', 'V001_4023709209', 'V001_4030211509', 'V001_4036964705', 'V001_4047757206', 'V001_4051088253', 'V001_4051299740', 'V001_4051816784', 'V001_4057594958', 'V001_4057937620', 'V001_4060064523', 'V001_4064433227', 'V001_4069777750', 'V001_4070630289', 'V001_4072124213', 'V001_4079223400', 'V001_4079271661', 'V001_4081781030', 'V001_4082674886', 'V001_4084827760', 'V001_4088887341', 'V001_4094712461', 'V001_4106537555', 'V001_4108305704', 'V001_4109520080', 'V001_4121608852', 'V001_4123573285', 'V001_4123615245', 'V001_4124270229', 'V001_4126203506', 'V001_4140466495', 'V001_4140810952', 
'V001_4143125103', 'V001_4150388452', 'V001_4166397541', 'V001_4166433678', 'V001_4168313428', 'V001_4171240588', 'V001_4186665473', 'V001_4206563024', 'V001_4210651782', 'V001_4211014210', 'V001_4212854092', 'V001_4217352919', 'V001_4221461357', 'V001_4221994104', 'V001_4228922642', 'V001_4242685496', 'V001_4246085636', 'V001_4252020755', 'V001_4256177243', 'V001_4257687012', 'V001_4263563083', 'V001_4268436562', 'V001_4275037623', 'V001_4281538605', 'V001_4282752676', 'V001_4285787970', 'V001_4294476796', 'V002_0002117105', 'V002_0002197842', 'V002_0002351352', 'V002_0005013175', 'V002_0005806529', 'V002_0005963746', 'V002_0005968346', 'V002_0008294818', 'V002_0013346298', 'V002_0014961474', 'V002_0016594761', 'V002_0017356861', 'V002_0017510577', 'V002_0025909428', 'V002_0028056175', 'V002_0031697464', 'V002_0036137910', 'V002_0036522165', 'V002_0036601326', 'V002_0036727289', 'V002_0036769621', 'V002_0038274016', 'V002_0041074828', 'V002_0041215200', 'V002_0043663205', 'V002_0044598956', 'V002_0044611685', 'V002_0045891992', 'V002_0047847237', 'V002_0051320612', 'V002_0055224451', 'V002_0055838495', 'V002_0056733784', 'V002_0064445695', 'V002_0068256109', 'V002_0069580879', 'V002_0070622010', 'V002_0073837469', 'V002_0075706038', 'V002_0078001868', 'V002_0079252973', 'V002_0085598440', 'V002_0087049906', 'V002_0090391700', 'V002_0090619198', 'V002_0092937161', 'V002_0097095475', 'V002_0101593979', 'V002_0106911269', 'V002_0107431198', 'V002_0112917882', 'V002_0115709607', 'V002_0115996876', 'V002_0121183294', 'V002_0121382115', 'V002_0122417225', 'V002_0123380436', 'V002_0123408672', 'V002_0124208650', 'V002_0124397757', 'V002_0124441734', 'V002_0124914110', 'V002_0125869728', 'V002_0126618208', 'V002_0126855989', 'V002_0127398143', 'V002_0133703245', 'V002_0135271190', 'V002_0136156827', 'V002_0136435326', 'V002_0139218607', 'V002_0140060551', 'V002_0140351983', 'V002_0142424668', 'V002_0144593922', 'V002_0146196795', 'V002_0146299823', 'V002_0147876111', 
'V002_0151562134', 'V002_0152144234', 'V002_0152258078', 'V002_0154147830', 'V002_0154730408', 'V002_0157065483', 'V002_0157336679', 'V002_0157580375', 'V002_0159552230', 'V002_0161000359', 'V002_0162663606', 'V002_0164397242', 'V002_0165849227', 'V002_0170622078', 'V002_0171679647', 'V002_0173721677', 'V002_0174267330', 'V002_0178636554', 'V002_0179602451', 'V002_0180015264', 'V002_0181991042', 'V002_0183455535', 'V002_0183547088', 'V002_0183696354', 'V002_0184237617', 'V002_0186577160', 'V002_0187042771', 'V002_0187987638', 'V002_0190964592', 'V002_0192918923', 'V002_0193291397', 'V002_0193502579', 'V002_0197412723', 'V002_0200424013', 'V002_0201265131', 'V002_0201592886', 'V002_0201803447', 'V002_0202483114', 'V002_0203612717', 'V002_0206656089', 'V002_0207945637', 'V002_0208101104', 'V002_0208120899', 'V002_0208195256', 'V002_0208826360', 'V002_0211218347', 'V002_0212134839', 'V002_0212940402', 'V002_0212956319', 'V002_0216420373', 'V002_0218404297', 'V002_0220177293', 'V002_0221063052', 'V002_0221587188', 'V002_0222451528', 'V002_0224687219', 'V002_0225061183', 'V002_0229334955', 'V002_0233540474', 'V002_0235666754', 'V002_0236192059', 'V002_0240850401', 'V002_0241361609', 'V002_0244191263', 'V002_0245855424', 'V002_0247152120', 'V002_0249097683', 'V002_0252426018', 'V002_0255494285', 'V002_0255608099', 'V002_0257346698', 'V002_0269546356', 'V002_0270095585', 'V002_0274509902', 'V002_0275758160', 'V002_0276410988', 'V002_0277529080', 'V002_0283667648', 'V002_0284401908', 'V002_0286815879', 'V002_0286995523', 'V002_0287840662', 'V002_0288009534', 'V002_0291029227', 'V002_0291959871', 'V002_0292394118', 'V002_0297388135', 'V002_0297420744', 'V002_0297473891', 'V002_0297549891', 'V002_0298546074', 'V002_0299434830', 'V002_0299775990', 'V002_0299905233', 'V002_0301254103', 'V002_0307195239', 'V002_0308844411', 'V002_0309713802', 'V002_0309930562', 'V002_0311072282', 'V002_0311467742', 'V002_0316579589', 'V002_0317202847', 'V002_0320007457', 'V002_0320171280', 
'V002_0320476146', 'V002_0320756880', 'V002_0323134151', 'V002_0326311152', 'V002_0326550821', 'V002_0327005780', 'V002_0328096530', 'V002_0329678794', 'V002_0334237749', 'V002_0339065151', 'V002_0342304013', 'V002_0344825083', 'V002_0347561656', 'V002_0348969075', 'V002_0349241207', 'V002_0349763418', 'V002_0351045416', 'V002_0352143145', 'V002_0352284848', 'V002_0353392455', 'V002_0354962495', 'V002_0355737967', 'V002_0356559082', 'V002_0356619866', 'V002_0359967499', 'V002_0360168311', 'V002_0362800061', 'V002_0364121140', 'V002_0365251479', 'V002_0366772346', 'V002_0367613648', 'V002_0369457668', 'V002_0371273978', 'V002_0373986946', 'V002_0375453515', 'V002_0375856194', 'V002_0376352558', 'V002_0379229863', 'V002_0381265097', 'V002_0381335418', 'V002_0386250867', 'V002_0387191097', 'V002_0388619653', 'V002_0391695991', 'V002_0393494333', 'V002_0396263461', 'V002_0397113576', 'V002_0397133330', 'V002_0400433341', 'V002_0401164934', 'V002_0404477857', 'V002_0407240844', 'V002_0407679815', 'V002_0416354734', 'V002_0416570976', 'V002_0418507185', 'V002_0423796818', 'V002_0425622811', 'V002_0427593952', 'V002_0431573958', 'V002_0431958824', 'V002_0433761493', 'V002_0436645759', 'V002_0437071870', 'V002_0439575373', 'V002_0439632791', 'V002_0443164266', 'V002_0449062133', 'V002_0450542838', 'V002_0454501327', 'V002_0457952196', 'V002_0463361919', 'V002_0466006912', 'V002_0468560499', 'V002_0471330250', 'V002_0471981204', 'V002_0472832198', 'V002_0474491737', 'V002_0474626306', 'V002_0477858845', 'V002_0479133304', 'V002_0481273690', 'V002_0482325643', 'V002_0484801753', 'V002_0490775701', 'V002_0490846533', 'V002_0494892504', 'V002_0495746495', 'V002_0497730015', 'V002_0498462311', 'V002_0500650637', 'V002_0500653886', 'V002_0502440950', 'V002_0504497675', 'V002_0505224346', 'V002_0506752981', 'V002_0512398926', 'V002_0519676183', 'V002_0522245575', 'V002_0522890912', 'V002_0522963432', 'V002_0528535807', 'V002_0529165456', 'V002_0529927015', 'V002_0530677927', 
'V002_0530883030', 'V002_0534673604', 'V002_0534887261', 'V002_0535393527', 'V002_0535917635', 'V002_0536611169', 'V002_0537634433', 'V002_0537715966', 'V002_0544972793', 'V002_0546143274', 'V002_0547066919', 'V002_0549386648', 'V002_0556348515', 'V002_0556730709', 'V002_0557576331', 'V002_0558192303', 'V002_0558949379', 'V002_0558984562', 'V002_0559028861', 'V002_0561897620', 'V002_0562289643', 'V002_0568041619', 'V002_0568401377', 'V002_0570682911', 'V002_0572069634', 'V002_0574453382', 'V002_0575881872', 'V002_0577134847', 'V002_0577488999', 'V002_0582723337', 'V002_0582969225', 'V002_0584002867', 'V002_0585088913', 'V002_0588011622', 'V002_0590689367', 'V002_0590895370', 'V002_0595636961', 'V002_0596958579', 'V002_0599405570', 'V002_0600045286', 'V002_0600953911', 'V002_0602433805', 'V002_0602796495', 'V002_0609286048', 'V002_0610901396', 'V002_0611911538', 'V002_0614930760', 'V002_0614945368', 'V002_0615281059', 'V002_0617912383', 'V002_0618453534', 'V002_0618847727', 'V002_0620221594', 'V002_0623533003', 'V002_0627564542', 'V002_0628199522', 'V002_0632413315', 'V002_0633371878', 'V002_0639504450', 'V002_0640420811', 'V002_0640581909', 'V002_0641401452', 'V002_0642575528', 'V002_0646724413', 'V002_0650774199', 'V002_0650846407', 'V002_0651558121', 'V002_0661890045', 'V002_0664023151', 'V002_0665055691', 'V002_0668823247', 'V002_0669868207', 'V002_0671196632', 'V002_0672853065', 'V002_0673643091', 'V002_0674932509', 'V002_0675028037', 'V002_0675640729', 'V002_0676973061', 'V002_0677810268', 'V002_0681177888', 'V002_0682674466', 'V002_0684667227', 'V002_0686356742', 'V002_0688288694', 'V002_0689248805', 'V002_0691268976', 'V002_0691545174', 'V002_0691560258', 'V002_0692546736', 'V002_0693062145', 'V002_0694851989', 'V002_0696065184', 'V002_0697102328', 'V002_0698918731', 'V002_0699110679', 'V002_0700372734', 'V002_0701182045', 'V002_0701205134', 'V002_0702900679', 'V002_0704757247', 'V002_0705537418', 'V002_0707543849', 'V002_0709696188', 'V002_0709963030', 
'V002_0710459436', 'V002_0710715302', 'V002_0712869049', 'V002_0713571456', 'V002_0714672359', 'V002_0715424637', 'V002_0715590249', 'V002_0717601127', 'V002_0719206406', 'V002_0721215989', 'V002_0722035976', 'V002_0722372002', 'V002_0730274815', 'V002_0737762530', 'V002_0738985261', 'V002_0739120299', 'V002_0739470437', 'V002_0739623988', 'V002_0741417019', 'V002_0742504340', 'V002_0744890693', 'V002_0745525134', 'V002_0747559873', 'V002_0748005837', 'V002_0749007313', 'V002_0749905280', 'V002_0752077612', 'V002_0752985425', 'V002_0755207341', 'V002_0761773907', 'V002_0762476141', 'V002_0762773432', 'V002_0763342936', 'V002_0764857242', 'V002_0766109859', 'V002_0766353043', 'V002_0766488097', 'V002_0766667365', 'V002_0771397601', 'V002_0772156154', 'V002_0774101263', 'V002_0777008783', 'V002_0777654226', 'V002_0777690162', 'V002_0779333134', 'V002_0780929069', 'V002_0782068310', 'V002_0785140342', 'V002_0787782863', 'V002_0789262248', 'V002_0789483462', 'V002_0789572661', 'V002_0794325433', 'V002_0797749170', 'V002_0799985522', 'V002_0802496349', 'V002_0803188052', 'V002_0808444885', 'V002_0811855193', 'V002_0812000840', 'V002_0813150657', 'V002_0815659127', 'V002_0817110802', 'V002_0817735792', 'V002_0819079556', 'V002_0824521511', 'V002_0826731877', 'V002_0827248561', 'V002_0827932554', 'V002_0831197243', 'V002_0831886867', 'V002_0833676417', 'V002_0838097784', 'V002_0838103625', 'V002_0840708462', 'V002_0841975452', 'V002_0847458797', 'V002_0848812536', 'V002_0849930724', 'V002_0853675695', 'V002_0855146156', 'V002_0855191397', 'V002_0858741158', 'V002_0862709821', 'V002_0864479472', 'V002_0869406535', 'V002_0869495858', 'V002_0870407510', 'V002_0873160823', 'V002_0874733459', 'V002_0874996302', 'V002_0876877612', 'V002_0877591318', 'V002_0877924426', 'V002_0880057025', 'V002_0881147747', 'V002_0882398770', 'V002_0882996452', 'V002_0883055626', 'V002_0883152505', 'V002_0883592543', 'V002_0886184755', 'V002_0887255013', 'V002_0888556394', 'V002_0889387713', 
'V002_0890660406', 'V002_0893086450', 'V002_0894728006', 'V002_0895914685', 'V002_0896004525', 'V002_0896208434', 'V002_0897888454', 'V002_0897897654', 'V002_0898063991', 'V002_0899057827', 'V002_0901310965', 'V002_0901452304', 'V002_0901895362', 'V002_0901996369', 'V002_0902280313', 'V002_0902554667', 'V002_0904085833', 'V002_0904968570', 'V002_0906249778', 'V002_0906515822', 'V002_0906621455', 'V002_0907274463', 'V002_0907292919', 'V002_0907519228', 'V002_0908491866', 'V002_0910244581', 'V002_0912758648', 'V002_0916324310', 'V002_0919289185', 'V002_0920638160', 'V002_0930992660', 'V002_0931957489', 'V002_0936351733', 'V002_0939789896', 'V002_0939911045', 'V002_0941816863', 'V002_0941885516', 'V002_0943106892', 'V002_0946012451', 'V002_0946857895', 'V002_0947162777', 'V002_0948253523', 'V002_0948685110', 'V002_0949076238', 'V002_0957452382', 'V002_0958100099', 'V002_0960111321', 'V002_0965312207', 'V002_0966580765', 'V002_0968007712', 'V002_0968172904', 'V002_0972161122', 'V002_0973330237', 'V002_0973790468', 'V002_0974253878', 'V002_0976792908', 'V002_0978079902', 'V002_0980973112', 'V002_0982816213', 'V002_0984291380', 'V002_0987302788', 'V002_0987458985', 'V002_0990921045', 'V002_0991824436', 'V002_0992682883', 'V002_0992852586', 'V002_0993065000', 'V002_0993647540', 'V002_0993654361', 'V002_0993846782', 'V002_0994549967', 'V002_0995774071', 'V002_0997783389', 'V002_0998133176', 'V002_0998239314', 'V002_1000709992', 'V002_1000859300', 'V002_1001280775', 'V002_1002181678', 'V002_1002529149', 'V002_1003429911', 'V002_1004520265', 'V002_1007111171', 'V002_1007413279', 'V002_1008029581', 'V002_1008051186', 'V002_1008236852', 'V002_1009339127', 'V002_1009858584', 'V002_1010985869', 'V002_1018508422', 'V002_1020958668', 'V002_1022202283', 'V002_1023609082', 'V002_1025989171', 'V002_1029795797', 'V002_1033045749', 'V002_1033695009', 'V002_1036804803', 'V002_1038508373', 'V002_1039378109', 'V002_1041231289', 'V002_1046632512', 'V002_1047393448', 'V002_1049594700', 
'V002_1050409256', 'V002_1053439575', 'V002_1055568246', 'V002_1058941361', 'V002_1060830736', 'V002_1064340703', 'V002_1065881612', 'V002_1065899356', 'V002_1066077836', 'V002_1066185361', 'V002_1066660417', 'V002_1066996033', 'V002_1068216568', 'V002_1068855613', 'V002_1070113831', 'V002_1070587920', 'V002_1070677845', 'V002_1070910839', 'V002_1071830634', 'V002_1072388517', 'V002_1075038374', 'V002_1075253673', 'V002_1076877592', 'V002_1078320106', 'V002_1078465970', 'V002_1080591451', 'V002_1081441899', 'V002_1082231071', 'V002_1082731566', 'V002_1083030933', 'V002_1083407701', 'V002_1085134270', 'V002_1088248497', 'V002_1088675664', 'V002_1097882258', 'V002_1100305498', 'V002_1106291932', 'V002_1107142644', 'V002_1112183131', 'V002_1114073255', 'V002_1115431892', 'V002_1116894993', 'V002_1117000236', 'V002_1117487344', 'V002_1119442781', 'V002_1120605606', 'V002_1120994934', 'V002_1121972280', 'V002_1122490419', 'V002_1122729191', 'V002_1133471068', 'V002_1134949730', 'V002_1136062616', 'V002_1137354832', 'V002_1142448779', 'V002_1143050071', 'V002_1143301161', 'V002_1145883393', 'V002_1146376911', 'V002_1148449684', 'V002_1152634721', 'V002_1157367812', 'V002_1157722563', 'V002_1161068151', 'V002_1163758061', 'V002_1164245197', 'V002_1169057347', 'V002_1169408395', 'V002_1170600948', 'V002_1170820414', 'V002_1171685376', 'V002_1173986975', 'V002_1174058357', 'V002_1176803230', 'V002_1178443436', 'V002_1178784816', 'V002_1181172233', 'V002_1186993555', 'V002_1187389016', 'V002_1188436296', 'V002_1188828155', 'V002_1191083747', 'V002_1191550853', 'V002_1191882305', 'V002_1196212628', 'V002_1196736452', 'V002_1197063482', 'V002_1198951072', 'V002_1202730641', 'V002_1202743448', 'V002_1205284632', 'V002_1209949477', 'V002_1210856282', 'V002_1212295032', 'V002_1212981693', 'V002_1218156908', 'V002_1228297767', 'V002_1229388246', 'V002_1235114183', 'V002_1235612760', 'V002_1236848877', 'V002_1237287650', 'V002_1240342045', 'V002_1240493706', 'V002_1246028851', 
'V002_1247020544', 'V002_1248657521', 'V002_1249700098', 'V002_1253230784', 'V002_1253801426', 'V002_1260482987', 'V002_1262658831', 'V002_1263437851', 'V002_1264119338', 'V002_1264486747', 'V002_1265421546', 'V002_1266895272', 'V002_1269018538', 'V002_1269203946', 'V002_1270080989', 'V002_1273721458', 'V002_1277569702', 'V002_1281813293', 'V002_1285049543', 'V002_1287155800', 'V002_1289485843', 'V002_1289488851', 'V002_1292575885', 'V002_1293127302', 'V002_1293431235', 'V002_1294058734', 'V002_1294106842', 'V002_1295306159', 'V002_1295989128', 'V002_1298822007', 'V002_1301128863', 'V002_1308296931', 'V002_1308720165', 'V002_1309065653', 'V002_1309707555', 'V002_1311545669', 'V002_1312385532', 'V002_1316910582', 'V002_1318237107', 'V002_1318788526', 'V002_1320535007', 'V002_1320687521', 'V002_1325895197', 'V002_1326308943', 'V002_1329926707', 'V002_1330744321', 'V002_1331018754', 'V002_1332015085', 'V002_1336764573', 'V002_1339592177', 'V002_1341090862', 'V002_1344134886', 'V002_1347264631', 'V002_1347458931', 'V002_1349535528', 'V002_1353148957', 'V002_1354177452', 'V002_1355759801', 'V002_1356021856', 'V002_1357490755', 'V002_1357925130', 'V002_1361922149', 'V002_1363276107', 'V002_1365080283', 'V002_1365865617', 'V002_1366766588', 'V002_1367979130', 'V002_1373253143', 'V002_1373765277', 'V002_1374001193', 'V002_1378083990', 'V002_1379815612', 'V002_1380548079', 'V002_1382454387', 'V002_1382987701', 'V002_1384478526', 'V002_1385655506', 'V002_1386034662', 'V002_1387045120', 'V002_1388560949', 'V002_1388811281', 'V002_1390535800', 'V002_1390694969', 'V002_1393908005', 'V002_1394285671', 'V002_1396609592', 'V002_1401831317', 'V002_1403635179', 'V002_1405429530', 'V002_1405699923', 'V002_1406349153', 'V002_1406741965', 'V002_1407348334', 'V002_1407504929', 'V002_1412941061', 'V002_1413294294', 'V002_1416898965', 'V002_1418630397', 'V002_1419526270', 'V002_1420470519', 'V002_1421692056', 'V002_1422994904', 'V002_1423569515', 'V002_1423629975', 'V002_1424985686', 
'V002_1426895494', 'V002_1427118418', 'V002_1427436531', 'V002_1430314246', 'V002_1430610583', 'V002_1433942915', 'V002_1434825852', 'V002_1435585809', 'V002_1436128392', 'V002_1438872462', 'V002_1439360023', 'V002_1442106462', 'V002_1444408466', 'V002_1447990817', 'V002_1448062513', 'V002_1452730243', 'V002_1455769088', 'V002_1458188059', 'V002_1460862445', 'V002_1464957504', 'V002_1465367789', 'V002_1466103036', 'V002_1467259341', 'V002_1468124531', 'V002_1468737851', 'V002_1470782295', 'V002_1471437462', 'V002_1472026878', 'V002_1475924825', 'V002_1479582878', 'V002_1480031366', 'V002_1480550851', 'V002_1483521522', 'V002_1483593429', 'V002_1484273854', 'V002_1485799646', 'V002_1486943207', 'V002_1488063327', 'V002_1488383297', 'V002_1490178217', 'V002_1493845431', 'V002_1495123744', 'V002_1495324089', 'V002_1497759269', 'V002_1502369794', 'V002_1502601422', 'V002_1504218472', 'V002_1505796060', 'V002_1506704955', 'V002_1506960961', 'V002_1507679478', 'V002_1509154779', 'V002_1512217307', 'V002_1513953083', 'V002_1518048337', 'V002_1518414331', 'V002_1519209633', 'V002_1520770528', 'V002_1522030901', 'V002_1523354335', 'V002_1524423095', 'V002_1526381145', 'V002_1526540519', 'V002_1527046816', 'V002_1527683448', 'V002_1528845248', 'V002_1528854324', 'V002_1528980257', 'V002_1531539637', 'V002_1533987657', 'V002_1535964966', 'V002_1543294289', 'V002_1545107099', 'V002_1545551684', 'V002_1547383939', 'V002_1548417143', 'V002_1550590473', 'V002_1552725169', 'V002_1553849179', 'V002_1554411255', 'V002_1555200269', 'V002_1556550310', 'V002_1556876927', 'V002_1557109227', 'V002_1557928272', 'V002_1558458374', 'V002_1559658353', 'V002_1560183854', 'V002_1560409961', 'V002_1560424992', 'V002_1562167099', 'V002_1569892532', 'V002_1571203620', 'V002_1571861011', 'V002_1573620798', 'V002_1574858433', 'V002_1577001924', 'V002_1581686429', 'V002_1582093994', 'V002_1583289730', 'V002_1584081213', 'V002_1584847559', 'V002_1585588569', 'V002_1586243165', 'V002_1586724393', 
'V002_1589060799', 'V002_1589123959', 'V002_1590826836', 'V002_1591309206', 'V002_1592747066', 'V002_1593048258', 'V002_1593332672', 'V002_1593945935', 'V002_1594046931', 'V002_1594306630', 'V002_1594770319', 'V002_1594925832', 'V002_1596128708', 'V002_1596519151', 'V002_1597771478', 'V002_1600167391', 'V002_1601090290', 'V002_1603168782', 'V002_1603174376', 'V002_1605338098', 'V002_1605747761', 'V002_1606267892', 'V002_1607338256', 'V002_1612851756', 'V002_1616621058', 'V002_1617855742', 'V002_1620591436', 'V002_1620721556', 'V002_1623219027', 'V002_1624309694', 'V002_1625939675', 'V002_1626599408', 'V002_1630024514', 'V002_1632730497', 'V002_1635614714', 'V002_1636969792', 'V002_1638273429', 'V002_1638275283', 'V002_1638620820', 'V002_1639118241', 'V002_1639368298', 'V002_1640617843', 'V002_1643244390', 'V002_1643463549', 'V002_1643940530', 'V002_1644911073', 'V002_1645491156', 'V002_1646135716', 'V002_1646467648', 'V002_1647002546', 'V002_1648654142', 'V002_1649880165', 'V002_1650052222', 'V002_1651783767', 'V002_1651812722', 'V002_1651868070', 'V002_1651983355', 'V002_1653439723', 'V002_1654663578', 'V002_1656891150', 'V002_1659003112', 'V002_1660978191', 'V002_1662483407', 'V002_1664847291', 'V002_1665594243', 'V002_1665800749', 'V002_1668224628', 'V002_1673979258', 'V002_1674064433', 'V002_1674086296', 'V002_1674558150', 'V002_1676912856', 'V002_1680394105', 'V002_1681128850', 'V002_1681342275', 'V002_1683418433', 'V002_1685360064', 'V002_1685657932', 'V002_1687648100', 'V002_1688487282', 'V002_1688687361', 'V002_1690563968', 'V002_1691089616', 'V002_1691866623', 'V002_1694182567', 'V002_1696559279', 'V002_1700094419', 'V002_1703501196', 'V002_1704271969', 'V002_1704286734', 'V002_1704465432', 'V002_1704919900', 'V002_1705144139', 'V002_1705149340', 'V002_1705606028', 'V002_1705816815', 'V002_1706928067', 'V002_1707915966', 'V002_1708873765', 'V002_1709092712', 'V002_1710087492', 'V002_1710555480', 'V002_1711423748', 'V002_1712502873', 'V002_1716389219', 
'V002_1716996602', 'V002_1718045845', 'V002_1720222120', 'V002_1720840562', 'V002_1724865620', 'V002_1725746334', 'V002_1725974875', 'V002_1727741125', 'V002_1730119726', 'V002_1731294902', 'V002_1734945779', 'V002_1735478421', 'V002_1738276908', 'V002_1739268512', 'V002_1740177833', 'V002_1742658037', 'V002_1742800723', 'V002_1744637161', 'V002_1747594647', 'V002_1748632751', 'V002_1749865508', 'V002_1751860511', 'V002_1752423640', 'V002_1753425375', 'V002_1753997884', 'V002_1754787777', 'V002_1757005423', 'V002_1757486515', 'V002_1760816625', 'V002_1768545558', 'V002_1768897133', 'V002_1770118143', 'V002_1770381161', 'V002_1770489489', 'V002_1772557005', 'V002_1774257347', 'V002_1774348101', 'V002_1775860689', 'V002_1777630211', 'V002_1777832832', 'V002_1778090644', 'V002_1779155714', 'V002_1782600428', 'V002_1783672766', 'V002_1787560070', 'V002_1789069970', 'V002_1789445465', 'V002_1791251732', 'V002_1791690283', 'V002_1793064578', 'V002_1795113704', 'V002_1798216704', 'V002_1803869342', 'V002_1808231566', 'V002_1808292828', 'V002_1810715222', 'V002_1813324575', 'V002_1813775445', 'V002_1814947313', 'V002_1815197728', 'V002_1815390874', 'V002_1816710602', 'V002_1820217122', 'V002_1823331428', 'V002_1828238716', 'V002_1829266598', 'V002_1829709543', 'V002_1830145246', 'V002_1833585159', 'V002_1840211966', 'V002_1840558569', 'V002_1840719534', 'V002_1843166947', 'V002_1845781056', 'V002_1847204875', 'V002_1853581512', 'V002_1854017001', 'V002_1856292166', 'V002_1858040048', 'V002_1859660548', 'V002_1861566137', 'V002_1865212979', 'V002_1865238998', 'V002_1872708897', 'V002_1874380475', 'V002_1875523970', 'V002_1877608953', 'V002_1879720124', 'V002_1880426633', 'V002_1890124356', 'V002_1890211044', 'V002_1890419758', 'V002_1893440342', 'V002_1893797507', 'V002_1896099386', 'V002_1897231736', 'V002_1899415753', 'V002_1900462823', 'V002_1901552367', 'V002_1901778455', 'V002_1902394338', 'V002_1910885723', 'V002_1911588233', 'V002_1913108307', 'V002_1914181332', 
'V002_1915324294', 'V002_1917173746', 'V002_1918645971', 'V002_1919045378', 'V002_1920997654', 'V002_1921010431', 'V002_1921732832', 'V002_1925548101', 'V002_1926875306', 'V002_1927144130', 'V002_1927212231', 'V002_1927346191', 'V002_1928085130', 'V002_1928469536', 'V002_1928787714', 'V002_1929214112', 'V002_1932589421', 'V002_1932808724', 'V002_1933529509', 'V002_1935535639', 'V002_1939735380', 'V002_1939753661', 'V002_1944780843', 'V002_1944970221', 'V002_1946612065', 'V002_1946905022', 'V002_1947771148', 'V002_1949548880', 'V002_1951635574', 'V002_1953099203', 'V002_1953858043', 'V002_1956192343', 'V002_1958218190', 'V002_1959375803', 'V002_1959864380', 'V002_1960016989', 'V002_1961061922', 'V002_1961090634', 'V002_1962523506', 'V002_1962684636', 'V002_1964031888', 'V002_1964258086', 'V002_1965806128', 'V002_1971933797', 'V002_1975307534', 'V002_1981308081', 'V002_1986674913', 'V002_1987496385', 'V002_1988081545', 'V002_1990927005', 'V002_1991434403', 'V002_1993733794', 'V002_1994819906', 'V002_1995256818', 'V002_1996733304', 'V002_1997967109', 'V002_2002991053', 'V002_2005149183', 'V002_2005445821', 'V002_2006376018', 'V002_2016933563', 'V002_2017063719', 'V002_2020913260', 'V002_2021313310', 'V002_2021996680', 'V002_2025043071', 'V002_2029948185', 'V002_2030991815', 'V002_2031965940', 'V002_2032235438', 'V002_2032909832', 'V002_2034848401', 'V002_2036520132', 'V002_2036604277', 'V002_2036983534', 'V002_2039220578', 'V002_2039623996', 'V002_2041338521', 'V002_2042201587', 'V002_2044397839', 'V002_2045096561', 'V002_2048263554', 'V002_2050433076', 'V002_2051586698', 'V002_2053463990', 'V002_2053640689', 'V002_2056417306', 'V002_2058410787', 'V002_2062456579', 'V002_2063531755', 'V002_2063657219', 'V002_2066172631', 'V002_2070264303', 'V002_2071754556', 'V002_2071997602', 'V002_2077679769', 'V002_2079897137', 'V002_2080267358', 'V002_2083322837', 'V002_2084947828', 'V002_2084967128', 'V002_2085030896', 'V002_2085487790', 'V002_2086666846', 'V002_2086756963', 
'V002_2089089877', 'V002_2090355376', 'V002_2091081269', 'V002_2093535232', 'V002_2094345995', 'V002_2094464750', 'V002_2094835975', 'V002_2094978340', 'V002_2097256259', 'V002_2099261029', 'V002_2102758249', 'V002_2103906936', 'V002_2107611438', 'V002_2109818404', 'V002_2110873556', 'V002_2111319132', 'V002_2111579346', 'V002_2112102794', 'V002_2115021522', 'V002_2115731829', 'V002_2116315882', 'V002_2119692438', 'V002_2122236831', 'V002_2122276371', 'V002_2124445036', 'V002_2125705115', 'V002_2130025612', 'V002_2134045439', 'V002_2137562453', 'V002_2137672754', 'V002_2139776609', 'V002_2141355194', 'V002_2143429520', 'V002_2145663573', 'V002_2147070730', 'V002_2152692460', 'V002_2153139576', 'V002_2153397867', 'V002_2156234991', 'V002_2156630881', 'V002_2157983123', 'V002_2161706955', 'V002_2162453759', 'V002_2163281401', 'V002_2165071824', 'V002_2165299265', 'V002_2166522135', 'V002_2172757020', 'V002_2174868746', 'V002_2177073409', 'V002_2179084311', 'V002_2179548810', 'V002_2180135055', 'V002_2183496559', 'V002_2186611685', 'V002_2186857533', 'V002_2187472405', 'V002_2188069091', 'V002_2193489175', 'V002_2194500412', 'V002_2194791642', 'V002_2196745471', 'V002_2198646004', 'V002_2201032988', 'V002_2203468301', 'V002_2204274268', 'V002_2208405880', 'V002_2209558063', 'V002_2209646323', 'V002_2210211678', 'V002_2212657562', 'V002_2212903161', 'V002_2216903522', 'V002_2218201118', 'V002_2219991919', 'V002_2220116935', 'V002_2221567069', 'V002_2225535656', 'V002_2226704622', 'V002_2227310038', 'V002_2229306457', 'V002_2231035658', 'V002_2235571692', 'V002_2239597194', 'V002_2239964066', 'V002_2241877736', 'V002_2242365296', 'V002_2244244385', 'V002_2244355299', 'V002_2246147401', 'V002_2246611878', 'V002_2247924574', 'V002_2251115511', 'V002_2252538703', 'V002_2252778396', 'V002_2254379714', 'V002_2257556339', 'V002_2258992853', 'V002_2260949931', 'V002_2262701470', 'V002_2263324937', 'V002_2263360045', 'V002_2264170216', 'V002_2265925149', 'V002_2267955109', 
'V002_2269924837', 'V002_2273236930', 'V002_2273643129', 'V002_2274372382', 'V002_2275940867', 'V002_2280013929', 'V002_2282051240', 'V002_2282359407', 'V002_2283488697', 'V002_2286061408', 'V002_2286217435', 'V002_2286730729', 'V002_2289925880', 'V002_2290709597', 'V002_2291207850', 'V002_2292678957', 'V002_2296648864', 'V002_2297313451', 'V002_2297965623', 'V002_2298395469', 'V002_2299001906', 'V002_2300496729', 'V002_2301856604', 'V002_2303258869', 'V002_2303930775', 'V002_2309295183', 'V002_2311186335', 'V002_2311429926', 'V002_2311772051', 'V002_2311842694', 'V002_2316864420', 'V002_2319681891', 'V002_2319741692', 'V002_2321316498', 'V002_2321509927', 'V002_2325200426', 'V002_2325969194', 'V002_2330187247', 'V002_2330750842', 'V002_2333140109', 'V002_2334936412', 'V002_2335652535', 'V002_2336206518', 'V002_2336627624', 'V002_2337543877', 'V002_2338517671', 'V002_2339556794', 'V002_2339569080', 'V002_2341911063', 'V002_2342689308', 'V002_2343069482', 'V002_2343206494', 'V002_2343513463', 'V002_2343967095', 'V002_2345361193', 'V002_2349638003', 'V002_2351831539', 'V002_2352197962', 'V002_2357469664', 'V002_2359079173', 'V002_2360300089', 'V002_2367158928', 'V002_2369198417', 'V002_2375630099', 'V002_2376471517', 'V002_2376683564', 'V002_2378014342', 'V002_2383327261', 'V002_2385138403', 'V002_2392419591', 'V002_2392996413', 'V002_2396131513', 'V002_2396333156', 'V002_2397394626', 'V002_2401465719', 'V002_2401769871', 'V002_2405055514', 'V002_2406333292', 'V002_2407054011', 'V002_2409999570', 'V002_2410633901', 'V002_2410842892', 'V002_2413252735', 'V002_2417273350', 'V002_2418985993', 'V002_2419754941', 'V002_2420898386', 'V002_2421975969', 'V002_2423836285', 'V002_2424518145', 'V002_2425192010', 'V002_2425330566', 'V002_2425740720', 'V002_2428984828', 'V002_2430228287', 'V002_2430575120', 'V002_2431131483', 'V002_2433344614', 'V002_2433566091', 'V002_2434682882', 'V002_2436500134', 'V002_2438176718', 'V002_2439315430', 'V002_2441051072', 'V002_2441074433', 
'V002_2442954554', 'V002_2445599111', 'V002_2445603539', 'V002_2448424485', 'V002_2449138982', 'V002_2450342451', 'V002_2450670815', 'V002_2451539827', 'V002_2452373319', 'V002_2452699289', 'V002_2453298459', 'V002_2455481485', 'V002_2458475768', 'V002_2458809579', 'V002_2464293228', 'V002_2466396061', 'V002_2468440511', 'V002_2468963522', 'V002_2468990291', 'V002_2471758837', 'V002_2471904192', 'V002_2475914594', 'V002_2476355734', 'V002_2477989722', 'V002_2478018580', 'V002_2478361329', 'V002_2479331902', 'V002_2480223166', 'V002_2480557312', 'V002_2482835442', 'V002_2485550033', 'V002_2488935906', 'V002_2493007723', 'V002_2496264041', 'V002_2496722281', 'V002_2501463911', 'V002_2501916538', 'V002_2503733526', 'V002_2504462253', 'V002_2506719580', 'V002_2510275966', 'V002_2511050066', 'V002_2511278323', 'V002_2511776947', 'V002_2512645875', 'V002_2513561633', 'V002_2517779235', 'V002_2520018055', 'V002_2520031520', 'V002_2524051024', 'V002_2526313598', 'V002_2529034451', 'V002_2529893143', 'V002_2532523149', 'V002_2534768358', 'V002_2535031363', 'V002_2536086600', 'V002_2536793484', 'V002_2536930525', 'V002_2539895969', 'V002_2541482516', 'V002_2542912588', 'V002_2544508499', 'V002_2544702864', 'V002_2545357112', 'V002_2550692759', 'V002_2553944632', 'V002_2555726114', 'V002_2558169391', 'V002_2559885727', 'V002_2560286004', 'V002_2560986078', 'V002_2563724795', 'V002_2565810373', 'V002_2567374062', 'V002_2569301356', 'V002_2569524256', 'V002_2569715723', 'V002_2570885463', 'V002_2571610252', 'V002_2573466548', 'V002_2573684124', 'V002_2574115277', 'V002_2574357994', 'V002_2574572299', 'V002_2576671420', 'V002_2578214619', 'V002_2579245888', 'V002_2579678893', 'V002_2579785925', 'V002_2579946543', 'V002_2581293328', 'V002_2581310185', 'V002_2583117412', 'V002_2583656811', 'V002_2584959213', 'V002_2585749054', 'V002_2585857673', 'V002_2586972692', 'V002_2588659959', 'V002_2594093513', 'V002_2597105572', 'V002_2599349912', 'V002_2601751022', 'V002_2603677267', 
'V002_2608859869', 'V002_2609136299', 'V002_2613246306', 'V002_2614127229', 'V002_2614479132', 'V002_2619992203', 'V002_2625211154', 'V002_2626990645', 'V002_2627787087', 'V002_2631939527', 'V002_2633000303', 'V002_2635026517', 'V002_2636247702', 'V002_2637031515', 'V002_2637201862', 'V002_2638387435', 'V002_2638701290', 'V002_2639921555', 'V002_2642408544', 'V002_2643835693', 'V002_2646247120', 'V002_2646301807', 'V002_2649768012', 'V002_2649781638', 'V002_2649934399', 'V002_2651362190', 'V002_2652389044', 'V002_2652562876', 'V002_2655501767', 'V002_2655798834', 'V002_2657497779', 'V002_2657603551', 'V002_2658693294', 'V002_2658963781', 'V002_2659823348', 'V002_2662039186', 'V002_2662163609', 'V002_2666417234', 'V002_2669843682', 'V002_2673340328', 'V002_2675582139', 'V002_2675738580', 'V002_2678956601', 'V002_2679266454', 'V002_2682320078', 'V002_2682506118', 'V002_2683403731', 'V002_2683575308', 'V002_2683741805', 'V002_2687830605', 'V002_2688856811', 'V002_2689359249', 'V002_2690273129', 'V002_2692178520', 'V002_2696341915', 'V002_2696951822', 'V002_2699798619', 'V002_2699889813', 'V002_2701035171', 'V002_2701285926', 'V002_2701378687', 'V002_2703109220', 'V002_2706870355', 'V002_2707247914', 'V002_2707432129', 'V002_2707755432', 'V002_2713414423', 'V002_2716226613', 'V002_2716418886', 'V002_2724111168', 'V002_2726139953', 'V002_2733780314', 'V002_2734066465', 'V002_2735712212', 'V002_2737527715', 'V002_2738496206', 'V002_2741170144', 'V002_2741830838', 'V002_2741868526', 'V002_2742162291', 'V002_2742520245', 'V002_2742993386', 'V002_2746498359', 'V002_2747281549', 'V002_2749548158', 'V002_2750750385', 'V002_2751116430', 'V002_2752768356', 'V002_2753821278', 'V002_2753886915', 'V002_2754224778', 'V002_2758610175', 'V002_2760821816', 'V002_2761437006', 'V002_2762061444', 'V002_2763512180', 'V002_2763846643', 'V002_2765426820', 'V002_2768777602', 'V002_2776559458', 'V002_2779479189', 'V002_2783914580', 'V002_2784494460', 'V002_2785347120', 'V002_2785397767', 
'V002_2785790820', 'V002_2786035253', 'V002_2786549360', 'V002_2786659494', 'V002_2788409132', 'V002_2791262997', 'V002_2793043544', 'V002_2794776420', 'V002_2796801748', 'V002_2800445743', 'V002_2800563895', 'V002_2803759247', 'V002_2804791304', 'V002_2805364925', 'V002_2807470823', 'V002_2807931124', 'V002_2808886413', 'V002_2809262912', 'V002_2810816646', 'V002_2812173348', 'V002_2814687326', 'V002_2815571140', 'V002_2816972333', 'V002_2819679443', 'V002_2821225217', 'V002_2821248692', 'V002_2824203799', 'V002_2827704856', 'V002_2834298787', 'V002_2835145648', 'V002_2835619638', 'V002_2838792578', 'V002_2841055519', 'V002_2841979058', 'V002_2843739260', 'V002_2844466730', 'V002_2845707597', 'V002_2846791061', 'V002_2847735612', 'V002_2848203132', 'V002_2849190765', 'V002_2849369904', 'V002_2851652335', 'V002_2852450930', 'V002_2852501411', 'V002_2855079287', 'V002_2855921381', 'V002_2856519795', 'V002_2856955233', 'V002_2857510010', 'V002_2857717979', 'V002_2860237947', 'V002_2860544624', 'V002_2861991334', 'V002_2862425589', 'V002_2864858358', 'V002_2866035029', 'V002_2866970981', 'V002_2869719417', 'V002_2872734352', 'V002_2873629434', 'V002_2874279722', 'V002_2876255074', 'V002_2879092957', 'V002_2879660523', 'V002_2883753184', 'V002_2884378163', 'V002_2884688290', 'V002_2884849745', 'V002_2886707194', 'V002_2887142764', 'V002_2889614718', 'V002_2892268640', 'V002_2893099904', 'V002_2894893416', 'V002_2895893815', 'V002_2896025296', 'V002_2896826394', 'V002_2897144282', 'V002_2897297931', 'V002_2898496442', 'V002_2900970140', 'V002_2903040522', 'V002_2903251596', 'V002_2908265363', 'V002_2908467832', 'V002_2910757435', 'V002_2910891907', 'V002_2911185872', 'V002_2911712976', 'V002_2913390880', 'V002_2915144807', 'V002_2915179158', 'V002_2915299873', 'V002_2916290962', 'V002_2916390833', 'V002_2916441072', 'V002_2917405619', 'V002_2917439886', 'V002_2917482698', 'V002_2918604985', 'V002_2919025644', 'V002_2919660812', 'V002_2920577760', 'V002_2922502639', 
'V002_2922783168', 'V002_2923256596', 'V002_2923785626', 'V002_2924335104', 'V002_2924702911', 'V002_2924721700', 'V002_2927442629', 'V002_2928396929', 'V002_2928699496', 'V002_2929024322', 'V002_2929663755', 'V002_2930167508', 'V002_2931175305', 'V002_2932451188', 'V002_2932458284', 'V002_2938486957', 'V002_2939733354', 'V002_2944343680', 'V002_2944620393', 'V002_2947963433', 'V002_2948863317', 'V002_2949227977', 'V002_2952811928', 'V002_2953302336', 'V002_2955582993', 'V002_2957351688', 'V002_2957800666', 'V002_2960806659', 'V002_2962784595', 'V002_2963836044', 'V002_2965115956', 'V002_2969406414', 'V002_2973421115', 'V002_2973479925', 'V002_2973730379', 'V002_2974228193', 'V002_2975149772', 'V002_2977127343', 'V002_2980848376', 'V002_2981019252', 'V002_2982558804', 'V002_2984070530', 'V002_2985017478', 'V002_2987968735', 'V002_2988203016', 'V002_2996503376', 'V002_2997226108', 'V002_2997829558', 'V002_2998702032', 'V002_2999601229', 'V002_3001347120', 'V002_3003511552', 'V002_3007667230', 'V002_3012997349', 'V002_3014181363', 'V002_3019393728', 'V002_3021400221', 'V002_3022809013', 'V002_3026734924', 'V002_3028235496', 'V002_3029736120', 'V002_3030259099', 'V002_3030960263', 'V002_3031301581', 'V002_3031889251', 'V002_3033780459', 'V002_3035827835', 'V002_3036552318', 'V002_3037756405', 'V002_3039174000', 'V002_3039276379', 'V002_3040608612', 'V002_3040811090', 'V002_3041412826', 'V002_3043097739', 'V002_3044454290', 'V002_3046698288', 'V002_3047786703', 'V002_3049126612', 'V002_3049909721', 'V002_3052257479', 'V002_3054000763', 'V002_3054963746', 'V002_3056267216', 'V002_3059660444', 'V002_3061058803', 'V002_3061143019', 'V002_3061910666', 'V002_3066854235', 'V002_3068318777', 'V002_3069201486', 'V002_3069371438', 'V002_3070010548', 'V002_3072662043', 'V002_3073028825', 'V002_3073384599', 'V002_3073732945', 'V002_3074068341', 'V002_3074264617', 'V002_3074670914', 'V002_3076811902', 'V002_3078394870', 'V002_3084475322', 'V002_3085111275', 'V002_3086117986', 
'V002_3095727417', 'V002_3096691171', 'V002_3097721495', 'V002_3098080758', 'V002_3099217823', 'V002_3100198099', 'V002_3101893229', 'V002_3102611491', 'V002_3103439045', 'V002_3104825158', 'V002_3106381586', 'V002_3106441814', 'V002_3112242776', 'V002_3112643934', 'V002_3115075631', 'V002_3120056729', 'V002_3121388095', 'V002_3122480612', 'V002_3123966166', 'V002_3128226876', 'V002_3135312101', 'V002_3139909712', 'V002_3142774774', 'V002_3143318729', 'V002_3147185401', 'V002_3150002019', 'V002_3153791817', 'V002_3159323068', 'V002_3160417620', 'V002_3162250402', 'V002_3162722752', 'V002_3163043611', 'V002_3164760928', 'V002_3165708505', 'V002_3166250838', 'V002_3169849818', 'V002_3170080596', 'V002_3171114042', 'V002_3171174044', 'V002_3174623406', 'V002_3175856731', 'V002_3180129478', 'V002_3181274661', 'V002_3181668771', 'V002_3181719364', 'V002_3184058419', 'V002_3184102283', 'V002_3185953217', 'V002_3186127831', 'V002_3186171181', 'V002_3189585388', 'V002_3196178036', 'V002_3200015915', 'V002_3201253669', 'V002_3201299292', 'V002_3204475177', 'V002_3205495211', 'V002_3205780313', 'V002_3206791230', 'V002_3208398477', 'V002_3211400399', 'V002_3212049180', 'V002_3212125236', 'V002_3218318768', 'V002_3219591824', 'V002_3219881695', 'V002_3220462579', 'V002_3225768347', 'V002_3226333965', 'V002_3226439088', 'V002_3226597065', 'V002_3227287445', 'V002_3227684671', 'V002_3229076139', 'V002_3230727667', 'V002_3232278675', 'V002_3233360232', 'V002_3235371964', 'V002_3235710820', 'V002_3235914253', 'V002_3238207740', 'V002_3239673046', 'V002_3240790889', 'V002_3241806874', 'V002_3242175733', 'V002_3243413375', 'V002_3244606849', 'V002_3244742642', 'V002_3249632693', 'V002_3251796916', 'V002_3252385886', 'V002_3252551977', 'V002_3254116158', 'V002_3256981268', 'V002_3259149616', 'V002_3259165662', 'V002_3259176605', 'V002_3259333294', 'V002_3260573024', 'V002_3261867230', 'V002_3263178717', 'V002_3263389930', 'V002_3266990820', 'V002_3267919573', 'V002_3268658592', 
'V002_3270258197', 'V002_3272117408', 'V002_3273011346', 'V002_3273918681', 'V002_3275413374', 'V002_3275882265', 'V002_3277875026', 'V002_3280432754', 'V002_3282721741', 'V002_3283493886', 'V002_3284550107', 'V002_3287712679', 'V002_3291008408', 'V002_3291332283', 'V002_3295129490', 'V002_3296547703', 'V002_3297131933', 'V002_3300657952', 'V002_3302951420', 'V002_3303472726', 'V002_3304998982', 'V002_3307393927', 'V002_3313115428', 'V002_3313304542', 'V002_3315778771', 'V002_3316064056', 'V002_3317231072', 'V002_3317585219', 'V002_3318136657', 'V002_3321027408', 'V002_3321229190', 'V002_3327945740', 'V002_3327971637', 'V002_3328818499', 'V002_3332322376', 'V002_3337603790', 'V002_3339013360', 'V002_3339158908', 'V002_3342195906', 'V002_3342275204', 'V002_3347496967', 'V002_3348202749', 'V002_3349843721', 'V002_3352034768', 'V002_3357090197', 'V002_3358406111', 'V002_3359044577', 'V002_3359982558', 'V002_3361173278', 'V002_3362269158', 'V002_3364242818', 'V002_3364284972', 'V002_3364806043', 'V002_3367793405', 'V002_3368242102', 'V002_3368409468', 'V002_3370144713', 'V002_3371901172', 'V002_3371983752', 'V002_3373174473', 'V002_3375996008', 'V002_3377619970', 'V002_3378064450', 'V002_3383181906', 'V002_3385534392', 'V002_3388662534', 'V002_3389325538', 'V002_3389398233', 'V002_3390824469', 'V002_3391469112', 'V002_3394072290', 'V002_3404214008', 'V002_3407338551', 'V002_3407864191', 'V002_3409007145', 'V002_3409298166', 'V002_3411540143', 'V002_3412481129', 'V002_3414596485', 'V002_3414724435', 'V002_3416267475', 'V002_3416363690', 'V002_3418176052', 'V002_3418574202', 'V002_3421243326', 'V002_3424008669', 'V002_3426498211', 'V002_3428213366', 'V002_3431781657', 'V002_3433101101', 'V002_3433744739', 'V002_3436511963', 'V002_3440116059', 'V002_3440973993', 'V002_3441429715', 'V002_3442711206', 'V002_3442954804', 'V002_3444384811', 'V002_3447521154', 'V002_3448443642', 'V002_3450128820', 'V002_3451518030', 'V002_3451631763', 'V002_3451993461', 'V002_3454384057', 
'V002_3454498389', 'V002_3454980684', 'V002_3455404258', 'V002_3457568057', 'V002_3459231681', 'V002_3460754382', 'V002_3463144025', 'V002_3463179213', 'V002_3466762035', 'V002_3468003149', 'V002_3468876100', 'V002_3471000996', 'V002_3471413018', 'V002_3472825016', 'V002_3475929404', 'V002_3476817276', 'V002_3478773502', 'V002_3479811140', 'V002_3480838915', 'V002_3481623160', 'V002_3483462420', 'V002_3484444772', 'V002_3487954598', 'V002_3489758299', 'V002_3492283988', 'V002_3496940283', 'V002_3498590083', 'V002_3501499336', 'V002_3503564455', 'V002_3506297141', 'V002_3506963252', 'V002_3507078827', 'V002_3513487762', 'V002_3514111952', 'V002_3516342746', 'V002_3516569183', 'V002_3517406327', 'V002_3523229620', 'V002_3523315584', 'V002_3524902556', 'V002_3524925726', 'V002_3526161632', 'V002_3529786491', 'V002_3529942584', 'V002_3533626436', 'V002_3534133537', 'V002_3534241171', 'V002_3538007353', 'V002_3539340978', 'V002_3539519212', 'V002_3541055192', 'V002_3542168490', 'V002_3543050581', 'V002_3544706342', 'V002_3545582447', 'V002_3547165378', 'V002_3547452053', 'V002_3547548047', 'V002_3547823408', 'V002_3547991592', 'V002_3548191761', 'V002_3550769255', 'V002_3550939959', 'V002_3552627236', 'V002_3552943591', 'V002_3553165542', 'V002_3554882366', 'V002_3556602737', 'V002_3559482644', 'V002_3560782873', 'V002_3562318294', 'V002_3563056030', 'V002_3563260632', 'V002_3563308324', 'V002_3564651593', 'V002_3565219493', 'V002_3565835223', 'V002_3569017479', 'V002_3572181261', 'V002_3572684586', 'V002_3572882975', 'V002_3573219493', 'V002_3575676458', 'V002_3578040767', 'V002_3578109098', 'V002_3579727321', 'V002_3580299592', 'V002_3580390390', 'V002_3582217771', 'V002_3582778189', 'V002_3583234388', 'V002_3583799362', 'V002_3584620542', 'V002_3587874281', 'V002_3588526752', 'V002_3589412395', 'V002_3591237209', 'V002_3591410915', 'V002_3591870440', 'V002_3592485937', 'V002_3596513978', 'V002_3596679646', 'V002_3599446482', 'V002_3600943273', 'V002_3601513830', 
'V002_3602291868', 'V002_3606623696', 'V002_3610139916', 'V002_3610284095', 'V002_3610399143', 'V002_3610734384', 'V002_3611733812', 'V002_3612831945', 'V002_3612968001', 'V002_3614155821', 'V002_3615902345', 'V002_3618691280', 'V002_3620300710', 'V002_3620632604', 'V002_3622439348', 'V002_3623430432', 'V002_3624518799', 'V002_3627463783', 'V002_3627898703', 'V002_3628185777', 'V002_3628739735', 'V002_3630733342', 'V002_3633548439', 'V002_3634361228', 'V002_3635793088', 'V002_3635857281', 'V002_3636676549', 'V002_3640280281', 'V002_3640364102', 'V002_3643817139', 'V002_3644802684', 'V002_3647684103', 'V002_3648240947', 'V002_3650016608', 'V002_3651596274', 'V002_3653121822', 'V002_3653606854', 'V002_3654663301', 'V002_3658058736', 'V002_3658395075', 'V002_3659525793', 'V002_3660070696', 'V002_3663836277', 'V002_3664340976', 'V002_3664608560', 'V002_3667835367', 'V002_3668945599', 'V002_3669569804', 'V002_3671822255', 'V002_3678009975', 'V002_3678087096', 'V002_3679878499', 'V002_3681024228', 'V002_3687892597', 'V002_3689104377', 'V002_3691958107', 'V002_3691995067', 'V002_3692203795', 'V002_3692359167', 'V002_3694934220', 'V002_3697535314', 'V002_3698959640', 'V002_3699569730', 'V002_3700886583', 'V002_3701330383', 'V002_3712445707', 'V002_3716086967', 'V002_3717633994', 'V002_3722274181', 'V002_3725609615', 'V002_3728170790', 'V002_3729623773', 'V002_3731560298', 'V002_3732655074', 'V002_3734355643', 'V002_3739624605', 'V002_3740156016', 'V002_3743460717', 'V002_3746073290', 'V002_3748011133', 'V002_3748247147', 'V002_3748297501', 'V002_3750759162', 'V002_3752845158', 'V002_3753709827', 'V002_3755499196', 'V002_3756233645', 'V002_3763038825', 'V002_3764074550', 'V002_3765318121', 'V002_3767548148', 'V002_3769757586', 'V002_3772036846', 'V002_3772051827', 'V002_3772713690', 'V002_3773509435', 'V002_3774174011', 'V002_3774556551', 'V002_3777982069', 'V002_3780120007', 'V002_3783497587', 'V002_3783519115', 'V002_3787246032', 'V002_3789518597', 'V002_3791521490', 
'V002_3793835158', 'V002_3799895678', 'V002_3802987330', 'V002_3803934644', 'V002_3806111692', 'V002_3808010931', 'V002_3808117624', 'V002_3813713455', 'V002_3814121774', 'V002_3815336564', 'V002_3819228624', 'V002_3823515297', 'V002_3824160762', 'V002_3824989736', 'V002_3826371260', 'V002_3827614851', 'V002_3839031723', 'V002_3840936250', 'V002_3844590661', 'V002_3844778814', 'V002_3845417359', 'V002_3847804935', 'V002_3852723443', 'V002_3858266582', 'V002_3858478839', 'V002_3861613094', 'V002_3864244782', 'V002_3865374615', 'V002_3865909193', 'V002_3866780290', 'V002_3868499094', 'V002_3868714548', 'V002_3869419821', 'V002_3870277972', 'V002_3871190155', 'V002_3875438152', 'V002_3875732794', 'V002_3880678792', 'V002_3880918994', 'V002_3881570267', 'V002_3885805770', 'V002_3889190760', 'V002_3890823672', 'V002_3893447538', 'V002_3894845785', 'V002_3897503210', 'V002_3897759000', 'V002_3899158683', 'V002_3899669042', 'V002_3907410568', 'V002_3911850194', 'V002_3912325416', 'V002_3913353323', 'V002_3913376384', 'V002_3913748717', 'V002_3917034080', 'V002_3919101364', 'V002_3922860602', 'V002_3924352419', 'V002_3929360078', 'V002_3931984707', 'V002_3932323003', 'V002_3932991730', 'V002_3935697762', 'V002_3937870159', 'V002_3938107762', 'V002_3939086014', 'V002_3941285633', 'V002_3943988720', 'V002_3945318942', 'V002_3946154092', 'V002_3947349363', 'V002_3948894156', 'V002_3949279283', 'V002_3950221367', 'V002_3950487225', 'V002_3952296407', 'V002_3953001265', 'V002_3953239759', 'V002_3954756331', 'V002_3956255279', 'V002_3958802483', 'V002_3960450315', 'V002_3960868700', 'V002_3966514971', 'V002_3968506260', 'V002_3970159692', 'V002_3971650221', 'V002_3971833796', 'V002_3972075434', 'V002_3972509140', 'V002_3974942021', 'V002_3975322260', 'V002_3977226622', 'V002_3977619241', 'V002_3979172779', 'V002_3979886426', 'V002_3979945045', 'V002_3987343093', 'V002_3988359434', 'V002_3989032767', 'V002_3992612889', 'V002_3992893916', 'V002_3995809805', 'V002_4000020454', 
'V002_4000058955', 'V002_4001846703', 'V002_4003026827', 'V002_4004608329', 'V002_4005815370', 'V002_4006929074', 'V002_4007277523', 'V002_4008045660', 'V002_4011480805', 'V002_4013628619', 'V002_4023689589', 'V002_4023840542', 'V002_4030843097', 'V002_4031024296', 'V002_4031743224', 'V002_4036422407', 'V002_4037146251', 'V002_4038779524', 'V002_4040215515', 'V002_4040582319', 'V002_4041402429', 'V002_4041428263', 'V002_4042162897', 'V002_4042865304', 'V002_4043159452', 'V002_4043710788', 'V002_4043856764', 'V002_4044001065', 'V002_4048496506', 'V002_4049538966', 'V002_4051433574', 'V002_4053845369', 'V002_4059037228', 'V002_4060492435', 'V002_4061627722', 'V002_4061885430', 'V002_4065794133', 'V002_4066914699', 'V002_4072229027', 'V002_4073927061', 'V002_4074405678', 'V002_4074939966', 'V002_4075136532', 'V002_4076545079', 'V002_4077977476', 'V002_4078182782', 'V002_4078979168', 'V002_4081011719', 'V002_4084195728', 'V002_4090522100', 'V002_4091152751', 'V002_4092599601', 'V002_4093215888', 'V002_4097231871', 'V002_4102016348', 'V002_4105064964', 'V002_4109037627', 'V002_4111120742', 'V002_4113554101', 'V002_4114902884', 'V002_4117026002', 'V002_4118165978', 'V002_4120727196', 'V002_4121321860', 'V002_4125553056', 'V002_4126837410', 'V002_4128028747', 'V002_4129304116', 'V002_4132280534', 'V002_4133363691', 'V002_4133550875', 'V002_4133709276', 'V002_4136146386', 'V002_4137536722', 'V002_4138854771', 'V002_4139942357', 'V002_4140144350', 'V002_4143299552', 'V002_4147702363', 'V002_4147786120', 'V002_4153628006', 'V002_4153898141', 'V002_4155270486', 'V002_4159624611', 'V002_4161664398', 'V002_4162789672', 'V002_4163332594', 'V002_4165536801', 'V002_4166094655', 'V002_4168290722', 'V002_4168407398', 'V002_4169775013', 'V002_4170656644', 'V002_4172083561', 'V002_4172657621', 'V002_4175905396', 'V002_4178656707', 'V002_4179561223', 'V002_4179743380', 'V002_4180216662', 'V002_4182076403', 'V002_4183614277', 'V002_4185172981', 'V002_4185364560', 'V002_4187646866', 
'V002_4188436072', 'V002_4190089266', 'V002_4190389170', 'V002_4192363246', 'V002_4193630133', 'V002_4196406375', 'V002_4196924448', 'V002_4199115060', 'V002_4199453134', 'V002_4200922046', 'V002_4201152465', 'V002_4203091751', 'V002_4207398873', 'V002_4208401855', 'V002_4209958656', 'V002_4210849394', 'V002_4211045127', 'V002_4212720631', 'V002_4217600777', 'V002_4218053615', 'V002_4219291766', 'V002_4219361838', 'V002_4220518271', 'V002_4224403699', 'V002_4226858025', 'V002_4231838825', 'V002_4231921878', 'V002_4234672525', 'V002_4237556208', 'V002_4238280212', 'V002_4239100038', 'V002_4239758350', 'V002_4240267791', 'V002_4240757468', 'V002_4241243431', 'V002_4246423469', 'V002_4249367003', 'V002_4250711474', 'V002_4251509488', 'V002_4253799425', 'V002_4255497641', 'V002_4260770343', 'V002_4260963037', 'V002_4262839699', 'V002_4264067918', 'V002_4265206919', 'V002_4265428008', 'V002_4266299582', 'V002_4267927080', 'V002_4268065504', 'V002_4269333718', 'V002_4271014053', 'V002_4271368677', 'V002_4272074308', 'V002_4274470389', 'V002_4276963845', 'V002_4277195936', 'V002_4278282169', 'V002_4279196026', 'V002_4280389073', 'V002_4280409577', 'V002_4280772989', 'V002_4280911344', 'V002_4282689307', 'V002_4283714182', 'V002_4287254444', 'V002_4287675597', 'V002_4289994095', 'V002_4290703572', 'V002_4292252342']
| 16,795.75
| 67,157
| 0.789456
| 7,074
| 67,183
| 6.998021
| 0.500707
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.721678
| 0.052707
| 67,183
| 4
| 67,157
| 16,795.75
| 0.056174
| 0
| 0
| 0
| 0
| 0
| 0.788804
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
e2f7044f127bc051014381b1ed9eeb528e5bd270
| 18,667
|
py
|
Python
|
accelerate_fft_test.py
|
andrej5elin/accelerate_fft
|
5e92b45fc618c9761835e0c6a6d8ad57234f37eb
|
[
"MIT"
] | null | null | null |
accelerate_fft_test.py
|
andrej5elin/accelerate_fft
|
5e92b45fc618c9761835e0c6a6d8ad57234f37eb
|
[
"MIT"
] | null | null | null |
accelerate_fft_test.py
|
andrej5elin/accelerate_fft
|
5e92b45fc618c9761835e0c6a6d8ad57234f37eb
|
[
"MIT"
] | null | null | null |
import unittest
import accelerate_fft as fft
import numpy as np
# Deterministic RNG stream for the random-input tests below.
np.random.seed(0)

# One 4x4 sample matrix, materialized once per supported dtype.
_SAMPLE = [[1, 2, 3, 4], [4, 5, 6, 7], [1, 2, 3, 1], [3, 4, 1, 2]]
f64 = np.array(_SAMPLE, "float64")
f32 = np.array(_SAMPLE, "float32")
c64 = np.array(_SAMPLE, "complex64")
c128 = np.array(_SAMPLE, "complex128")
class BaseTest(unittest.TestCase):
    """Shared fixture: run single-threaded and compare arrays with tolerance."""

    def setUp(self):
        # Default every test to one worker thread; threaded subclasses override.
        fft.set_nthreads(1)

    def assert_equal(self, a, b, rtol=1.e-4, atol=1.e-6):
        """Assert that *a* and *b* are element-wise close within rtol/atol."""
        close = np.allclose(a, b, rtol, atol)
        self.assertTrue(close)
class TestShape(BaseTest):
    """Inputs with unsupported shapes must raise ValueError."""

    def test_wrong_fft_shape(self):
        # Length-3 input is rejected by fft.fft.
        self.assertRaises(ValueError, fft.fft, [1, 2, 3])

    def test_wrong_fft2_shape(self):
        # 2x3 input is rejected by fft.fft2.
        self.assertRaises(ValueError, fft.fft2, [[1, 2, 3], [1, 2, 3]])
class TestDtype(BaseTest):
    """rfft on complex input must emit a ComplexWarning (imaginary part dropped)."""
    # NOTE(review): np.ComplexWarning moved to numpy.exceptions in NumPy 2.0 —
    # confirm the supported numpy range for this test suite.

    def test_rfft_128(self):
        self.assertWarns(np.ComplexWarning, fft.rfft, c128)

    def test_rfft_64(self):
        self.assertWarns(np.ComplexWarning, fft.rfft, c64)
class TestResults(BaseTest):
    """Compare accelerate_fft results against numpy.fft references.

    The library's transforms differ from numpy by constant scale factors,
    hence the /2 and /length normalizations below.
    NOTE(review): the factors (2 for rfft packing, transform length for the
    unnormalized inverses) are assumed from the assertions themselves —
    confirm against the accelerate_fft documentation.
    """

    def test_rfft_float(self):
        a = np.array(np.random.randn(2, 4, 8), "float32")
        b = a.reshape(2, 8, 4)
        self.assert_equal(fft.unpack(fft.rfft(a)) / 2., np.fft.rfft(a))
        self.assert_equal(fft.unpack(fft.rfft(b)) / 2., np.fft.rfft(b))

    def test_rfft_float_axis(self):
        a = np.array(np.random.randn(2, 4, 8), "float32")
        b = a.reshape(2, 8, 4)
        self.assert_equal(fft.unpack(fft.rfft(a, axis=0), axis=0) / 2., np.fft.rfft(a, axis=0))
        self.assert_equal(fft.unpack(fft.rfft(b, axis=0), axis=0) / 2., np.fft.rfft(b, axis=0))
        self.assert_equal(fft.unpack(fft.rfft(a, axis=1), axis=1) / 2., np.fft.rfft(a, axis=1))
        self.assert_equal(fft.unpack(fft.rfft(b, axis=1), axis=1) / 2., np.fft.rfft(b, axis=1))

    def test_rfft_split_out_float(self):
        a = np.array(np.random.randn(2, 4, 8), "float32")
        b = a.reshape(2, 8, 4)
        for x in (a, b):
            # split_out returns the (real, imag) planes separately.
            r, i = fft.rfft(x, split_out=True)
            c = r + 1j * i
            self.assert_equal(fft.unpack(c) / 2., np.fft.rfft(x))

    def test_rfft_double(self):
        # FIX: was "float32" (copy/paste bug) — the *double* test must
        # exercise the float64 code path, as test_rfft_double_axis does.
        a = np.array(np.random.randn(2, 4, 8), "float64")
        b = a.reshape(2, 8, 4)
        self.assert_equal(fft.unpack(fft.rfft(a)) / 2., np.fft.rfft(a))
        self.assert_equal(fft.unpack(fft.rfft(b)) / 2., np.fft.rfft(b))

    def test_rfft_double_axis(self):
        a = np.array(np.random.randn(2, 4, 8), "float64")
        b = a.reshape(2, 8, 4)
        self.assert_equal(fft.unpack(fft.rfft(a, axis=0), axis=0) / 2., np.fft.rfft(a, axis=0))
        self.assert_equal(fft.unpack(fft.rfft(b, axis=0), axis=0) / 2., np.fft.rfft(b, axis=0))
        self.assert_equal(fft.unpack(fft.rfft(a, axis=1), axis=1) / 2., np.fft.rfft(a, axis=1))
        self.assert_equal(fft.unpack(fft.rfft(b, axis=1), axis=1) / 2., np.fft.rfft(b, axis=1))

    def test_rfft_split_out_double(self):
        a = np.array(np.random.randn(2, 4, 8), "float64")
        b = a.reshape(2, 8, 4)
        for x in (a, b):
            r, i = fft.rfft(x, split_out=True)
            c = r + 1j * i
            self.assert_equal(fft.unpack(c) / 2., np.fft.rfft(x))

    def test_fft_float(self):
        a = np.array(np.random.randn(2, 4, 8), "complex64")
        b = a.reshape(2, 8, 4)
        self.assert_equal(fft.fft(a), np.fft.fft(a))
        self.assert_equal(fft.fft(b), np.fft.fft(b))

    def test_fft_float_axis(self):
        # NOTE(review): uses real float32 input while test_fft_float uses
        # complex64 — possibly another copy/paste slip, but fft of real
        # input is also a valid case, so left unchanged. Confirm intent.
        a = np.array(np.random.randn(2, 4, 8), "float32")
        b = a.reshape(2, 8, 4)
        self.assert_equal(fft.fft(a, axis=0), np.fft.fft(a, axis=0))
        self.assert_equal(fft.fft(b, axis=0), np.fft.fft(b, axis=0))
        self.assert_equal(fft.fft(a, axis=1), np.fft.fft(a, axis=1))
        self.assert_equal(fft.fft(b, axis=1), np.fft.fft(b, axis=1))

    def test_fft_double(self):
        a = np.array(np.random.randn(2, 4, 8), "complex128")
        b = a.reshape(2, 8, 4)
        self.assert_equal(fft.fft(a), np.fft.fft(a))
        self.assert_equal(fft.fft(b), np.fft.fft(b))

    def test_fft_double_axis(self):
        # FIX: was "complex64" (copy/paste bug) — the *double* test must use
        # complex128, matching test_fft_double.
        a = np.array(np.random.randn(2, 4, 8), "complex128")
        b = a.reshape(2, 8, 4)
        self.assert_equal(fft.fft(a, axis=0), np.fft.fft(a, axis=0))
        self.assert_equal(fft.fft(b, axis=0), np.fft.fft(b, axis=0))
        self.assert_equal(fft.fft(a, axis=1), np.fft.fft(a, axis=1))
        self.assert_equal(fft.fft(b, axis=1), np.fft.fft(b, axis=1))

    def test_fft2_float(self):
        a = np.array(np.random.randn(2, 4, 8), "complex64")
        b = a.reshape(2, 8, 4)
        self.assert_equal(fft.fft2(a), np.fft.fft2(a))
        self.assert_equal(fft.fft2(b), np.fft.fft2(b))

    def test_fft2_float_axes(self):
        a = np.array(np.random.randn(2, 4, 8), "complex64")
        b = a.reshape(2, 8, 4)
        self.assert_equal(fft.fft2(a, axes=(0, 1)), np.fft.fft2(a, axes=(0, 1)))
        self.assert_equal(fft.fft2(b, axes=(0, 1)), np.fft.fft2(b, axes=(0, 1)))

    def test_fft2_double(self):
        a = np.array(np.random.randn(2, 4, 8), "complex128")
        b = a.reshape(2, 8, 4)
        self.assert_equal(fft.fft2(a), np.fft.fft2(a))
        self.assert_equal(fft.fft2(b), np.fft.fft2(b))

    def test_fft2_double_axes(self):
        a = np.array(np.random.randn(2, 4, 8), "complex128")
        b = a.reshape(2, 8, 4)
        self.assert_equal(fft.fft2(a, axes=(0, 1)), np.fft.fft2(a, axes=(0, 1)))
        self.assert_equal(fft.fft2(b, axes=(0, 1)), np.fft.fft2(b, axes=(0, 1)))

    def test_rfft2_float(self):
        a = np.array(np.random.randn(2, 4, 8), "float32")
        b = a.reshape(2, 8, 4)
        self.assert_equal(fft.unpack2(fft.rfft2(a)) / 2, np.fft.rfft2(a))
        self.assert_equal(fft.unpack2(fft.rfft2(b)) / 2, np.fft.rfft2(b))

    def test_rfft2_float_axes(self):
        a = np.array(np.random.randn(2, 4, 8), "float32")
        b = a.reshape(2, 8, 4)
        self.assert_equal(fft.unpack2(fft.rfft2(a, axes=(0, 1)), axes=(0, 1)) / 2, np.fft.rfft2(a, axes=(0, 1)))
        self.assert_equal(fft.unpack2(fft.rfft2(b, axes=(0, 1)), axes=(0, 1)) / 2, np.fft.rfft2(b, axes=(0, 1)))

    def test_rfft2_double(self):
        a = np.array(np.random.randn(2, 4, 8), "float64")
        b = a.reshape(2, 8, 4)
        self.assert_equal(fft.unpack2(fft.rfft2(a)) / 2, np.fft.rfft2(a))
        self.assert_equal(fft.unpack2(fft.rfft2(b)) / 2, np.fft.rfft2(b))

    def test_ifft_float(self):
        a = np.array(np.random.randn(2, 4, 8), "float32")
        b = a.reshape(2, 8, 4)
        # Inverse transforms are unnormalized: divide by the last-axis length.
        self.assert_equal(fft.ifft(a) / 8, np.fft.ifft(a))
        self.assert_equal(fft.ifft(b) / 4, np.fft.ifft(b))

    def test_ifft_double(self):
        a = np.array(np.random.randn(2, 4, 8), "float64")
        b = a.reshape(2, 8, 4)
        self.assert_equal(fft.ifft(a) / 8, np.fft.ifft(a))
        self.assert_equal(fft.ifft(b) / 4, np.fft.ifft(b))

    def test_ifft2_float(self):
        a = np.array(np.random.randn(2, 4, 8), "float32")
        b = a.reshape(2, 8, 4)
        self.assert_equal(fft.ifft2(a) / (8 * 4), np.fft.ifft2(a))
        self.assert_equal(fft.ifft2(b) / (8 * 4), np.fft.ifft2(b))

    def test_ifft2_double(self):
        a = np.array(np.random.randn(2, 4, 8), "float64")
        b = a.reshape(2, 8, 4)
        self.assert_equal(fft.ifft2(a) / (8 * 4), np.fft.ifft2(a))
        self.assert_equal(fft.ifft2(b) / (8 * 4), np.fft.ifft2(b))

    def test_irfft_float(self):
        a = np.array(np.random.randn(2, 4, 8), "float32")
        b = a.reshape(2, 8, 4)
        # Round-trip: rfft scales by 2, irfft by the transform length.
        self.assert_equal(fft.irfft(fft.rfft(a)) / 8 / 2, a)
        self.assert_equal(fft.irfft(fft.rfft(b)) / 4 / 2, b)

    def test_irfft_double(self):
        a = np.array(np.random.randn(2, 4, 8), "float64")
        b = a.reshape(2, 8, 4)
        self.assert_equal(fft.irfft(fft.rfft(a)) / 8 / 2, a)
        self.assert_equal(fft.irfft(fft.rfft(b)) / 4 / 2, b)

    def test_irfft2_float(self):
        a = np.array(np.random.randn(2, 4, 8), "float32")
        b = a.reshape(2, 8, 4)
        self.assert_equal(fft.irfft2(fft.rfft2(a)) / 8 / 4 / 2, a)
        self.assert_equal(fft.irfft2(fft.rfft2(b)) / 4 / 8 / 2, b)

    def test_irfft2_double(self):
        a = np.array(np.random.randn(2, 4, 8), "float64")
        b = a.reshape(2, 8, 4)
        self.assert_equal(fft.irfft2(fft.rfft2(a)) / 8 / 4 / 2, a)
        self.assert_equal(fft.irfft2(fft.rfft2(b)) / 4 / 8 / 2, b)
class TestInplaceTransforms(BaseTest):
    """Verify that overwrite_x=True writes the transform back into the input
    buffer(s), for both interleaved and split (real, imag) data layouts."""

    def test_fft(self):
        for dtype in ("complex64", "complex128"):
            a0 = np.array(np.random.randn(2,4,8),dtype)
            f = fft.fft(a0)
            with self.subTest(i = 0):
                # interleaved in / interleaved out: input holds the transform
                a = a0.copy()
                fft.fft(a, overwrite_x = True)
                self.assert_equal(a,f)
            with self.subTest(i = 1):
                # split in / split out: both planes are overwritten in place
                real,imag = a0.real.copy(),a0.imag.copy()
                fr,fi = fft.fft((real, imag), split_in = True, split_out = True)
                fft.fft((real,imag), overwrite_x = True, split_in = True, split_out = True)
                self.assert_equal(fr,real)
                self.assert_equal(fi,imag)
            with self.subTest(i = 2):
                # interleaved in / split out: the input must stay untouched
                a = a0.copy()
                fft.fft(a, split_out = True, overwrite_x = True)
                self.assert_equal(a,a0)
            with self.subTest(i = 3):
                # split in / interleaved out: planes match the transform parts
                real,imag = a0.real.copy(),a0.imag.copy()
                f = fft.fft((real, imag), split_in = True)
                fft.fft((real,imag), overwrite_x = True, split_in = True)
                self.assert_equal(real, f.real)
                self.assert_equal(imag,f.imag)

    def test_fft2(self):
        # Same four layout combinations as test_fft, for the 2-D transform.
        for dtype in ("complex64", "complex128"):
            a0 = np.array(np.random.randn(2,4,8),dtype)
            f = fft.fft2(a0)
            with self.subTest(i = 0):
                a = a0.copy()
                fft.fft2(a, overwrite_x = True)
                self.assert_equal(a,f)
            with self.subTest(i = 1):
                real,imag = a0.real.copy(),a0.imag.copy()
                fr,fi = fft.fft2((real, imag), split_in = True, split_out = True)
                fft.fft2((real,imag), overwrite_x = True, split_in = True, split_out = True)
                self.assert_equal(fr,real)
                self.assert_equal(fi,imag)
            with self.subTest(i = 2):
                a = a0.copy()
                fft.fft2(a, split_out = True, overwrite_x = True)
                self.assert_equal(a,a0)
            with self.subTest(i = 3):
                real,imag = a0.real.copy(),a0.imag.copy()
                f = fft.fft2((real, imag), split_in = True)
                fft.fft2((real,imag), overwrite_x = True, split_in = True)
                self.assert_equal(real, f.real)
                self.assert_equal(imag,f.imag)
class TestSplitDataTransform(BaseTest):
    """Round-trip fft->ifft (and rfft->irfft) through every combination of
    split_in/split_out layouts; /8 and /4 undo the unnormalized inverses."""

    def test_fft2_double(self):
        a = np.array(np.random.randn(2,4,8),"float64")
        a_split = a.real.copy(), a.imag.copy()
        with self.subTest(i = 0):
            # split intermediate, interleaved round-trip result
            self.assert_equal(fft.ifft2(fft.fft2(a, split_out = True), split_in = True)/8/4, a)
        with self.subTest(i = 1):
            # split intermediate and split result
            out_split = fft.ifft2(fft.fft2(a, split_out = True), split_in = True, split_out = True)
            self.assert_equal(a_split[0], out_split[0]/8/4)
            self.assert_equal(a_split[1], out_split[1]/8/4)
        with self.subTest(i = 2):
            # interleaved intermediate, split result
            out_split = fft.ifft2(fft.fft2(a), split_out = True)
            self.assert_equal(a_split[0], out_split[0]/8/4)
            self.assert_equal(a_split[1], out_split[1]/8/4)
        with self.subTest(i = 3):
            # split input, interleaved result
            self.assert_equal(fft.ifft2(fft.fft2(a_split, split_in = True))/8/4, a)
        with self.subTest(i = 4):
            out_split = fft.ifft2(fft.fft2(a_split, split_in = True),split_out = True)
            self.assert_equal(a_split[0], out_split[0]/8/4)
            self.assert_equal(a_split[1], out_split[1]/8/4)
        with self.subTest(i = 5):
            # fully split pipeline on both legs
            out_split = fft.ifft2(fft.fft2(a_split, split_in = True, split_out = True),split_out = True,split_in = True)
            self.assert_equal(a_split[0], out_split[0]/8/4)
            self.assert_equal(a_split[1], out_split[1]/8/4)

    def test_fft2_float(self):
        # Same six cases as test_fft2_double, single precision.
        a = np.array(np.random.randn(2,4,8),"float32")
        a_split = a.real.copy(), a.imag.copy()
        with self.subTest(i = 0):
            self.assert_equal(fft.ifft2(fft.fft2(a, split_out = True), split_in = True)/8/4, a)
        with self.subTest(i = 1):
            out_split = fft.ifft2(fft.fft2(a, split_out = True), split_in = True, split_out = True)
            self.assert_equal(a_split[0], out_split[0]/8/4)
            self.assert_equal(a_split[1], out_split[1]/8/4)
        with self.subTest(i = 2):
            out_split = fft.ifft2(fft.fft2(a), split_out = True)
            self.assert_equal(a_split[0], out_split[0]/8/4)
            self.assert_equal(a_split[1], out_split[1]/8/4)
        with self.subTest(i = 3):
            self.assert_equal(fft.ifft2(fft.fft2(a_split, split_in = True))/8/4, a)
        with self.subTest(i = 4):
            out_split = fft.ifft2(fft.fft2(a_split, split_in = True),split_out = True)
            self.assert_equal(a_split[0], out_split[0]/8/4)
            self.assert_equal(a_split[1], out_split[1]/8/4)
        with self.subTest(i = 5):
            out_split = fft.ifft2(fft.fft2(a_split, split_in = True, split_out = True),split_out = True,split_in = True)
            self.assert_equal(a_split[0], out_split[0]/8/4)
            self.assert_equal(a_split[1], out_split[1]/8/4)

    def test_fft_double(self):
        a = np.array(np.random.randn(8),"complex128")
        a0 = a.copy()
        a_split = a.real.copy(), a.imag.copy()
        with self.subTest(i = 0):
            out = fft.ifft(fft.fft(a, split_out = True), split_in = True)/8
            self.assert_equal(out, a0)
        with self.subTest(i = 1):
            out_split = fft.ifft(fft.fft(a, split_out = True), split_in = True, split_out = True)
            # NOTE(review): mixes a0.real with a_split[1] — equivalent values
            # here, but presumably both were meant to use a_split; confirm.
            self.assert_equal(a0.real, out_split[0]/8)
            self.assert_equal(a_split[1], out_split[1]/8)
        with self.subTest(i = 2):
            out_split = fft.ifft(fft.fft(a), split_out = True)
            self.assert_equal(a_split[0], out_split[0]/8)
            self.assert_equal(a_split[1], out_split[1]/8)
        with self.subTest(i = 3):
            self.assert_equal(fft.ifft(fft.fft(a_split, split_in = True))/8, a)
        with self.subTest(i = 4):
            out_split = fft.ifft(fft.fft(a_split, split_in = True),split_out = True)
            self.assert_equal(a_split[0], out_split[0]/8)
            self.assert_equal(a_split[1], out_split[1]/8)
        with self.subTest(i = 5):
            out_split = fft.ifft(fft.fft(a_split, split_in = True, split_out = True),split_out = True,split_in = True)
            self.assert_equal(a_split[0], out_split[0]/8)
            self.assert_equal(a_split[1], out_split[1]/8)

    def test_fft_float(self):
        # Same six cases as test_fft_double, single precision.
        a = np.array(np.random.randn(8),"complex64")
        a0 = a.copy()
        a_split = a.real.copy(), a.imag.copy()
        with self.subTest(i = 0):
            out = fft.ifft(fft.fft(a, split_out = True), split_in = True)/8
            self.assert_equal(out, a0)
        with self.subTest(i = 1):
            out_split = fft.ifft(fft.fft(a, split_out = True), split_in = True, split_out = True)
            self.assert_equal(a0.real, out_split[0]/8)
            self.assert_equal(a_split[1], out_split[1]/8)
        with self.subTest(i = 2):
            out_split = fft.ifft(fft.fft(a), split_out = True)
            self.assert_equal(a_split[0], out_split[0]/8)
            self.assert_equal(a_split[1], out_split[1]/8)
        with self.subTest(i = 3):
            self.assert_equal(fft.ifft(fft.fft(a_split, split_in = True))/8, a)
        with self.subTest(i = 4):
            out_split = fft.ifft(fft.fft(a_split, split_in = True),split_out = True)
            self.assert_equal(a_split[0], out_split[0]/8)
            self.assert_equal(a_split[1], out_split[1]/8)
        with self.subTest(i = 5):
            out_split = fft.ifft(fft.fft(a_split, split_in = True, split_out = True),split_out = True,split_in = True)
            self.assert_equal(a_split[0], out_split[0]/8)
            self.assert_equal(a_split[1], out_split[1]/8)

    def test_rfft2_double(self):
        # Real 2-D round-trip with a split intermediate; /8/4/2 undoes the
        # transform-length and rfft packing factors.
        a = np.array(np.random.randn(2,4,8),"float64")
        with self.subTest(i = 0):
            self.assert_equal(fft.irfft2(fft.rfft2(a, split_out = True), split_in = True)/8/4/2, a)

    def test_rfft2_float(self):
        a = np.array(np.random.randn(2,4,8),"float32")
        with self.subTest(i = 0):
            self.assert_equal(fft.irfft2(fft.rfft2(a, split_out = True), split_in = True)/8/4/2, a)

    def test_rfft_double(self):
        a = np.array(np.random.randn(2,4,8),"float64")
        with self.subTest(i = 0):
            self.assert_equal(fft.irfft(fft.rfft(a, split_out = True), split_in = True)/8/2, a)

    def test_rfft_float(self):
        a = np.array(np.random.randn(2,4,8),"float32")
        with self.subTest(i = 0):
            self.assert_equal(fft.irfft(fft.rfft(a, split_out = True), split_in = True)/8/2, a)
class TestResultsThreaded(TestResults):
    """Re-run every result check with two worker threads."""

    def setUp(self):
        fft.set_nthreads(2)


class TestSplitDataTransformThreaded(TestSplitDataTransform):
    """Re-run every split-data check with two worker threads."""

    def setUp(self):
        fft.set_nthreads(2)


class TestInplaceTransformThreaded(TestInplaceTransforms):
    """Re-run every in-place check with two worker threads."""

    def setUp(self):
        fft.set_nthreads(2)
class TestSetup(BaseTest):
    """Lifecycle of the cached fftsetup object."""

    def test_fftsetup(self):
        fft.destroy_fftsetup()
        first = fft.create_fftsetup(5)
        # Requesting a smaller size must return the cached setup object.
        second = fft.create_fftsetup(4)
        self.assertIs(first, second)
        # Requesting a larger size must allocate a fresh setup.
        third = fft.create_fftsetup(7)
        self.assertIsNot(third, second)
        fft.destroy_fftsetup()
class TestUnpack(BaseTest):
    """inplace=True unpacking must agree with the out-of-place result."""

    def test_unpack(self):
        data = np.random.randn(8, 4) + np.random.randn(8, 4) * 1j
        expected = fft.unpack(data)
        result = fft.unpack(data, inplace=True)
        # The in-place variant holds one fewer column along the last axis.
        self.assert_equal(expected[..., 0:-1], result)

    def test_unpack2(self):
        data = np.random.randn(2, 8, 4) + np.random.randn(2, 8, 4) * 1j
        expected = fft.unpack2(data)
        result = fft.unpack2(data, inplace=True)
        self.assert_equal(expected[..., 0:-1], result)
if __name__ == "__main__":
    # Allow running the suite directly: python accelerate_fft_test.py
    unittest.main()
| 43.411628
| 120
| 0.567472
| 3,063
| 18,667
| 3.326804
| 0.037871
| 0.124141
| 0.167812
| 0.116585
| 0.904809
| 0.897841
| 0.890775
| 0.877527
| 0.854858
| 0.849558
| 0
| 0.059076
| 0.263674
| 18,667
| 430
| 121
| 43.411628
| 0.682284
| 0.002304
| 0
| 0.768156
| 0
| 0
| 0.017827
| 0
| 0
| 0
| 0
| 0
| 0.340782
| 1
| 0.131285
| false
| 0
| 0.00838
| 0
| 0.170391
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3908e8df91a03704b0d8e413ce94b2cb9dee98fd
| 471
|
py
|
Python
|
rakmai/helpers.py
|
pincoin/rakmai
|
d9daa399aff50712a86b2dec9d94e622237b25b0
|
[
"MIT"
] | 11
|
2018-04-02T16:36:19.000Z
|
2019-07-10T05:54:58.000Z
|
rakmai/helpers.py
|
pincoin/rakmai
|
d9daa399aff50712a86b2dec9d94e622237b25b0
|
[
"MIT"
] | 22
|
2019-01-01T20:40:21.000Z
|
2022-02-10T08:06:39.000Z
|
rakmai/helpers.py
|
pincoin/rakmai
|
d9daa399aff50712a86b2dec9d94e622237b25b0
|
[
"MIT"
] | 4
|
2019-03-12T14:24:37.000Z
|
2022-01-07T16:20:22.000Z
|
def get_sub_domain(request):
    """Return the left-most label of the request's HTTP_HOST header.

    'shop.example.com' -> 'shop'.  Implicitly returns None when the host
    contains no dot or the header is missing.
    """
    sub, sep, _ = request.META.get('HTTP_HOST', '').partition('.')
    if sep:
        return sub
def get_domain(request):
    """Return everything after the first dot of the request's HTTP_HOST.

    'shop.example.com' -> 'example.com'.  Implicitly returns None when the
    host contains no dot or the header is missing.
    """
    _, sep, rest = request.META.get('HTTP_HOST', '').partition('.')
    if sep:
        return rest
def get_domains(request):
    """Split HTTP_HOST at its first dot into sub-domain and domain.

    'shop.example.com' -> {'sub_domain': 'shop', 'domain': 'example.com'}.
    Implicitly returns None when the host contains no dot or the header is
    missing.
    """
    sub, sep, rest = request.META.get('HTTP_HOST', '').partition('.')
    if sep:
        return {
            'sub_domain': sub,
            'domain': rest,
        }
| 20.478261
| 58
| 0.526539
| 63
| 471
| 3.809524
| 0.253968
| 0.075
| 0.225
| 0.275
| 0.770833
| 0.770833
| 0.770833
| 0.770833
| 0.770833
| 0.770833
| 0
| 0.029412
| 0.278132
| 471
| 22
| 59
| 21.409091
| 0.676471
| 0
| 0
| 0.4
| 0
| 0
| 0.097665
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
394883c265e75b5da0cf19c8fc3edbce168b75d0
| 93
|
py
|
Python
|
signal_processing/__init__.py
|
dwil2444/PW_Processing
|
7b7914ed6cf0d4eb5d8fb3298599d79b0791219a
|
[
"Apache-2.0"
] | null | null | null |
signal_processing/__init__.py
|
dwil2444/PW_Processing
|
7b7914ed6cf0d4eb5d8fb3298599d79b0791219a
|
[
"Apache-2.0"
] | null | null | null |
signal_processing/__init__.py
|
dwil2444/PW_Processing
|
7b7914ed6cf0d4eb5d8fb3298599d79b0791219a
|
[
"Apache-2.0"
] | null | null | null |
from .identify_fid_points import identify_fid_points
from .preprocessing import preprocessing
| 46.5
| 52
| 0.903226
| 12
| 93
| 6.666667
| 0.5
| 0.275
| 0.425
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075269
| 93
| 2
| 53
| 46.5
| 0.930233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
394e4ca891a93a23006a6d9666d3c0bdfa9938ad
| 179
|
py
|
Python
|
umtk/__init__.py
|
kyle0x54/umtk
|
883090d84fce924e65184847e6b3048014616f5d
|
[
"Apache-2.0"
] | 1
|
2020-08-03T12:27:02.000Z
|
2020-08-03T12:27:02.000Z
|
umtk/__init__.py
|
kyle0x54/umtk
|
883090d84fce924e65184847e6b3048014616f5d
|
[
"Apache-2.0"
] | null | null | null |
umtk/__init__.py
|
kyle0x54/umtk
|
883090d84fce924e65184847e6b3048014616f5d
|
[
"Apache-2.0"
] | 1
|
2020-11-28T03:27:10.000Z
|
2020-11-28T03:27:10.000Z
|
# flake8: noqa
from .__version__ import __version__
from .cupy_utils import *
from .error_handling import *
from .image import *
from .utils import *
from .visualization import *
| 22.375
| 36
| 0.776536
| 23
| 179
| 5.608696
| 0.478261
| 0.310078
| 0.232558
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006579
| 0.150838
| 179
| 7
| 37
| 25.571429
| 0.842105
| 0.067039
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
1a5236670d419ad20fea1a4408b03357ccb0a377
| 9,215
|
py
|
Python
|
wuqian/wuqian/migrations/0001_initial.py
|
Broadroad/pyWebsite
|
dd54b3511d5d8740a85824fa4c69b7b92adff62f
|
[
"Apache-2.0"
] | null | null | null |
wuqian/wuqian/migrations/0001_initial.py
|
Broadroad/pyWebsite
|
dd54b3511d5d8740a85824fa4c69b7b92adff62f
|
[
"Apache-2.0"
] | null | null | null |
wuqian/wuqian/migrations/0001_initial.py
|
Broadroad/pyWebsite
|
dd54b3511d5d8740a85824fa4c69b7b92adff62f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial schema: a single HomePage model holding the site's editable
    front-page content (banners, "about" blurb, and three 4-item sections).

    verbose_name values are UTF-8 byte strings (Chinese admin labels) and
    are kept verbatim — they are data, not comments.
    """

    # First migration of the app: no dependencies.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='HomePage',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # Four front-page banner images, each with a click-through URL.
                ('image_1', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe9\xa6\x96\xe9\xa1\xb5\xe6\xb5\xb7\xe6\x8a\xa5\xe5\x9b\xbe1')),
                ('url_1', models.CharField(max_length=1000, verbose_name=b'\xe9\xa6\x96\xe9\xa1\xb5\xe6\xb5\xb7\xe6\x8a\xa5\xe5\x9b\xbe1\xe9\x93\xbe\xe6\x8e\xa5')),
                ('image_2', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe9\xa6\x96\xe9\xa1\xb5\xe6\xb5\xb7\xe6\x8a\xa5\xe5\x9b\xbe2')),
                ('url_2', models.CharField(max_length=1000, verbose_name=b'\xe9\xa6\x96\xe9\xa1\xb5\xe6\xb5\xb7\xe6\x8a\xa5\xe5\x9b\xbe2\xe9\x93\xbe\xe6\x8e\xa5')),
                ('image_3', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe9\xa6\x96\xe9\xa1\xb5\xe6\xb5\xb7\xe6\x8a\xa5\xe5\x9b\xbe3')),
                ('url_3', models.CharField(max_length=1000, verbose_name=b'\xe9\xa6\x96\xe9\xa1\xb5\xe6\xb5\xb7\xe6\x8a\xa5\xe5\x9b\xbe3\xe9\x93\xbe\xe6\x8e\xa5')),
                ('image_4', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe9\xa6\x96\xe9\xa1\xb5\xe6\xb5\xb7\xe6\x8a\xa5\xe5\x9b\xbe4')),
                ('url_4', models.CharField(max_length=1000, verbose_name=b'\xe9\xa6\x96\xe9\xa1\xb5\xe6\xb5\xb7\xe6\x8a\xa5\xe5\x9b\xbe4\xe9\x93\xbe\xe6\x8e\xa5')),
                # "About us" image, summary text, and link.
                ('image_about', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe9\xa6\x96\xe9\xa1\xb5\xe5\x85\xb3\xe4\xba\x8e\xe6\x88\x91\xe4\xbb\xac\xe5\x9b\xbe\xe7\x89\x87')),
                ('data_about', models.CharField(max_length=1000, verbose_name=b'\xe9\xa6\x96\xe9\xa1\xb5\xe5\x85\xb3\xe4\xba\x8e\xe6\x88\x91\xe4\xbb\xac\xe7\xae\x80\xe4\xbb\x8b')),
                ('url_about', models.CharField(max_length=1000, verbose_name=b'\xe9\xa6\x96\xe9\xa1\xb5\xe5\x85\xb3\xe4\xba\x8e\xe6\x88\x91\xe4\xbb\xac\xe7\xae\x80\xe4\xbb\x8b\xe9\x93\xbe\xe6\x8e\xa5')),
                # Section 1 ("shengtaizhongzhi"): four image/summary/link triples.
                ('image_shengtaizhongzhi_1', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe7\xa7\x8d\xe6\xa4\x8d\xe5\x9b\xbe\xe7\x89\x871')),
                ('data_shengtaizhongzhi_1', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe7\xa7\x8d\xe6\xa4\x8d\xe7\xae\x80\xe4\xbb\x8b1')),
                ('url_shengtaizhongzhi_1', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe7\xa7\x8d\xe6\xa4\x8d\xe7\xae\x80\xe4\xbb\x8b1\xe9\x93\xbe\xe6\x8e\xa5')),
                ('image_shengtaizhongzhi_2', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe7\xa7\x8d\xe6\xa4\x8d\xe5\x9b\xbe\xe7\x89\x872')),
                ('data_shengtaizhongzhi_2', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe7\xa7\x8d\xe6\xa4\x8d\xe7\xae\x80\xe4\xbb\x8b2')),
                ('url_shengtaizhongzhi_2', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe7\xa7\x8d\xe6\xa4\x8d\xe7\xae\x80\xe4\xbb\x8b2\xe9\x93\xbe\xe6\x8e\xa5')),
                ('image_shengtaizhongzhi_3', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe7\xa7\x8d\xe6\xa4\x8d\xe5\x9b\xbe\xe7\x89\x873')),
                ('data_shengtaizhongzhi_3', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe7\xa7\x8d\xe6\xa4\x8d\xe7\xae\x80\xe4\xbb\x8b3')),
                ('url_shengtaizhongzhi_3', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe7\xa7\x8d\xe6\xa4\x8d\xe7\xae\x80\xe4\xbb\x8b3\xe9\x93\xbe\xe6\x8e\xa5')),
                ('image_shengtaizhongzhi_4', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe7\xa7\x8d\xe6\xa4\x8d\xe5\x9b\xbe\xe7\x89\x874')),
                ('data_shengtaizhongzhi_4', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe7\xa7\x8d\xe6\xa4\x8d\xe7\xae\x80\xe4\xbb\x8b4')),
                ('url_shengtaizhongzhi_4', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe7\xa7\x8d\xe6\xa4\x8d\xe7\xae\x80\xe4\xbb\x8b4\xe9\x93\xbe\xe6\x8e\xa5')),
                # Section 2 ("shengtaiyangzhi"): same four-triple layout.
                ('image_shengtaiyangzhi_1', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe5\x85\xbb\xe6\xae\x96\xe5\x9b\xbe\xe7\x89\x871')),
                ('data_shengtaiyangzhi_1', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe5\x85\xbb\xe6\xae\x96\xe7\xae\x80\xe4\xbb\x8b1')),
                ('url_shengtaiyangzhi_1', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe5\x85\xbb\xe6\xae\x96\xe7\xae\x80\xe4\xbb\x8b1\xe9\x93\xbe\xe6\x8e\xa5')),
                ('image_shengtaiyangzhi_2', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe5\x85\xbb\xe6\xae\x96\xe5\x9b\xbe\xe7\x89\x872')),
                ('data_shengtaiyangzhi_2', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe5\x85\xbb\xe6\xae\x96\xe7\xae\x80\xe4\xbb\x8b2')),
                ('url_shengtaiyangzhi_2', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe5\x85\xbb\xe6\xae\x96\xe7\xae\x80\xe4\xbb\x8b2\xe9\x93\xbe\xe6\x8e\xa5')),
                ('image_shengtaiyangzhi_3', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe5\x85\xbb\xe6\xae\x96\xe5\x9b\xbe\xe7\x89\x873')),
                ('data_shengtaiyangzhi_3', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe5\x85\xbb\xe6\xae\x96\xe7\xae\x80\xe4\xbb\x8b3')),
                ('url_shengtaiyangzhi_3', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe5\x85\xbb\xe6\xae\x96\xe7\xae\x80\xe4\xbb\x8b3\xe9\x93\xbe\xe6\x8e\xa5')),
                ('image_shengtaiyangzhi_4', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe5\x85\xbb\xe6\xae\x96\xe5\x9b\xbe\xe7\x89\x874')),
                ('data_shengtaiyangzhi_4', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe5\x85\xbb\xe6\xae\x96\xe7\xae\x80\xe4\xbb\x8b4')),
                ('url_shengtaiyangzhi_4', models.CharField(max_length=1000, verbose_name=b'\xe7\x94\x9f\xe6\x80\x81\xe5\x85\xbb\xe6\xae\x96\xe7\xae\x80\xe4\xbb\x8b4\xe9\x93\xbe\xe6\x8e\xa5')),
                # Section 3 ("kuangchanziyuan"): same four-triple layout.
                ('image_kuangchanziyuan_1', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe7\x9f\xbf\xe4\xba\xa7\xe8\xb5\x84\xe6\xba\x90\xe5\x9b\xbe\xe7\x89\x871')),
                ('data_kuangchanziyuan_1', models.CharField(max_length=1000, verbose_name=b'\xe7\x9f\xbf\xe4\xba\xa7\xe8\xb5\x84\xe6\xba\x90\xe7\xae\x80\xe4\xbb\x8b1')),
                ('url_kuangchanziyuan_1', models.CharField(max_length=1000, verbose_name=b'\xe7\x9f\xbf\xe4\xba\xa7\xe8\xb5\x84\xe6\xba\x90\xe7\xae\x80\xe4\xbb\x8b1\xe9\x93\xbe\xe6\x8e\xa5')),
                ('image_kuangchanziyuan_2', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe7\x9f\xbf\xe4\xba\xa7\xe8\xb5\x84\xe6\xba\x90\xe5\x9b\xbe\xe7\x89\x872')),
                ('data_kuangchanziyuan_2', models.CharField(max_length=1000, verbose_name=b'\xe7\x9f\xbf\xe4\xba\xa7\xe8\xb5\x84\xe6\xba\x90\xe7\xae\x80\xe4\xbb\x8b2')),
                ('url_kuangchanziyuan_2', models.CharField(max_length=1000, verbose_name=b'\xe7\x9f\xbf\xe4\xba\xa7\xe8\xb5\x84\xe6\xba\x90\xe7\xae\x80\xe4\xbb\x8b2\xe9\x93\xbe\xe6\x8e\xa5')),
                ('image_kuangchanziyuan_3', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe7\x9f\xbf\xe4\xba\xa7\xe8\xb5\x84\xe6\xba\x90\xe5\x9b\xbe\xe7\x89\x873')),
                ('data_kuangchanziyuan_3', models.CharField(max_length=1000, verbose_name=b'\xe7\x9f\xbf\xe4\xba\xa7\xe8\xb5\x84\xe6\xba\x90\xe7\xae\x80\xe4\xbb\x8b3')),
                ('url_kuangchanziyuan_3', models.CharField(max_length=1000, verbose_name=b'\xe7\x9f\xbf\xe4\xba\xa7\xe8\xb5\x84\xe6\xba\x90\xe7\xae\x80\xe4\xbb\x8b3\xe9\x93\xbe\xe6\x8e\xa5')),
                ('image_kuangchanziyuan_4', models.ImageField(upload_to=b'media/%Y/%m/%d', null=True, verbose_name=b'\xe7\x9f\xbf\xe4\xba\xa7\xe8\xb5\x84\xe6\xba\x90\xe5\x9b\xbe\xe7\x89\x874')),
                ('data_kuangchanziyuan_4', models.CharField(max_length=1000, verbose_name=b'\xe7\x9f\xbf\xe4\xba\xa7\xe8\xb5\x84\xe6\xba\x90\xe7\xae\x80\xe4\xbb\x8b4')),
                ('url_kuangchanziyuan_4', models.CharField(max_length=1000, verbose_name=b'\xe7\x9f\xbf\xe4\xba\xa7\xe8\xb5\x84\xe6\xba\x90\xe7\xae\x80\xe4\xbb\x8b4\xe9\x93\xbe\xe6\x8e\xa5')),
            ],
            options={
                'verbose_name': '\u9996\u9875\u8bbe\u7f6e',
                'verbose_name_plural': '\u9996\u9875\u8bbe\u7f6e',
            },
        ),
    ]
| 129.788732
| 205
| 0.680955
| 1,603
| 9,215
| 3.797255
| 0.068621
| 0.090357
| 0.092656
| 0.088714
| 0.942665
| 0.9402
| 0.9402
| 0.903401
| 0.869394
| 0.868737
| 0
| 0.150616
| 0.118828
| 9,215
| 70
| 206
| 131.642857
| 0.599015
| 0.002279
| 0
| 0
| 0
| 0.734375
| 0.546453
| 0.507398
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.03125
| 0
| 0.078125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
1a53a1fc59495003a977436b925767222d0f1230
| 8,359
|
py
|
Python
|
tests/test_handler.py
|
teamhide/fastapi-event
|
5b76db3dc605c34ffaa4e7be4db264641d0f5bdc
|
[
"Apache-2.0"
] | 23
|
2021-12-15T15:43:30.000Z
|
2022-03-29T09:23:27.000Z
|
tests/test_handler.py
|
teamhide/fastapi-event
|
5b76db3dc605c34ffaa4e7be4db264641d0f5bdc
|
[
"Apache-2.0"
] | 1
|
2021-12-16T10:46:32.000Z
|
2021-12-19T09:49:11.000Z
|
tests/test_handler.py
|
teamhide/fastapi-event
|
5b76db3dc605c34ffaa4e7be4db264641d0f5bdc
|
[
"Apache-2.0"
] | 1
|
2022-01-28T08:49:24.000Z
|
2022-01-28T08:49:24.000Z
|
from typing import Union, Type
import pytest
from pydantic import BaseModel
from fastapi_event import event_handler, BaseEvent, EventListener
from fastapi_event.exceptions import (
InvalidEventTypeException,
InvalidParameterTypeException,
ParameterCountException,
RequiredParameterException,
)
from tests.events import (
TestSecondEvent,
TestEventParameterNotNone,
TestEvent,
TestEventDoNotHaveParameter,
TestEventParameter,
)
class FirstEvent(BaseEvent):
    """Event expected to run first (ORDER = 1); its handler does nothing."""

    ORDER = 1

    async def run(self, parameter: Union[Type[BaseModel], None] = None) -> None:
        ...
class SecondEvent(BaseEvent):
    """Event expected to run second (ORDER = 2); its handler does nothing."""

    ORDER = 2

    async def run(self, parameter: Union[Type[BaseModel], None] = None) -> None:
        ...
class NoneOrderEvent(BaseEvent):
    """Event with no ORDER attribute; its handler does nothing."""

    async def run(self, parameter: Union[Type[BaseModel], None] = None) -> None:
        ...
@pytest.mark.asyncio
async def test_store_without_parameter(app_with_middleware, client):
    """store() without a parameter registers the event with a None payload."""
    app = app_with_middleware

    async def scenario():
        await event_handler.store(event=TestEvent)
        registry = event_handler._get_event_handler()
        assert TestEvent in registry.events
        assert registry.events[TestEvent] is None

    @app.get("/")
    async def endpoint():
        await scenario()

    client.get("/")
@pytest.mark.asyncio
async def test_multiple_store_without_parameter(app_with_middleware, client):
    """Two distinct events stored in one request are both registered."""
    app = app_with_middleware

    async def scenario():
        for evt in (TestEvent, TestSecondEvent):
            await event_handler.store(event=evt)
        registry = event_handler._get_event_handler()
        assert len(registry.events) == 2
        for evt in (TestEvent, TestSecondEvent):
            assert evt in registry.events
            assert registry.events[evt] is None

    @app.get("/")
    async def endpoint():
        await scenario()

    client.get("/")
@pytest.mark.asyncio
async def test_store_with_parameter(app_with_middleware, client):
    """store() keeps the parameter instance alongside its event."""
    app = app_with_middleware

    async def scenario():
        await event_handler.store(
            event=TestEvent,
            parameter=TestEventParameter(content="content"),
        )
        registry = event_handler._get_event_handler()
        assert len(registry.events) == 1
        assert TestEvent in registry.events
        assert isinstance(registry.events[TestEvent], TestEventParameter)

    @app.get("/")
    async def endpoint():
        await scenario()

    client.get("/")
@pytest.mark.asyncio
async def test_multiple_store_with_parameter(app_with_middleware, client):
    """Parameters are kept per event when several events are stored."""
    app = app_with_middleware

    async def scenario():
        for evt in (TestEvent, TestSecondEvent):
            await event_handler.store(
                event=evt,
                parameter=TestEventParameter(content="content"),
            )
        registry = event_handler._get_event_handler()
        assert len(registry.events) == 2
        for evt in (TestEvent, TestSecondEvent):
            assert evt in registry.events
            assert isinstance(registry.events[evt], TestEventParameter)

    @app.get("/")
    async def endpoint():
        await scenario()

    client.get("/")
@pytest.mark.asyncio
async def test_store_with_invalid_event_type_exception(app_with_middleware, client):
    """Storing a class that is not an accepted event type raises."""
    app = app_with_middleware

    class NotAnEvent:
        pass

    async def scenario():
        with pytest.raises(InvalidEventTypeException):
            await event_handler.store(event=NotAnEvent)

    @app.get("/")
    async def endpoint():
        await scenario()

    client.get("/")
@pytest.mark.asyncio
async def test_store_with_invalid_parameter_type_exception(app_with_middleware, client):
    """A parameter that is not a proper parameter object raises InvalidParameterTypeException."""
    app = app_with_middleware

    async def test():
        with pytest.raises(InvalidParameterTypeException):
            await event_handler.store(
                event=TestEvent,
                parameter="a",  # plain str is not an accepted parameter type
            )

    @app.get("/")
    async def get():
        await test()

    client.get("/")
@pytest.mark.asyncio
async def test_store_with_invalid_parameter_count_exception(app_with_middleware, client):
    """An event whose handler declares no parameter slot raises ParameterCountException."""
    app = app_with_middleware

    async def test():
        with pytest.raises(ParameterCountException):
            await event_handler.store(
                event=TestEventDoNotHaveParameter,
            )

    @app.get("/")
    async def get():
        await test()

    client.get("/")
@pytest.mark.asyncio
async def test_store_with_required_parameter_exception(app_with_middleware, client):
    """Omitting a required (non-None) parameter raises RequiredParameterException."""
    app = app_with_middleware

    async def test():
        with pytest.raises(RequiredParameterException):
            await event_handler.store(
                event=TestEventParameterNotNone,
            )

    @app.get("/")
    async def get():
        await test()

    client.get("/")
@pytest.mark.asyncio
async def test_order(app_with_middleware, client):
    """Stored events are grouped into the sorted event maps by their declared order."""
    app = app_with_middleware

    @EventListener()
    async def test():
        await event_handler.store(event=FirstEvent)
        await event_handler.store(event=SecondEvent)
        maps = await event_handler._get_event_handler()._get_sorted_event_maps()
        # No order-less event was stored, so the None bucket is empty.
        assert maps.get(None) == []
        assert len(maps.get(1)) == 1
        assert maps.get(1)[0].event == FirstEvent
        assert maps.get(1)[0].parameter is None
        assert len(maps.get(2)) == 1
        assert maps.get(2)[0].event == SecondEvent
        assert maps.get(2)[0].parameter is None

    @app.get("/")
    async def test_get():
        await test()

    client.get("/")
@pytest.mark.asyncio
async def test_order_if_the_order_of_the_store_is_different(app_with_middleware, client):
    """Sorting is by declared order, not by the order the events were stored in."""
    app = app_with_middleware

    @EventListener()
    async def test():
        # Stored second-first on purpose; the sorted maps must not care.
        await event_handler.store(event=SecondEvent)
        await event_handler.store(event=FirstEvent)
        maps = await event_handler._get_event_handler()._get_sorted_event_maps()
        assert maps.get(None) == []
        assert len(maps.get(1)) == 1
        assert maps.get(1)[0].event == FirstEvent
        assert maps.get(1)[0].parameter is None
        assert len(maps.get(2)) == 1
        assert maps.get(2)[0].event == SecondEvent
        assert maps.get(2)[0].parameter is None

    @app.get("/")
    async def test_get():
        await test()

    client.get("/")
@pytest.mark.asyncio
async def test_order_with_none_order(app_with_middleware, client):
    """Events without a declared order land in the None bucket of the sorted maps."""
    app = app_with_middleware

    @EventListener()
    async def test():
        await event_handler.store(event=FirstEvent)
        await event_handler.store(event=SecondEvent)
        await event_handler.store(event=NoneOrderEvent)
        maps = await event_handler._get_event_handler()._get_sorted_event_maps()
        assert maps.get(None)[0].event == NoneOrderEvent
        assert maps.get(None)[0].parameter is None
        assert len(maps.get(1)) == 1
        assert maps.get(1)[0].event == FirstEvent
        assert maps.get(1)[0].parameter is None
        assert len(maps.get(2)) == 1
        assert maps.get(2)[0].event == SecondEvent
        assert maps.get(2)[0].parameter is None

    @app.get("/")
    async def test_get():
        await test()

    client.get("/")
@pytest.mark.asyncio
async def test_order_with_none_order_if_the_order_of_the_store_is_different(app_with_middleware, client):
    """None-order bucketing is independent of the order in which events were stored."""
    app = app_with_middleware

    @EventListener()
    async def test():
        await event_handler.store(event=SecondEvent)
        await event_handler.store(event=FirstEvent)
        await event_handler.store(event=NoneOrderEvent)
        maps = await event_handler._get_event_handler()._get_sorted_event_maps()
        assert maps.get(None)[0].event == NoneOrderEvent
        assert maps.get(None)[0].parameter is None
        assert len(maps.get(1)) == 1
        assert maps.get(1)[0].event == FirstEvent
        assert maps.get(1)[0].parameter is None
        assert len(maps.get(2)) == 1
        assert maps.get(2)[0].event == SecondEvent
        assert maps.get(2)[0].parameter is None

    @app.get("/")
    async def test_get():
        await test()

    client.get("/")
| 26.536508
| 105
| 0.654145
| 974
| 8,359
| 5.422998
| 0.072895
| 0.059069
| 0.063612
| 0.083302
| 0.85763
| 0.842295
| 0.816547
| 0.794964
| 0.794964
| 0.762022
| 0
| 0.008998
| 0.242134
| 8,359
| 314
| 106
| 26.621019
| 0.824783
| 0
| 0
| 0.744589
| 0
| 0
| 0.005503
| 0
| 0
| 0
| 0
| 0
| 0.194805
| 1
| 0
| false
| 0.004329
| 0.025974
| 0
| 0.051948
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1a77385031a63c60e2cf006344efe1b4833b7dd9
| 16,471
|
py
|
Python
|
tests/test_core.py
|
apilytics/apilytics-python
|
c917331955c6f9b7469175df7838b7fd40dee33f
|
[
"MIT"
] | 5
|
2022-01-06T17:30:13.000Z
|
2022-01-16T11:38:31.000Z
|
tests/test_core.py
|
apilytics/apilytics-python
|
c917331955c6f9b7469175df7838b7fd40dee33f
|
[
"MIT"
] | 18
|
2022-01-08T12:58:49.000Z
|
2022-03-27T16:41:53.000Z
|
tests/test_core.py
|
apilytics/apilytics-python
|
c917331955c6f9b7469175df7838b7fd40dee33f
|
[
"MIT"
] | null | null | null |
import platform
import sys
import textwrap
import time
import unittest.mock
import urllib.error
import apilytics.core
import tests.conftest
# Restore the real sleep behavior for this one test for thoroughness.
@unittest.mock.patch("apilytics.core.time.sleep", new=time.sleep)
def test_apilytics_sender_should_call_apilytics_api(
    mocked_urlopen: unittest.mock.MagicMock,
) -> None:
    """One handled request results in exactly one well-formed POST to the Apilytics API."""
    with apilytics.core.ApilyticsSender(
        api_key="dummy-key",
        path="/",
        method="GET",
    ) as sender:
        sender.set_response_info(status_code=200)

    # The metrics are sent when the context manager exits.
    assert mocked_urlopen.call_count == 1
    call_args, call_kwargs = mocked_urlopen.call_args
    assert not call_args
    assert call_kwargs.keys() == {"url", "data"}

    api_request = call_kwargs["url"]
    assert api_request.full_url == "https://www.apilytics.io/api/v1/middleware"
    assert api_request.method == "POST"
    assert api_request.headers == {
        # urllib calls `capitalize()` on the header keys.
        "Content-type": "application/json",
        "X-api-key": "dummy-key",
        "Apilytics-version": f"apilytics-python-core/{apilytics.__version__};python/{platform.python_version()};;{sys.platform}",
    }

    data = tests.conftest.decode_request_data(call_kwargs["data"])
    assert data.keys() == {
        "path",
        "method",
        "statusCode",
        "timeMillis",
        # CPU/memory metrics are gathered from /proc, hence Linux-only.
        *(
            ("cpuUsage", "memoryUsage", "memoryTotal")
            if platform.system() == "Linux"
            else ()
        ),
    }
    assert data["path"] == "/"
    assert data["method"] == "GET"
    assert data["statusCode"] == 200
    assert isinstance(data["timeMillis"], int)
    if platform.system() == "Linux":
        assert 0 <= data["cpuUsage"] <= 1
        assert data["memoryUsage"] > 0
        assert data["memoryTotal"] > data["memoryUsage"]
def test_apilytics_sender_should_send_query_params(
    mocked_urlopen: unittest.mock.MagicMock,
) -> None:
    """A non-empty query string is forwarded verbatim in the payload."""
    with apilytics.core.ApilyticsSender(
        api_key="dummy-key",
        path="/path",
        query="key=value?other=123",
        method="PUT",
    ) as sender:
        sender.set_response_info(status_code=200)

    assert mocked_urlopen.call_count == 1
    __, call_kwargs = mocked_urlopen.call_args
    data = tests.conftest.decode_request_data(call_kwargs["data"])
    assert data["path"] == "/path"
    assert data["query"] == "key=value?other=123"
def test_apilytics_sender_should_not_send_empty_query_params(
    mocked_urlopen: unittest.mock.MagicMock,
) -> None:
    """Neither an empty-string nor a None query ends up in the sent payload."""
    with apilytics.core.ApilyticsSender(
        api_key="dummy-key",
        path="/",
        query="",
        method="GET",
    ) as sender:
        sender.set_response_info(status_code=200)

    assert mocked_urlopen.call_count == 1
    __, call_kwargs = mocked_urlopen.call_args
    data = tests.conftest.decode_request_data(call_kwargs["data"])
    assert "query" not in data

    with apilytics.core.ApilyticsSender(
        api_key="dummy-key",
        path="/",
        query=None,
        method="GET",
    ) as sender:
        sender.set_response_info(status_code=200)

    assert mocked_urlopen.call_count == 2
    __, call_kwargs = mocked_urlopen.call_args
    data = tests.conftest.decode_request_data(call_kwargs["data"])
    assert "query" not in data
def test_apilytics_sender_should_handle_empty_values_correctly(
    mocked_urlopen: unittest.mock.MagicMock,
) -> None:
    """Empty/None optional inputs are dropped from the payload; required keys remain."""
    with apilytics.core.ApilyticsSender(
        api_key="dummy-key",
        path="",
        method="",
        query="",
        request_size=None,
        user_agent=None,
        apilytics_integration=None,
        integrated_library=None,
    ) as sender:
        sender.set_response_info(status_code=None, response_size=None)

    assert mocked_urlopen.call_count == 1
    __, call_kwargs = mocked_urlopen.call_args
    data = tests.conftest.decode_request_data(call_kwargs["data"])
    assert data.keys() == {
        "path",
        "method",
        "timeMillis",
        *(
            ("cpuUsage", "memoryUsage", "memoryTotal")
            if platform.system() == "Linux"
            else ()
        ),
    }
    # Empty strings for path/method are kept as-is, unlike the None values above.
    assert data["path"] == ""
    assert data["method"] == ""
    assert isinstance(data["timeMillis"], int)
    if platform.system() == "Linux":
        assert isinstance(data["cpuUsage"], float)
        assert isinstance(data["memoryUsage"], int)
        assert isinstance(data["memoryTotal"], int)
@unittest.mock.patch("apilytics.core.platform.system", return_value="Linux")
@unittest.mock.patch("apilytics.core._get_used_and_total_memory", return_value=(0, 0))
def test_apilytics_sender_should_read_proc_stat_on_linux(
    _mocked_system: unittest.mock.MagicMock,
    _mocked_memory: unittest.mock.MagicMock,
    mocked_urlopen: unittest.mock.MagicMock,
) -> None:
    """CPU usage is derived from two /proc/stat snapshots taken around the request."""
    mocked_stat_start = textwrap.dedent(
        """\
        cpu 27133 0 33621 13668027 1459 0 508 10 100 100
        cpu0 7260 0 7391 3420134 484 0 80 0 0 0
        cpu1 7346 0 9306 3412338 138 0 82 0 0 0
        """  # The real file is longer.
    )
    mocked_stat_end = textwrap.dedent(
        """\
        cpu 28869 0 33657 13680890 1460 0 508 20 200 200
        cpu0 7263 0 7398 3423775 484 0 80 0 0 0
        cpu1 9069 0 9314 3414266 138 0 82 0 0 0
        """
    )
    with unittest.mock.patch(
        "builtins.open",
        new=unittest.mock.mock_open(read_data=mocked_stat_start),
    ) as mocked_open:
        # First open() yields the start snapshot, the second the end snapshot.
        mocked_open.side_effect = (
            mocked_open.return_value,
            unittest.mock.mock_open(read_data=mocked_stat_end).return_value,
        )
        with apilytics.core.ApilyticsSender(
            api_key="dummy-key",
            path="/",
            method="GET",
        ) as sender:
            sender.set_response_info(status_code=200)

    assert mocked_open.call_count == 2
    assert mocked_urlopen.call_count == 1
    __, call_kwargs = mocked_urlopen.call_args
    data = tests.conftest.decode_request_data(call_kwargs["data"])

    # Totals ignore last two "guest" fields.
    total_start = 27133 + 0 + 33621 + 13668027 + 1459 + 0 + 508 + 10
    total_end = 28869 + 0 + 33657 + 13680890 + 1460 + 0 + 508 + 20
    # Idles combine idle and iowait.
    idle_start = 13668027 + 1459
    idle_end = 13680890 + 1460
    assert data["cpuUsage"] == 1 - (idle_end - idle_start) / (total_end - total_start)
    assert data["cpuUsage"] == 0.12167144612863579
@unittest.mock.patch("apilytics.core.platform.system", return_value="Linux")
@unittest.mock.patch("apilytics.core._get_used_and_total_memory", return_value=(0, 0))
def test_apilytics_sender_should_handle_proc_stat_read_failure(
    _mocked_system: unittest.mock.MagicMock,
    _mocked_memory: unittest.mock.MagicMock,
    mocked_urlopen: unittest.mock.MagicMock,
) -> None:
    """If /proc/stat cannot be opened, cpuUsage is simply omitted from the payload."""
    with unittest.mock.patch("builtins.open", side_effect=OSError) as mocked_open:
        with apilytics.core.ApilyticsSender(
            api_key="dummy-key",
            path="/",
            method="GET",
        ) as sender:
            sender.set_response_info(status_code=200)

    assert mocked_open.call_count == 1
    assert mocked_urlopen.call_count == 1
    __, call_kwargs = mocked_urlopen.call_args
    data = tests.conftest.decode_request_data(call_kwargs["data"])
    assert "cpuUsage" not in data
@unittest.mock.patch("apilytics.core.platform.system", return_value="Linux")
@unittest.mock.patch("apilytics.core._get_used_and_total_memory", return_value=(0, 0))
def test_apilytics_sender_should_handle_proc_stat_iowait_missing(
    _mocked_system: unittest.mock.MagicMock,
    _mocked_memory: unittest.mock.MagicMock,
    mocked_urlopen: unittest.mock.MagicMock,
) -> None:
    """A /proc/stat cpu line with only four fields (no iowait) is still handled."""
    mocked_stat_start = "cpu 27133 0 33621 13668027"
    mocked_stat_end = "cpu 28869 0 33657 13680890"
    with unittest.mock.patch(
        "builtins.open",
        new=unittest.mock.mock_open(read_data=mocked_stat_start),
    ) as mocked_open:
        mocked_open.side_effect = (
            mocked_open.return_value,
            unittest.mock.mock_open(read_data=mocked_stat_end).return_value,
        )
        with apilytics.core.ApilyticsSender(
            api_key="dummy-key",
            path="/",
            method="GET",
        ) as sender:
            sender.set_response_info(status_code=200)

    assert mocked_open.call_count == 2
    assert mocked_urlopen.call_count == 1
    __, call_kwargs = mocked_urlopen.call_args
    data = tests.conftest.decode_request_data(call_kwargs["data"])

    total_start = 27133 + 0 + 33621 + 13668027
    total_end = 28869 + 0 + 33657 + 13680890
    # With no iowait field, idle alone is used.
    idle_start = 13668027
    idle_end = 13680890
    assert data["cpuUsage"] == 1 - (idle_end - idle_start) / (total_end - total_start)
    assert data["cpuUsage"] == 0.12107960368978476
@unittest.mock.patch("apilytics.core.platform.system", return_value="Linux")
@unittest.mock.patch("apilytics.core._get_used_and_total_memory", return_value=(0, 0))
def test_apilytics_sender_should_handle_proc_stat_timers_not_increased_zero_division(
    _mocked_system: unittest.mock.MagicMock,
    _mocked_memory: unittest.mock.MagicMock,
    mocked_urlopen: unittest.mock.MagicMock,
) -> None:
    """Identical start/end snapshots (delta 0) must not raise ZeroDivisionError."""
    mocked_stat = "cpu 27133 0 33621 13668027"
    with unittest.mock.patch(
        "builtins.open",
        new=unittest.mock.mock_open(read_data=mocked_stat),
    ) as mocked_open:
        # Both reads return the very same counters.
        mocked_open.side_effect = (
            mocked_open.return_value,
            unittest.mock.mock_open(read_data=mocked_stat).return_value,
        )
        with apilytics.core.ApilyticsSender(
            api_key="dummy-key",
            path="/",
            method="GET",
        ) as sender:
            sender.set_response_info(status_code=200)

    assert mocked_open.call_count == 2
    assert mocked_urlopen.call_count == 1
    __, call_kwargs = mocked_urlopen.call_args
    data = tests.conftest.decode_request_data(call_kwargs["data"])
    assert data["cpuUsage"] == 0.0
@unittest.mock.patch("apilytics.core.platform.system", return_value="Windows")
@unittest.mock.patch("apilytics.core._get_used_and_total_memory", return_value=(0, 0))
def test_apilytics_sender_should_not_read_proc_stat_when_not_on_linux(
    _mocked_system: unittest.mock.MagicMock,
    _mocked_memory: unittest.mock.MagicMock,
    mocked_urlopen: unittest.mock.MagicMock,
) -> None:
    """On non-Linux systems /proc/stat is never opened and cpuUsage is absent."""
    with unittest.mock.patch("builtins.open") as mocked_open:
        with apilytics.core.ApilyticsSender(
            api_key="dummy-key",
            path="/",
            method="GET",
        ) as sender:
            sender.set_response_info(status_code=200)

    assert mocked_open.call_count == 0
    assert mocked_urlopen.call_count == 1
    __, call_kwargs = mocked_urlopen.call_args
    data = tests.conftest.decode_request_data(call_kwargs["data"])
    assert "cpuUsage" not in data
@unittest.mock.patch("apilytics.core.platform.system", return_value="Linux")
@unittest.mock.patch("apilytics.core._get_cpu_usage", return_value=0.0)
def test_apilytics_sender_should_read_proc_meminfo_on_linux(
    _mocked_system: unittest.mock.MagicMock,
    _mocked_cpu_usage: unittest.mock.MagicMock,
    mocked_urlopen: unittest.mock.MagicMock,
) -> None:
    """Memory metrics come from /proc/meminfo: usage = MemTotal - MemAvailable."""
    memory_total = 4_125_478_912
    memory_available = 3_360_526_336
    mocked_meminfo = textwrap.dedent(
        f"""\
        MemTotal: {memory_total // 1024} kB
        MemFree: 789940 kB
        MemAvailable: {memory_available // 1024} kB
        Buffers: 2450168 kB
        """  # The real file is longer.
    )
    with unittest.mock.patch(
        "builtins.open", new=unittest.mock.mock_open(read_data=mocked_meminfo)
    ) as mocked_open:
        with apilytics.core.ApilyticsSender(
            api_key="dummy-key",
            path="/",
            method="GET",
        ) as sender:
            sender.set_response_info(status_code=200)

    assert mocked_open.call_count == 1
    assert mocked_urlopen.call_count == 1
    __, call_kwargs = mocked_urlopen.call_args
    data = tests.conftest.decode_request_data(call_kwargs["data"])
    assert data["memoryUsage"] == memory_total - memory_available
    assert data["memoryTotal"] == memory_total
@unittest.mock.patch("apilytics.core.platform.system", return_value="Linux")
@unittest.mock.patch("apilytics.core._get_cpu_usage", return_value=0.0)
def test_apilytics_sender_should_handle_proc_meminfo_read_failure(
    _mocked_system: unittest.mock.MagicMock,
    _mocked_cpu_usage: unittest.mock.MagicMock,
    mocked_urlopen: unittest.mock.MagicMock,
) -> None:
    """If /proc/meminfo cannot be opened, both memory keys are omitted."""
    with unittest.mock.patch("builtins.open", side_effect=OSError) as mocked_open:
        with apilytics.core.ApilyticsSender(
            api_key="dummy-key",
            path="/",
            method="GET",
        ) as sender:
            sender.set_response_info(status_code=200)

    assert mocked_open.call_count == 1
    assert mocked_urlopen.call_count == 1
    __, call_kwargs = mocked_urlopen.call_args
    data = tests.conftest.decode_request_data(call_kwargs["data"])
    assert "memoryUsage" not in data
    assert "memoryTotal" not in data
@unittest.mock.patch("apilytics.core.platform.system", return_value="Linux")
@unittest.mock.patch("apilytics.core._get_cpu_usage", return_value=0.0)
def test_apilytics_sender_should_handle_proc_meminfo_total_missing(
    _mocked_system: unittest.mock.MagicMock,
    _mocked_cpu_usage: unittest.mock.MagicMock,
    mocked_urlopen: unittest.mock.MagicMock,
) -> None:
    """An empty /proc/meminfo (no MemTotal) yields no memory keys at all."""
    with unittest.mock.patch(
        "builtins.open", new=unittest.mock.mock_open(read_data="")
    ) as mocked_open:
        with apilytics.core.ApilyticsSender(
            api_key="dummy-key",
            path="/",
            method="GET",
        ) as sender:
            sender.set_response_info(status_code=200)

    assert mocked_open.call_count == 1
    assert mocked_urlopen.call_count == 1
    __, call_kwargs = mocked_urlopen.call_args
    data = tests.conftest.decode_request_data(call_kwargs["data"])
    assert "memoryUsage" not in data
    assert "memoryTotal" not in data
@unittest.mock.patch("apilytics.core.platform.system", return_value="Linux")
@unittest.mock.patch("apilytics.core._get_cpu_usage", return_value=0.0)
def test_apilytics_sender_should_handle_proc_meminfo_available_missing(
    _mocked_system: unittest.mock.MagicMock,
    _mocked_cpu_usage: unittest.mock.MagicMock,
    mocked_urlopen: unittest.mock.MagicMock,
) -> None:
    """With MemTotal but no MemAvailable, only memoryTotal is reported."""
    memory_total = 1048576
    with unittest.mock.patch(
        "builtins.open",
        new=unittest.mock.mock_open(read_data=f"MemTotal: {memory_total // 1024}"),
    ) as mocked_open:
        with apilytics.core.ApilyticsSender(
            api_key="dummy-key",
            path="/",
            method="GET",
        ) as sender:
            sender.set_response_info(status_code=200)

    assert mocked_open.call_count == 1
    assert mocked_urlopen.call_count == 1
    __, call_kwargs = mocked_urlopen.call_args
    data = tests.conftest.decode_request_data(call_kwargs["data"])
    assert "memoryUsage" not in data
    assert data["memoryTotal"] == memory_total
@unittest.mock.patch("apilytics.core.platform.system", return_value="Windows")
@unittest.mock.patch("apilytics.core._get_cpu_usage", return_value=0.0)
def test_apilytics_sender_should_not_read_proc_meminfo_when_not_on_linux(
    _mocked_system: unittest.mock.MagicMock,
    _mocked_cpu_usage: unittest.mock.MagicMock,
    mocked_urlopen: unittest.mock.MagicMock,
) -> None:
    """On non-Linux systems /proc/meminfo is never opened and memory keys are absent."""
    with unittest.mock.patch("builtins.open") as mocked_open:
        with apilytics.core.ApilyticsSender(
            api_key="dummy-key",
            path="/",
            method="GET",
        ) as sender:
            sender.set_response_info(status_code=200)

    assert mocked_open.call_count == 0
    assert mocked_urlopen.call_count == 1
    __, call_kwargs = mocked_urlopen.call_args
    data = tests.conftest.decode_request_data(call_kwargs["data"])
    assert "memoryUsage" not in data
    assert "memoryTotal" not in data
@unittest.mock.patch(
    "apilytics.core.urllib.request.urlopen",
    side_effect=urllib.error.URLError("testing"),
)
def test_apilytics_sender_should_hide_http_errors(
    mocked_erroring_urlopen: unittest.mock.MagicMock,
) -> None:
    """A failing metrics send must never propagate an exception to the caller."""
    with apilytics.core.ApilyticsSender(
        api_key="dummy-key",
        path="/",
        method="GET",
    ) as sender:
        sender.set_response_info(status_code=200)

    # The send was attempted exactly once, and the URLError was swallowed.
    assert mocked_erroring_urlopen.call_count == 1
| 36.041575
| 129
| 0.680287
| 2,053
| 16,471
| 5.16756
| 0.100828
| 0.087096
| 0.069281
| 0.053916
| 0.849562
| 0.819964
| 0.802243
| 0.799133
| 0.782449
| 0.782449
| 0
| 0.043152
| 0.207881
| 16,471
| 456
| 130
| 36.120614
| 0.769985
| 0.014268
| 0
| 0.724936
| 0
| 0
| 0.136572
| 0.050853
| 0
| 0
| 0
| 0
| 0.172237
| 1
| 0.03856
| false
| 0
| 0.020566
| 0
| 0.059126
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1a87ba0dbd0c92951b7e93c591d8359d989a213b
| 13,056
|
py
|
Python
|
dnac_trigger.py
|
CiscoTestAutomation/DNAC-pyATS-Genie
|
5e605adf5df0fa075ab06ae724a6b59c56d27495
|
[
"Apache-2.0"
] | 1
|
2020-05-24T11:50:56.000Z
|
2020-05-24T11:50:56.000Z
|
dnac_trigger.py
|
CiscoTestAutomation/DNAC-pyATS-Genie
|
5e605adf5df0fa075ab06ae724a6b59c56d27495
|
[
"Apache-2.0"
] | 1
|
2020-03-23T15:28:14.000Z
|
2020-03-24T16:07:52.000Z
|
dnac_trigger.py
|
CiscoTestAutomation/DNAC-pyATS-Genie
|
5e605adf5df0fa075ab06ae724a6b59c56d27495
|
[
"Apache-2.0"
] | 2
|
2019-06-07T21:57:52.000Z
|
2019-10-12T16:42:33.000Z
|
import time
import json
import logging
from ats import aetest
from genie.utils.timeout import Timeout
### Code replaced by using Verification!
#from genie.utils.diff import Diff
###
from genie.harness.base import Trigger
log = logging.getLogger()
def _get_all_device_info(device):
response = device.rest.get('/api/v1/network-device')
all_device_info = response.json()
return all_device_info['response']
def _get_device_id_name(device, name):
    """Return the DNAC device id whose hostname equals *name*.

    Args:
        device: a connected DNAC device object exposing a ``rest`` connection.
        name: hostname to look up.

    Raises:
        Exception: if no device with that hostname exists in DNAC.
    """
    # NOTE: the original looped with a variable also called `device`,
    # shadowing the parameter; renamed for clarity (behavior unchanged).
    for info in _get_all_device_info(device):
        if info['hostname'] == name:
            return info['id']
    raise Exception('Could not find {n}'.format(n=name))
class TriggerUnconfigConfigDescriptionInterface(Trigger):
    '''Change an interface description via CLI, verify via CLI and DNAC, then revert.'''

    @aetest.setup
    def prerequisites(self, testbed, uut, steps, interface=None):
        '''Pick one interface and save its current description'''
        with steps.start('Find an interface') as step:
            output = uut.parse('show interfaces')
            if interface:
                self.interface = interface
                self.description = output[interface]['description'].strip()
            else:
                # Pick the first interface that has a description
                for interface in output:
                    if 'description' in output[interface]:
                        # BUGFIX: original stored the whole interface dict here
                        # (`output[interface]`), which broke every later string
                        # comparison against the description.
                        self.description = output[interface]['description'].strip()
                        self.interface = interface
                        break
                else:
                    step.failed('Could not find an interface')
            step.passed("Found interface '{i}'".format(i=self.interface))

        with steps.start("Verify '{i}' is also in DNAC".format(i=self.interface)) as step:
            # Make sure it is also in DNAC
            testbed.devices['dnac'].connect(via='rest', alias='rest')
            output = testbed.devices['dnac'].parse('/dna/intent/api/v1/interface',
                                                   alias='rest')
            if output[self.interface]['description'].strip() != self.description:
                step.failed("With cli interface '{i}' is description is '{dc}', "
                            "but with DNAC rest api it is "
                            "'{dd}'".format(i=self.interface,
                                            dc=self.description,
                                            dd=output[self.interface]['description']))
            step.passed("'{i}' is also in DNAC and have same description "
                        "'{d}'".format(i=self.interface, d=self.description))

    @aetest.test
    def config(self, testbed, uut, steps, description):
        '''Change the interface description and force a DNAC resync'''
        with steps.start("Change description of interface '{i}'".format(i=self.interface)) as step:
            uut.configure('''\
interface {i}
description {d}'''.format(i=self.interface, d=description))
        # Force DNAC to resync the device so it picks up the CLI change.
        device_id = _get_device_id_name(testbed.devices['dnac'], uut.name)
        testbed.devices['dnac'].rest.put('/dna/intent/api/v1/network-device/sync?forceSync=true',
                                         data=json.dumps([device_id]))

    @aetest.test
    def verify(self, testbed, uut, steps, description, dnac_timeout):
        '''Verify if the change description worked'''
        with steps.start("Verify '{i}' description is now '{d}' via "
                         "cli".format(i=self.interface, d=description)) as step:
            output = uut.parse('show interfaces {i}'.format(i=self.interface))
            if output[self.interface]['description'] != description:
                step.failed("'{i}' is expected to be '{d}' but instead is "
                            "'{dc}'".format(i=self.interface,
                                            d=description,
                                            dc=output[self.interface]['description']))
            step.passed("'{i}' description is '{d}' as expected via "
                        "cli".format(i=self.interface, d=description))

        with steps.start("Verify '{i}' description is now '{d}' via "
                         "DNAC".format(i=self.interface, d=description)) as step:
            # Add timeout as it can take time to update, even though the sync was sent
            timeout = Timeout(max_time=dnac_timeout['max_time'],
                              interval=dnac_timeout['interval'])
            while timeout.iterate():
                output = testbed.devices['dnac'].parse('/dna/intent/api/v1/interface',
                                                       alias='rest')
                if output[self.interface]['description'].strip() != description:
                    log.info("DNAC description is '{d}' instead of "
                             "'{dc}".format(d=output[self.interface]['description'].strip(),
                                            dc=description))
                    timeout.sleep()
                    continue
                break
            else:
                step.failed("'{i}' is expected to be '{d}' but instead is "
                            "'{dd}'".format(i=self.interface,
                                            d=description,
                                            dd=output[self.interface]['description']))
            step.passed("'{i}' description is '{d}' as expected via "
                        "DNAC".format(i=self.interface, d=description))

    @aetest.test
    def unconfig(self, testbed, uut, steps):
        '''Revert the interface description and force a DNAC resync'''
        with steps.start("Revert description of interface '{i}'".format(i=self.interface)) as step:
            uut.configure('''\
interface {i}
description {d}'''.format(i=self.interface, d=self.description))
        device_id = _get_device_id_name(testbed.devices['dnac'], uut.name)
        testbed.devices['dnac'].rest.put('/dna/intent/api/v1/network-device/sync?forceSync=true',
                                         data=json.dumps([device_id]))

    @aetest.test
    def verify_recover(self, testbed, uut, steps, dnac_timeout):
        '''Figure out if interface description is reverted'''
        with steps.start("Verify '{i}' description is now '{d}' via "
                         "cli".format(i=self.interface, d=self.description)) as step:
            output = uut.parse('show interfaces {i}'.format(i=self.interface))
            if output[self.interface]['description'] != self.description:
                step.failed("'{i}' is expected to be '{d}' but instead is "
                            "'{dc}'".format(i=self.interface,
                                            d=self.description,
                                            dc=output[self.interface]['description']))
            step.passed("'{i}' description is '{d}' as expected via "
                        "cli".format(i=self.interface, d=self.description))

        with steps.start("Verify '{i}' description is now '{d}' via "
                         "DNAC".format(i=self.interface, d=self.description)) as step:
            # Add timeout as it can take time to update, even though the sync was sent
            timeout = Timeout(max_time=dnac_timeout['max_time'],
                              interval=dnac_timeout['interval'])
            while timeout.iterate():
                output = testbed.devices['dnac'].parse('/dna/intent/api/v1/interface',
                                                       alias='rest')
                if output[self.interface]['description'].strip() != self.description:
                    timeout.sleep()
                    continue
                break
            else:
                step.failed("'{i}' is expected to be '{d}' but instead is "
                            "'{dd}'".format(i=self.interface,
                                            d=self.description,
                                            dd=output[self.interface]['description']))
            step.passed("'{i}' description is '{d}' as expected via "
                        "DNAC".format(i=self.interface, d=self.description))
class TriggerShutNoShutInterface(Trigger):
    '''Shut an up interface, verify via CLI and DNAC, then unshut and re-verify.'''

    @aetest.setup
    def prerequisites(self, testbed, uut, steps, interface=None):
        '''Find an up interface (or use the given one) and confirm DNAC agrees'''
        with steps.start('Find an up interface') as step:
            output = uut.parse('show interfaces')
            if interface:
                self.interface = interface
            else:
                for interface in output:
                    if output[interface]['oper_status'] == 'up':
                        # found one
                        self.interface = interface
                        break
                else:
                    step.failed('Could not find an up interface')
            step.passed("Found interface '{i}'".format(i=self.interface))

        with steps.start("Verify '{i}' is also up in DNAC".format(i=self.interface)) as step:
            # Make sure it is also up in DNAC
            testbed.devices['dnac'].connect(via='rest', alias='rest')
            output = testbed.devices['dnac'].parse('/dna/intent/api/v1/interface',
                                                   alias='rest')
            if output[self.interface]['status'] != 'up':
                step.failed("With cli interface '{i}' is up, "
                            "but with DNAC rest api it is of "
                            "state '{s}'".format(i=self.interface,
                                                 s=output[self.interface]['status']))
            step.passed("'{i}' is also up in DNAC".format(i=self.interface))

    @aetest.test
    def shut(self, uut, steps):
        '''Shut the interface'''
        with steps.start("Shut '{i}'".format(i=self.interface)) as step:
            uut.configure('''\
interface {i}
shutdown'''.format(i=self.interface))

    @aetest.test
    def verify(self, testbed, uut, steps):
        '''Verify if the shut worked'''
        with steps.start("Verify '{i}' is down via "
                         "cli".format(i=self.interface)) as step:
            output = uut.parse('show interfaces {i}'.format(i=self.interface))
            if output[self.interface]['oper_status'] != 'down':
                # BUGFIX: original read `output[interface]` here — `interface`
                # is undefined in this method and raised NameError.
                step.failed("'{i}' is expected to be down but instead is "
                            "'{s}'".format(i=self.interface,
                                           s=output[self.interface]['oper_status']))
            step.passed("'{i}' is down as expected via cli".format(i=self.interface))

        with steps.start("Verify '{i}' is also down in DNAC".format(i=self.interface)) as step:
            output = testbed.devices['dnac'].parse('/dna/intent/api/v1/interface',
                                                   alias='rest')
            # BUGFIX: same undefined-name issue (`output[interface]`) fixed below.
            if output[self.interface]['status'] != 'down':
                step.failed("'{i}' is expected to be down but instead is "
                            "'{s}'".format(i=self.interface,
                                           s=output[self.interface]['status']))
            step.passed("'{i}' is down as expected via DNAC".format(i=self.interface))

    @aetest.test
    def unshut(self, uut, steps):
        '''Unshut the interface'''
        with steps.start("Unshut '{i}'".format(i=self.interface)) as step:
            uut.configure('''\
interface {i}
no shutdown'''.format(i=self.interface))

    @aetest.test
    def verify_recover(self, testbed, uut, steps, wait_time=20):
        '''Figure out if interface is configured and up'''
        with steps.start("Verify '{i}' is up via "
                         "cli".format(i=self.interface)) as step:
            output = uut.parse('show interfaces {i}'.format(i=self.interface))
            if output[self.interface]['oper_status'] != 'up':
                # BUGFIX: `output[interface]` -> `output[self.interface]` (NameError).
                step.failed("'{i}' is expected to be up but instead is "
                            "'{s}'".format(i=self.interface,
                                           s=output[self.interface]['oper_status']))
            step.passed("'{i}' is up as expected via cli".format(i=self.interface))

        with steps.start("Verify '{i}' is also up in DNAC".format(i=self.interface)) as step:
            output = testbed.devices['dnac'].parse('/dna/intent/api/v1/interface',
                                                   alias='rest')
            if output[self.interface]['status'] != 'up':
                # BUGFIX: `output[interface]` -> `output[self.interface]` (NameError).
                step.failed("'{i}' is expected to be up but instead is "
                            "'{s}'".format(i=self.interface,
                                           s=output[self.interface]['status']))
            step.passed("'{i}' is up as expected via DNAC".format(i=self.interface))
| 45.491289
| 109
| 0.526731
| 1,440
| 13,056
| 4.742361
| 0.104167
| 0.118026
| 0.067653
| 0.123005
| 0.82135
| 0.777566
| 0.765412
| 0.751355
| 0.728218
| 0.678577
| 0
| 0.001287
| 0.345435
| 13,056
| 286
| 110
| 45.65035
| 0.7978
| 0.076746
| 0
| 0.63
| 0
| 0
| 0.209242
| 0.024735
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06
| false
| 0.06
| 0.03
| 0
| 0.11
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
1abeb94a3e51fb85082f23f2848db091b3ad88ca
| 12,006
|
py
|
Python
|
tests/test_yacfg/test_generate_core.py
|
gaohoward/YamlConfiger
|
d5548ec6ca70e6646ea135454c503eff4da8a85e
|
[
"Apache-2.0"
] | null | null | null |
tests/test_yacfg/test_generate_core.py
|
gaohoward/YamlConfiger
|
d5548ec6ca70e6646ea135454c503eff4da8a85e
|
[
"Apache-2.0"
] | 20
|
2020-11-16T16:40:04.000Z
|
2022-03-28T19:16:53.000Z
|
tests/test_yacfg/test_generate_core.py
|
gaohoward/YamlConfiger
|
d5548ec6ca70e6646ea135454c503eff4da8a85e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import pytest
import yacfg.yacfg
from yacfg.yacfg import generate_core
from yacfg.exceptions import TemplateError
from .profiles.fakes import (
fake_load_tuned_profile_no_defaults,
fake_load_tuned_profile_w_template,
fake_template_environment
)
@mock.patch('yacfg.yacfg.add_template_metadata', mock.Mock())
@mock.patch('yacfg.yacfg.add_render_config', mock.Mock())
@mock.patch('yacfg.yacfg.get_template_environment',
            side_effect=fake_template_environment)
@mock.patch('yacfg.yacfg.get_main_template_list', mock.Mock())
@mock.patch('yacfg.yacfg.filter_template_list', mock.Mock())
@mock.patch('yacfg.yacfg.ensure_output_path', mock.Mock())
@mock.patch('yacfg.yacfg.write_output', mock.Mock())
@mock.patch('yacfg.yacfg.generate_outputs', mock.Mock())
def test_true(*_):
    """generate_core renders with the given options and returns generate_outputs' result."""
    template = 'template/1.0.0'
    render_options = 'Render options'
    expected_result = 'generated data'
    config_data, _ = fake_load_tuned_profile_no_defaults()
    yacfg.yacfg.generate_outputs.return_value = expected_result

    result = generate_core(
        config_data=config_data,
        template=template,
        render_options=render_options
    )

    assert expected_result == result
    # With no output path and no profile writing requested, only the render
    # pipeline helpers are expected to run.
    # noinspection PyUnresolvedReferences
    yacfg.yacfg.add_render_config.assert_called_with(config_data,
                                                     render_options)
    # noinspection PyUnresolvedReferences
    yacfg.yacfg.get_template_environment.assert_called()
    # noinspection PyUnresolvedReferences
    yacfg.yacfg.get_main_template_list.assert_called()
    # noinspection PyUnresolvedReferences
    yacfg.yacfg.filter_template_list.assert_not_called()
    # noinspection PyUnresolvedReferences
    yacfg.yacfg.ensure_output_path.assert_not_called()
    # noinspection PyUnresolvedReferences
    yacfg.yacfg.write_output.assert_not_called()
    # noinspection PyUnresolvedReferences
    yacfg.yacfg.generate_outputs.assert_called()
@mock.patch('yacfg.yacfg.get_template_environment',
            side_effect=fake_template_environment)
@mock.patch('yacfg.yacfg.get_main_template_list', mock.Mock())
@mock.patch('yacfg.yacfg.filter_template_list', mock.Mock())
@mock.patch('yacfg.yacfg.ensure_output_path', mock.Mock())
@mock.patch('yacfg.yacfg.write_output', mock.Mock())
@mock.patch('yacfg.yacfg.generate_outputs', mock.Mock())
def test_true_no_output_path_write_profile(*_):
    """write_profile_data without an output path must not touch the
    filesystem, yet generation still succeeds."""
    data, profile = fake_load_tuned_profile_no_defaults()
    yacfg.yacfg.generate_outputs.return_value = 'generated data'

    result = generate_core(
        config_data=data,
        tuned_profile=profile,
        template='template/1.0.0',
        write_profile_data=True,
    )

    assert result == 'generated data'
    for mocked in (yacfg.yacfg.get_template_environment,
                   yacfg.yacfg.get_main_template_list,
                   yacfg.yacfg.generate_outputs):
        mocked.assert_called()
    for mocked in (yacfg.yacfg.filter_template_list,
                   yacfg.yacfg.ensure_output_path,
                   yacfg.yacfg.write_output):
        mocked.assert_not_called()
@mock.patch('yacfg.yacfg.get_template_environment',
            side_effect=fake_template_environment)
@mock.patch('yacfg.yacfg.get_main_template_list', mock.Mock())
@mock.patch('yacfg.yacfg.filter_template_list', mock.Mock())
@mock.patch('yacfg.yacfg.ensure_output_path', mock.Mock())
@mock.patch('yacfg.yacfg.write_output', mock.Mock())
@mock.patch('yacfg.yacfg.generate_outputs', mock.Mock())
def test_true_output_path_write_no_profile(*_):
    """write_profile_data without a tuned profile writes nothing but the
    generated data is still returned."""
    data, _ = fake_load_tuned_profile_no_defaults()
    yacfg.yacfg.generate_outputs.return_value = 'generated data'

    result = generate_core(
        config_data=data,
        template='template/1.0.0',
        write_profile_data=True,
    )

    assert result == 'generated data'
    for mocked in (yacfg.yacfg.get_template_environment,
                   yacfg.yacfg.get_main_template_list,
                   yacfg.yacfg.generate_outputs):
        mocked.assert_called()
    for mocked in (yacfg.yacfg.filter_template_list,
                   yacfg.yacfg.ensure_output_path,
                   yacfg.yacfg.write_output):
        mocked.assert_not_called()
@mock.patch('yacfg.yacfg.get_template_environment',
            side_effect=fake_template_environment)
@mock.patch('yacfg.yacfg.get_main_template_list', mock.Mock())
@mock.patch('yacfg.yacfg.filter_template_list', mock.Mock())
@mock.patch('yacfg.yacfg.ensure_output_path', mock.Mock())
@mock.patch('yacfg.yacfg.write_output', mock.Mock())
@mock.patch('yacfg.yacfg.generate_outputs', mock.Mock())
def test_true_output_path_write_profile(*_):
    """output_path + tuned profile + write_profile_data: the output dir is
    ensured and profile data is written out."""
    data, profile = fake_load_tuned_profile_no_defaults()
    yacfg.yacfg.generate_outputs.return_value = 'generated data'

    result = generate_core(
        config_data=data,
        tuned_profile=profile,
        template='template/1.0.0',
        output_path='/out/directory',
        write_profile_data=True,
    )

    assert result == 'generated data'
    for mocked in (yacfg.yacfg.get_template_environment,
                   yacfg.yacfg.get_main_template_list,
                   yacfg.yacfg.ensure_output_path,
                   yacfg.yacfg.write_output,
                   yacfg.yacfg.generate_outputs):
        mocked.assert_called()
    # noinspection PyUnresolvedReferences
    yacfg.yacfg.filter_template_list.assert_not_called()
@mock.patch('yacfg.yacfg.get_template_environment',
            side_effect=fake_template_environment)
@mock.patch('yacfg.yacfg.get_main_template_list', mock.Mock())
@mock.patch('yacfg.yacfg.filter_template_list', mock.Mock())
@mock.patch('yacfg.yacfg.ensure_output_path', mock.Mock())
@mock.patch('yacfg.yacfg.write_output', mock.Mock())
@mock.patch('yacfg.yacfg.generate_outputs', mock.Mock())
def test_true_output_path(*_):
    """output_path alone ensures the directory but never writes the
    profile data (write_profile_data was not requested)."""
    data, profile = fake_load_tuned_profile_no_defaults()
    yacfg.yacfg.generate_outputs.return_value = 'generated data'

    result = generate_core(
        config_data=data,
        tuned_profile=profile,
        template='template/1.0.0',
        output_path='/out/directory',
    )

    assert result == 'generated data'
    for mocked in (yacfg.yacfg.get_template_environment,
                   yacfg.yacfg.get_main_template_list,
                   yacfg.yacfg.ensure_output_path,
                   yacfg.yacfg.generate_outputs):
        mocked.assert_called()
    for mocked in (yacfg.yacfg.filter_template_list,
                   yacfg.yacfg.write_output):
        mocked.assert_not_called()
@mock.patch('yacfg.yacfg.get_template_environment',
            side_effect=fake_template_environment)
@mock.patch('yacfg.yacfg.get_main_template_list', mock.Mock())
@mock.patch('yacfg.yacfg.filter_template_list', mock.Mock())
@mock.patch('yacfg.yacfg.ensure_output_path', mock.Mock())
@mock.patch('yacfg.yacfg.write_output', mock.Mock())
@mock.patch('yacfg.yacfg.generate_outputs', mock.Mock())
def test_true_template(*_):
    """An explicitly supplied template is enough to generate data with no
    output-path side effects."""
    data, _ = fake_load_tuned_profile_no_defaults()
    yacfg.yacfg.generate_outputs.return_value = 'generated data'

    result = generate_core(
        config_data=data,
        template='template/1.0.0',
    )

    assert result == 'generated data'
    for mocked in (yacfg.yacfg.get_template_environment,
                   yacfg.yacfg.get_main_template_list,
                   yacfg.yacfg.generate_outputs):
        mocked.assert_called()
    for mocked in (yacfg.yacfg.filter_template_list,
                   yacfg.yacfg.ensure_output_path,
                   yacfg.yacfg.write_output):
        mocked.assert_not_called()
@mock.patch('yacfg.yacfg.get_template_environment',
            side_effect=fake_template_environment)
@mock.patch('yacfg.yacfg.get_main_template_list', mock.Mock())
@mock.patch('yacfg.yacfg.filter_template_list', mock.Mock())
@mock.patch('yacfg.yacfg.ensure_output_path', mock.Mock())
@mock.patch('yacfg.yacfg.write_output', mock.Mock())
@mock.patch('yacfg.yacfg.generate_outputs', mock.Mock())
def test_true_profile_template(*_):
    """When the profile itself carries a template, no template argument is
    needed for generation to succeed."""
    data, _ = fake_load_tuned_profile_w_template()
    yacfg.yacfg.generate_outputs.return_value = 'generated data'

    assert generate_core(config_data=data) == 'generated data'
    for mocked in (yacfg.yacfg.get_template_environment,
                   yacfg.yacfg.get_main_template_list,
                   yacfg.yacfg.generate_outputs):
        mocked.assert_called()
    for mocked in (yacfg.yacfg.filter_template_list,
                   yacfg.yacfg.ensure_output_path,
                   yacfg.yacfg.write_output):
        mocked.assert_not_called()
@mock.patch('yacfg.yacfg.get_template_environment', mock.Mock())
@mock.patch('yacfg.yacfg.get_main_template_list', mock.Mock())
@mock.patch('yacfg.yacfg.filter_template_list', mock.Mock())
@mock.patch('yacfg.yacfg.ensure_output_path', mock.Mock())
@mock.patch('yacfg.yacfg.write_output', mock.Mock())
@mock.patch('yacfg.yacfg.generate_outputs', mock.Mock())
def test_no_template_exception(*_):
    """No template from any source raises TemplateError before any of the
    generation machinery runs."""
    data, _ = fake_load_tuned_profile_no_defaults()

    with pytest.raises(TemplateError):
        generate_core(config_data=data)

    for mocked in (yacfg.yacfg.get_template_environment,
                   yacfg.yacfg.get_main_template_list,
                   yacfg.yacfg.filter_template_list,
                   yacfg.yacfg.ensure_output_path,
                   yacfg.yacfg.write_output,
                   yacfg.yacfg.generate_outputs):
        mocked.assert_not_called()
| 38.604502
| 74
| 0.769282
| 1,429
| 12,006
| 6.137859
| 0.083275
| 0.123133
| 0.079808
| 0.108311
| 0.914263
| 0.910158
| 0.895679
| 0.888724
| 0.867974
| 0.865124
| 0
| 0.002498
| 0.132934
| 12,006
| 310
| 75
| 38.729032
| 0.840058
| 0.19257
| 0
| 0.80402
| 0
| 0
| 0.182536
| 0.159277
| 0
| 0
| 0
| 0
| 0.281407
| 1
| 0.040201
| false
| 0
| 0.030151
| 0
| 0.070352
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
46cfe0fa2d1c3dfd4fc5359c57138341e1f8622d
| 49,122
|
py
|
Python
|
ext/bjut/mcpat.py
|
ChengYanJin/GEM5
|
43748f795489f36f6f9eef984b3fb67a31c32b80
|
[
"BSD-3-Clause"
] | 3
|
2017-11-19T06:49:54.000Z
|
2018-12-26T18:08:11.000Z
|
ext/bjut/mcpat.py
|
ChengYanJin/GEM5
|
43748f795489f36f6f9eef984b3fb67a31c32b80
|
[
"BSD-3-Clause"
] | null | null | null |
ext/bjut/mcpat.py
|
ChengYanJin/GEM5
|
43748f795489f36f6f9eef984b3fb67a31c32b80
|
[
"BSD-3-Clause"
] | 6
|
2016-07-31T18:48:18.000Z
|
2022-03-06T22:41:28.000Z
|
#!/usr/bin/python
#
# A Python script to generate McPAT XML and result files for the specified GEM5 config and stat files.
#
# Copyright (C) Min Cai 2015
#
from common import *
import os
import traceback
from lxml import etree
from optparse import OptionParser
from yattag import Doc
from pyparsing import Word, Literal, Optional, Suppress, ParseException, nums, restOfLine
class McPATEnabledExperiment(Experiment):
def __init__(self, type, dir, bench=None, l2_size=None, l2_assoc=None, l2_tags=None, section_num_to_use=2, gen_mcpat_xml_file=False, mcpat_enabled=True):
    """Set up the experiment and, when McPAT support is enabled,
    optionally regenerate the McPAT XML file and load the McPAT stats.

    :param gen_mcpat_xml_file: regenerate the XML input before reading stats
    :param mcpat_enabled: when False, skip McPAT entirely (stats stay None)
    """
    Experiment.__init__(self, type, dir, bench, l2_size, l2_assoc, l2_tags, section_num_to_use)
    self.mcpat_enabled = mcpat_enabled
    self.mcpat_stats = None
    if mcpat_enabled:
        if gen_mcpat_xml_file:
            self.gen_mcpat_xml_file()
        self.mcpat_stats = self.read_mcpat_stats()
def gen_core(self, tag, i):
    """Emit the McPAT XML subtree describing core *i*.

    Produces one ``<component ... type='Core'>`` element holding the
    core's parameters and statistics, with nested sub-components for the
    branch predictor, instruction/data TLBs, L1 instruction/data caches
    and the branch target buffer.

    :param tag: ``tag`` context-manager factory of a yattag ``Doc``
    :param i: zero-based core index; used in element ids and when
        indexing ``$.system.cpu[i]`` config entries and per-CPU stats

    Values marked ``# TODO`` are hard-coded placeholders inherited from
    the original template rather than values read from GEM5 output.
    """

    # Every McPAT leaf is an empty <param>/<stat> element with name and
    # value attributes; factor that boilerplate out once.
    def param(name, value):
        with tag('param', name=name, value=value):
            pass

    def stat(name, value):
        with tag('stat', name=name, value=value):
            pass

    # Lookups into the GEM5 config / stat files for this CPU.
    def config(suffix):
        return self.configs.execute('$.system.cpu[' + str(i) + ']' + suffix)

    def stat_int(suffix):
        return str(int(self.stats['system.' + self.cpu_id(i) + suffix]))

    def cache_stat_int(suffix):
        # Cache stats use the alternate cpu_id form (second argument True).
        return str(int(self.stats['system.' + self.cpu_id(i, True) + suffix]))

    def shared_l1_params():
        # Buffer sizing, technology selection and port counts that are
        # identical for the L1 instruction and data caches.
        for name, value in (
                ('miss_buffer_size', '2'),
                ('fetch_buffer_size', '2'),
                ('prefetch_buffer_size', '2'),
                ('writeback_buffer_size', '0'),
                ('clockrate', '0'),
                ('tech_type', '0'),
                ('Directory_type', '2'),
                ('device_type', '0'),
                ('core_type', '1'),
                ('wire_mat_type', '2'),
                ('wire_type', '0'),
                ('miss_buffer_assoc', '0'),
                ('fetch_buffer_assoc', '0'),
                ('prefetch_buffer_assoc', '0'),
                ('writeback_buffer_assoc', '0'),
                ('miss_buffer_banks', '1'),
                ('fetch_buffer_banks', '1'),
                ('prefetch_buffer_banks', '1'),
                ('writeback_buffer_banks', '1'),
                ('cache_access_mode', '0'),
                ('miss_buff_access_mode', '2'),
                ('fetch_buff_access_mode', '2'),
                ('prefetch_buff_access_mode', '2'),
                ('writeback_buff_access_mode', '2'),
                ('cache_rw_ports', '1'),
                ('cache_rd_ports', '0'),
                ('cache_wr_ports', '0'),
                ('cache_se_rd_ports', '0'),
                ('cache_search_ports', '0'),
                ('miss_buff_rw_ports', '1'),
                ('miss_buff_rd_ports', '0'),
                ('miss_buff_wr_ports', '0'),
                ('miss_buff_se_rd_ports', '0'),
                ('miss_buff_search_ports', '1'),
                ('fetch_buff_rw_ports', '1'),
                ('fetch_buff_rd_ports', '0'),
                ('fetch_buff_wr_ports', '0'),
                ('fetch_buff_se_rd_ports', '0'),
                ('fetch_buff_search_ports', '1'),
                ('pf_buff_rw_ports', '1'),
                ('pf_buff_rd_ports', '0'),
                ('pf_buff_wr_ports', '0'),
                ('pf_buff_se_rd_ports', '0'),
                ('pf_buff_search_ports', '1'),
                ('wb_buff_rw_ports', '1'),
                ('wb_buff_rd_ports', '0'),
                ('wb_buff_wr_ports', '0'),
                ('wb_buff_se_rd_ports', '0'),
                ('wb_buff_search_ports', '1'),
                ('pure_ram', '0')):
            param(name, value)

    def gen_predictor():
        # Branch predictor geometry (fixed placeholder configuration).
        with tag('component', id='system.core' + str(i) + '.predictor', name='PBT', type='BranchPredictor'):
            param('assoc', '1')
            param('nbanks', '1')
            param('local_l1_predictor_size', '12')
            param('local_l2_predictor_size', '4')
            param('local_predictor_entries', '8192')
            param('global_predictor_entries', '8192')
            param('global_predictor_bits', '4')
            param('chooser_predictor_entries', '8192')
            param('chooser_predictor_bits', '4')

    def gen_itlb():
        # Instruction TLB: size from the config file, accesses/misses from stats.
        with tag('component', id='system.core' + str(i) + '.itlb', name='itlb', type='InstructionTLB'):
            param('number_entries', config('.itb.size'))
            param('latency', '8')  # TODO
            param('throughput', '3')
            param('assoc', '0')
            param('nbanks', '1')
            stat('total_accesses', stat_int('.itb.fetch_accesses'))
            stat('total_misses', stat_int('.itb.fetch_misses'))
            stat('conflicts', '0')  # TODO

    def gen_l1i():
        # L1 instruction cache: geometry from config, traffic from stats.
        with tag('component', id='system.core' + str(i) + '.icache', name='Instruction Cache', type='CacheUnit'):
            param('level', '1')
            param('size', config('.icache.size'))
            param('block_size', config('.icache.tags.block_size'))
            param('assoc', config('.icache.assoc'))
            param('num_banks', '1')
            param('latency', config('.icache.hit_latency'))
            param('throughput', '3')
            shared_l1_params()
            stat('read_accesses', cache_stat_int('.icache.ReadReq_accesses::total'))
            stat('read_misses', cache_stat_int('.icache.ReadReq_misses::total'))
            stat('conflicts', '0')  # TODO
            stat('duty_cycle', '0')  # TODO

    def gen_dtlb():
        # Data TLB: size from the config file, read traffic from stats.
        with tag('component', id='system.core' + str(i) + '.dtlb', name='dtlb', type='DataTLB'):
            param('number_entries', config('.dtb.size'))
            param('latency', '8')  # TODO
            param('throughput', '3')
            param('assoc', '0')
            param('nbanks', '1')
            stat('read_accesses', stat_int('.dtb.read_accesses'))
            stat('read_misses', stat_int('.dtb.read_misses'))
            stat('conflicts', '0')  # TODO

    def gen_l1d():
        # L1 data cache: geometry from config, read/write traffic from stats.
        with tag('component', id='system.core' + str(i) + '.dcache', name='Data Cache', type='CacheUnit'):
            param('level', '1')
            param('size', config('.dcache.size'))
            param('block_size', config('.dcache.tags.block_size'))
            param('assoc', config('.dcache.assoc'))
            param('num_banks', '1')
            param('latency', config('.dcache.hit_latency'))
            param('throughput', '3')
            shared_l1_params()
            stat('read_accesses', cache_stat_int('.dcache.ReadReq_accesses::total'))
            stat('write_accesses', cache_stat_int('.dcache.WriteReq_accesses::total'))
            stat('read_misses', cache_stat_int('.dcache.ReadReq_misses::total'))
            stat('write_misses', cache_stat_int('.dcache.WriteReq_misses::total'))
            stat('conflicts', '1')  # TODO
            stat('duty_cycle', '1')  # TODO

    def gen_btargetbuf():
        # Branch target buffer (fixed placeholder configuration).
        with tag('component', id='system.core' + str(i) + '.btargetbuf', name='btargetbuf',
                 type='BranchTargetBuffer'):
            param('size', '8192')
            param('block_size', '4')
            param('assoc', '2')
            param('num_banks', '1')
            param('latency', '1')
            param('throughput', '3')
            param('rw_ports', '1')
            stat('read_accesses', '25')  # TODO
            stat('write_accesses', '25')  # TODO

    with tag('component', id='system.core' + str(i), name='core' + str(i), type='Core'):
        param('clock_rate', str(int(self.stats['sim_freq']) / 1000 / 10 ** 6))
        param('opt_local', '0')
        param('instruction_length', '32')
        param('opcode_width', '7')
        param('x86', '0')
        param('micro_opcode_width', '8')
        param('machine_type', '1')
        param('number_hardware_threads', config('.numThreads'))
        param('fetch_width', '1')
        param('number_instruction_fetch_ports', '1')
        param('decode_width', '1')
        param('issue_width', '1')
        param('peak_issue_width', '1')
        param('commit_width', '1')
        param('fp_issue_width', '1')
        param('prediction_width', '0')
        param('int_pipelines', '2')
        param('fp_pipelines', '1')
        param('int_pipeline_depth', '12')
        param('fp_pipeline_depth', '13')
        param('ALU_per_core', '2')
        param('MUL_per_core', '1')
        param('FPU_per_core', '1')
        param('instruction_buffer_size', '32')
        param('instruction_window_scheme', '0')
        param('instruction_window_size', '7')
        param('fp_instruction_window_size', '18')
        param('ROB_size', '56')
        param('archi_Regs_IRF_size', '30')
        param('archi_Regs_FRF_size', '48')
        param('phy_Regs_IRF_size', '34')
        param('phy_Regs_FRF_size', '40')
        param('rename_scheme', '0')
        param('register_window_size', '0')
        param('register_window_throughput', '4')
        param('register_window_latency', '4')
        param('store_buffer_size', '32')
        param('load_buffer_size', '32')
        param('memory_ports', '1')
        param('RAS_size', '16')
        param('execu_wire_mat_type', '2')
        param('execu_bypass_base_width', '1')
        param('execu_bypass_base_height', '1')
        param('execu_bypass_start_wiring_level', '3')
        param('execu_bypass_route_over_perc', '1')
        param('globalCheckpoint', '32')
        param('perThreadState', '8')
        param('ROB_assoc', '1')
        param('ROB_nbanks', '1')
        param('ROB_tag_width', '0')
        param('scheduler_assoc', '0')
        param('scheduler_nbanks', '1')
        param('register_window_assoc', '1')
        param('register_window_nbanks', '1')
        param('register_window_tag_width', '0')
        param('register_window_rw_ports', '1')
        param('phy_Regs_IRF_assoc', '1')
        param('phy_Regs_IRF_nbanks', '1')
        param('phy_Regs_IRF_tag_width', '0')
        param('phy_Regs_IRF_rd_ports', '1')
        param('phy_Regs_IRF_wr_ports', '1')
        param('phy_Regs_FRF_assoc', '1')
        param('phy_Regs_FRF_nbanks', '1')
        param('phy_Regs_FRF_tag_width', '0')
        param('phy_Regs_FRF_rd_ports', '1')
        param('phy_Regs_FRF_wr_ports', '1')
        param('front_rat_nbanks', '1')
        param('front_rat_rw_ports', '1')
        param('retire_rat_nbanks', '1')
        param('retire_rat_rw_ports', '0')
        param('freelist_nbanks', '1')
        param('freelist_rw_ports', '1')
        param('load_buffer_assoc', '0')
        param('load_buffer_nbanks', '1')
        param('store_buffer_assoc', '0')
        param('store_buffer_nbanks', '1')
        param('instruction_buffer_assoc', '1')
        param('instruction_buffer_nbanks', '1')
        param('instruction_buffer_tag_width', '0')
        # Total/committed instructions are the same int+fp sum in this model.
        total_insts = str(
            int(self.stats['system.' + self.cpu_id(i) + '.num_int_insts']) + int(
                self.stats['system.' + self.cpu_id(i) + '.num_fp_insts']))
        stat('total_instructions', total_insts)
        stat('int_instructions', stat_int('.num_int_insts'))
        stat('fp_instructions', stat_int('.num_fp_insts'))
        stat('branch_instructions', '25')  # TODO
        stat('branch_mispredictions', '2')  # TODO
        stat('load_instructions', stat_int('.num_load_insts'))
        stat('store_instructions', stat_int('.num_store_insts'))
        stat('committed_instructions', total_insts)
        stat('committed_int_instructions', stat_int('.num_int_insts'))
        stat('committed_fp_instructions', stat_int('.num_fp_insts'))
        stat('pipeline_duty_cycle', '1')  # TODO
        stat('total_cycles', stat_int('.numCycles'))  # TODO
        stat('ROB_reads', '100')  # TODO
        stat('ROB_writes', '100')  # TODO
        stat('rename_reads', '100')  # TODO
        stat('rename_writes', '100')  # TODO
        stat('fp_rename_reads', '100')  # TODO
        stat('fp_rename_writes', '100')  # TODO
        stat('inst_window_reads', '80')  # TODO
        stat('inst_window_writes', '80')  # TODO
        stat('inst_window_wakeup_accesses', '80')  # TODO
        stat('fp_inst_window_reads', '20')  # TODO
        stat('fp_inst_window_writes', '20')  # TODO
        stat('fp_inst_window_wakeup_accesses', '20')  # TODO
        stat('int_regfile_reads', '160')  # TODO
        stat('float_regfile_reads', '40')  # TODO
        stat('int_regfile_writes', '80')  # TODO
        stat('float_regfile_writes', '20')  # TODO
        stat('function_calls', stat_int('.num_func_calls'))  # TODO
        stat('context_switches', '0')  # TODO
        stat('ialu_accesses', stat_int('.num_int_alu_accesses'))  # TODO
        stat('fpu_accesses', stat_int('.num_fp_alu_accesses'))  # TODO
        stat('mul_accesses', '10')  # TODO
        stat('cdb_alu_accesses', '70')  # TODO
        stat('cdb_mul_accesses', '10')  # TODO
        stat('cdb_fpu_accesses', '20')  # TODO
        stat('IFU_duty_cycle', '1')  # TODO
        stat('LSU_duty_cycle', '1')  # TODO
        stat('MemManU_I_duty_cycle', '1')  # TODO
        stat('MemManU_D_duty_cycle', '1')  # TODO
        stat('ALU_duty_cycle', '1')  # TODO
        stat('MUL_duty_cycle', '1')  # TODO
        stat('FPU_duty_cycle', '1')  # TODO
        stat('ALU_cdb_duty_cycle', '1')  # TODO
        stat('MUL_cdb_duty_cycle', '1')  # TODO
        stat('FPU_cdb_duty_cycle', '1')  # TODO
        # Sub-components nest inside the Core element.
        gen_predictor()
        gen_itlb()
        gen_l1i()
        gen_dtlb()
        gen_l1d()
        gen_btargetbuf()
def gen_l2(self, tag, i=None):
    """Emit the McPAT 'L2 Cache' component (params then stats) for either the
    single shared L2 (i is None) or the i-th per-node L2 cache (NUMA configs).

    Parameters:
        tag -- the yattag tag() context-manager factory to emit XML with
        i -- L2 cache index under NUMA, or None for the shared L2
    """
    # Config-path fragment differs between the shared L2 ('l2') and the
    # per-node caches ('l2cache[i]'); the original rebuilt it four times.
    cfg_l2 = 'l2' if i is None else 'l2cache[' + str(i) + ']'
    stat_prefix = 'system.' + self.l2_id(i) + '.'
    # (name, value) pairs in the exact order McPAT expects the elements.
    params = [
        ('level', '2'),
        ('size', self.configs.execute('$.system.' + cfg_l2 + '.size')),
        ('block_size', self.configs.execute('$.system.' + cfg_l2 + '.tags.block_size')),
        ('assoc', self.configs.execute('$.system.' + cfg_l2 + '.assoc')),
        ('num_banks', '1'),
        ('latency', self.configs.execute('$.system.' + cfg_l2 + '.hit_latency')),
        ('throughput', '3'),
        ('miss_buffer_size', '2'),
        ('fetch_buffer_size', '2'),
        ('prefetch_buffer_size', '2'),
        ('writeback_buffer_size', '0'),
        # NOTE: integer division under Python 2 (sim_freq ticks/s -> MHz).
        ('clockrate', str(int(self.stats['sim_freq']) / 1000 / 10 ** 6)),
        ('tech_type', '0'),
        ('Directory_type', '2'),
        ('device_type', '0'),
        ('core_type', '1'),
        ('wire_mat_type', '2'),
        ('wire_type', '0'),
        ('miss_buffer_assoc', '0'),
        ('fetch_buffer_assoc', '0'),
        ('prefetch_buffer_assoc', '0'),
        ('writeback_buffer_assoc', '0'),
        ('miss_buffer_banks', '1'),
        ('fetch_buffer_banks', '1'),
        ('prefetch_buffer_banks', '1'),
        ('writeback_buffer_banks', '1'),
        ('cache_access_mode', '0'),
        ('miss_buff_access_mode', '2'),
        ('fetch_buff_access_mode', '2'),
        ('prefetch_buff_access_mode', '2'),
        ('writeback_buff_access_mode', '2'),
        ('cache_rw_ports', '1'),
        ('cache_rd_ports', '0'),
        ('cache_wr_ports', '0'),
        ('cache_se_rd_ports', '0'),
        ('cache_search_ports', '0'),
        ('miss_buff_rw_ports', '1'),
        ('miss_buff_rd_ports', '0'),
        ('miss_buff_wr_ports', '0'),
        ('miss_buff_se_rd_ports', '0'),
        ('miss_buff_search_ports', '1'),
        ('fetch_buff_rw_ports', '1'),
        ('fetch_buff_rd_ports', '0'),
        ('fetch_buff_wr_ports', '0'),
        ('fetch_buff_se_rd_ports', '0'),
        ('fetch_buff_search_ports', '1'),
        ('pf_buff_rw_ports', '1'),
        ('pf_buff_rd_ports', '0'),
        ('pf_buff_wr_ports', '0'),
        ('pf_buff_se_rd_ports', '0'),
        ('pf_buff_search_ports', '1'),
        ('wb_buff_rw_ports', '1'),
        ('wb_buff_rd_ports', '0'),
        ('wb_buff_wr_ports', '0'),
        ('wb_buff_se_rd_ports', '0'),
        ('wb_buff_search_ports', '1'),
        ('pure_ram', '0'),
    ]
    # gem5 splits L2 reads into clean/shared requests; writes show up as
    # ReadExReq (read-for-ownership) in the stats file.
    stats = [
        ('read_accesses',
         str(int(self.stats[stat_prefix + 'ReadCleanReq_accesses::total']) +
             int(self.stats[stat_prefix + 'ReadSharedReq_accesses::total']))),
        ('write_accesses',
         str(int(self.stats[stat_prefix + 'ReadExReq_accesses::total']))),
        ('read_misses',
         str(int(self.stats[stat_prefix + 'ReadCleanReq_misses::total']) +
             int(self.stats[stat_prefix + 'ReadSharedReq_misses::total']))),
        ('write_misses',
         str(int(self.stats[stat_prefix + 'ReadExReq_misses::total']))),
        ('conflicts', '1'),  # TODO: placeholder, as in the original
        ('duty_cycle', '1'),  # TODO: placeholder, as in the original
    ]
    with tag('component', id='system.L2' + ('' if i is None else str(i)), name='L2 Cache', type='CacheUnit'):
        for name, value in params:
            with tag('param', name=name, value=value):
                pass
        for name, value in stats:
            with tag('stat', name=name, value=value):
                pass
def gen_nocs(self, tag):
    """Emit the network-on-chip McPAT components. Not implemented yet."""
    # TODO
    pass
def gen_mcs(self, tag):
    """Emit the memory-controller McPAT components. Not implemented yet."""
    # TODO
    pass
def gen_misc(self, tag):
    """Emit miscellaneous McPAT components. Not implemented yet."""
    # TODO
    pass
def gen_system(self, tag):
    """Emit the McPAT <system> section: technology/layout params, the total
    cycle count, then one core component per CPU, the L2 component(s), and
    the (currently unimplemented) NoC/MC/misc sections.

    Parameters:
        tag -- the yattag tag() context-manager factory to emit XML with
    """
    # Fixed technology/optimization parameters, in the exact element order
    # McPAT expects; only the clockrate is derived from the gem5 stats.
    params = [
        ('core_tech_node', '40'),
        # NOTE: integer division under Python 2 (sim_freq ticks/s -> MHz).
        ('target_core_clockrate', str(int(self.stats['sim_freq']) / 1000 / 10 ** 6)),
        ('temperature', '380'),
        ('interconnect_projection_type', '1'),
        ('device_type', '0'),
        ('longer_channel_device', '1'),
        ('machine_bits', '64'),
        ('virtual_address_width', '64'),
        ('physical_address_width', '64'),
        ('virtual_memory_page_size', '4096'),
        ('wire_is_mat_type', '2'),
        ('wire_os_mat_type', '2'),
        ('delay_wt', '100'),
        ('area_wt', '0'),
        ('dynamic_power_wt', '100'),
        ('leakage_power_wt', '0'),
        ('cycle_time_wt', '0'),
        ('delay_dev', '10000'),
        ('area_dev', '10000'),
        ('dynamic_power_dev', '10000'),
        ('leakage_power_dev', '10000'),
        ('cycle_time_dev', '10000'),
        ('ed', '2'),
        ('burst_len', '1'),
        ('int_prefetch_w', '1'),
        ('page_sz_bits', '0'),
        ('rpters_in_htree', '1'),
        ('ver_htree_wires_over_array', '0'),
        ('nuca', '0'),
        ('nuca_bank_count', '0'),
        ('force_cache_config', '0'),
        ('wt', '0'),
        ('force_wiretype', '0'),
        ('print_detail', '1'),
        ('add_ecc_b_', '1'),
        ('broadcast_addr_din_over_ver_htrees', '0'),
    ]
    for name, value in params:
        with tag('param', name=name, value=value):
            pass
    # NOTE(review): total_cycles is read from CPU 0 only -- presumably all
    # cores share a clock domain; confirm for heterogeneous configs.
    with tag('stat', name='total_cycles', value=str(int(self.stats['system.' + self.cpu_id(0) + '.numCycles']))):
        pass
    for i in range(self.num_cpus()):
        self.gen_core(tag, i)
    if self.numa():
        # One private L2 per node under NUMA; a single shared L2 otherwise.
        for i in range(self.num_l2caches()):
            self.gen_l2(tag, i)
    else:
        self.gen_l2(tag)
    self.gen_nocs(tag)
    self.gen_mcs(tag)
    self.gen_misc(tag)
def gen_mcpat_xml_file(self):
print 'Generating McPAT files for experiment under "' + self.dir + '"'
try:
(doc, tag, text) = Doc().tagtext()
with tag('component', id='root', name='root'):
with tag('component', id='system', name='system', type='System'):
self.gen_system(tag)
mcpat_xml = etree.tostring(etree.fromstring(doc.getvalue()), pretty_print=True).rstrip()
with open(self.mcpat_in_xml_file_name(), 'w') as mcpat_xml_file:
mcpat_xml_file.write(mcpat_xml)
os.system('../../build/mcpat/mcpat -infile ' + self.mcpat_in_xml_file_name() +
' -print_level 5 > ' + self.mcpat_out_file_name())
except:
traceback.print_exc()
def read_mcpat_stats_by_key(self, key, stat_rule, mcpat_stats):
    """Scan the McPAT output report and record one stat per component.

    Parameters:
        key -- stat name used to compose the dict keys (e.g. 'gate_leakage_power')
        stat_rule -- pyparsing rule that extracts the numeric value from a line
        mcpat_stats -- dict (ordered) to record 'system...' -> value entries into

    The parser is stateful: a 'System:'/'L2 Cache:' header line arms a flag,
    and the next line matching stat_rule is attributed to that component.
    """
    try:
        with open(self.mcpat_out_file_name()) as stats_file:
            # i counts L2 cache sections seen so far (used for NUMA ids).
            i = 0
            read_system = False
            read_l2cache = False
            for stat_line in stats_file:
                if 'System:' in stat_line:
                    read_system = True
                elif 'L2 Cache:' in stat_line:
                    read_l2cache = True
                elif read_system or (not i > self.num_l2caches() and read_l2cache):
                    try:
                        stat = stat_rule.parseString(stat_line)
                        if read_system:
                            key_composed = 'system.' + key
                            read_system = False
                        elif read_l2cache:
                            # Under NUMA each L2 section maps to its own id;
                            # otherwise all map onto the single shared L2.
                            key_composed = 'system.' + self.l2_id(i if self.numa() else None) + '.' + key
                            read_l2cache = False
                            i += 1
                        else:
                            print 'Cannot handle the stat line: ' + stat_line
                            sys.exit(-1)
                        value = stat[0]
                        mcpat_stats[key_composed] = value
                    except ParseException:
                        # Non-matching lines inside a section are skipped
                        # until the stat of interest appears.
                        pass
    except:
        traceback.print_exc()
def read_mcpat_stats(self):
    """Parse the McPAT report into an ordered dict of power/energy stats.

    Returns:
        collections.OrderedDict mapping composed keys ('system.<key>' and
        'system.<l2_id>.<key>') to string values, as recorded by
        read_mcpat_stats_by_key.
    """
    mcpat_stats = collections.OrderedDict()
    # (stat key, words of the McPAT report label, unit suffix). The original
    # spelled out five near-identical pyparsing rules; build them from data.
    specs = [
        ('subthreshold_leakage_power', ('Subthreshold', 'Leakage', 'Power'), 'W'),
        ('gate_leakage_power', ('Gate', 'Leakage', 'Power'), 'W'),
        ('runtime_dynamic_power', ('Runtime', 'Dynamic', 'Power'), 'W'),
        ('runtime_dynamic_energy', ('Runtime', 'Dynamic', 'Energy'), 'J'),
        ('total_runtime_energy', ('Total', 'Runtime', 'Energy'), 'J'),
    ]
    for key, words, unit in specs:
        # Token-wise Literals (not one string) so pyparsing handles the
        # report's whitespace exactly as the original rules did.
        label = Literal(words[0])
        for word in words[1:]:
            label += Literal(word)
        rule = (Suppress(label + Literal('=')) +
                Word('.' + nums) +
                Suppress(Literal(unit) + Optional(restOfLine)))
        self.read_mcpat_stats_by_key(key, rule, mcpat_stats)
    return mcpat_stats
def mcpat_system_stat(self, key):
    """Return the system-level McPAT stat *key* as a float.

    Returns -1 when no McPAT stats were parsed or the key is absent.
    """
    if self.mcpat_stats is None:
        return -1
    # The original re-checked 'self.mcpat_stats is None' here, which was
    # unreachable after the guard above; a single dict lookup suffices.
    value = self.mcpat_stats.get('system.' + key)
    return -1 if value is None else float(value)
def mcpat_l2_stat(self, key):
    """Return the McPAT stat *key* summed over all L2 cache components.

    Under NUMA there is one L2 per node; otherwise a single shared L2.
    A missing component contributes -1 to the sum (matching the original
    behaviour); returns -1 when no McPAT stats were parsed at all.
    """
    if self.mcpat_stats is None:
        return -1
    # The original duplicated the lookup in both branches and evaluated the
    # redundant 'i if self.numa() else None' inside the NUMA branch.
    if self.numa():
        l2_ids = [self.l2_id(i) for i in range(self.num_l2caches())]
    else:
        l2_ids = [self.l2_id()]
    values = []
    for l2 in l2_ids:
        value = self.mcpat_stats.get('system.' + l2 + '.' + key)
        values.append(-1 if value is None else float(value))
    return sum(values)
# Convenience accessors over the parsed McPAT stats. The system_* variants
# read the whole-system figure; the l2_* variants aggregate over the L2
# cache component(s). All return -1 when the stats are unavailable.
def system_subthreshold_leakage_power(self):
    return self.mcpat_system_stat('subthreshold_leakage_power')
def l2_subthreshold_leakage_power(self):
    return self.mcpat_l2_stat('subthreshold_leakage_power')
def system_gate_leakage_power(self):
    return self.mcpat_system_stat('gate_leakage_power')
def l2_gate_leakage_power(self):
    return self.mcpat_l2_stat('gate_leakage_power')
def system_runtime_dynamic_power(self):
    return self.mcpat_system_stat('runtime_dynamic_power')
def l2_runtime_dynamic_power(self):
    return self.mcpat_l2_stat('runtime_dynamic_power')
def system_runtime_dynamic_energy(self):
    return self.mcpat_system_stat('runtime_dynamic_energy')
def l2_runtime_dynamic_energy(self):
    return self.mcpat_l2_stat('runtime_dynamic_energy')
def system_total_runtime_energy(self):
    return self.mcpat_system_stat('total_runtime_energy')
def l2_total_runtime_energy(self):
    return self.mcpat_l2_stat('total_runtime_energy')
def generate_mcpat_xml_files(rootdir):
    """Walk *rootdir* and generate McPAT files for every experiment directory.

    Any directory (at any depth) containing a 'stats.txt' file is treated as
    an experiment directory; constructing McPATEnabledExperiment with
    gen_mcpat_xml_file=True triggers the generation as a side effect.
    Does nothing when rootdir does not exist.
    """
    if not os.path.exists(rootdir):
        return
    for parent, child_dirs, _files in os.walk(rootdir):
        # The original loop variable shadowed the builtin 'dir'; renamed.
        for child in child_dirs:
            work_dir = os.path.join(parent, child)
            if os.path.isfile(os.path.join(work_dir, 'stats.txt')):
                McPATEnabledExperiment(work_dir, gen_mcpat_xml_file=True)
if __name__ == '__main__':
    # Command-line entry point: generate the McPAT input XML and run McPAT
    # for a single experiment directory (default: m5out/).
    parser = OptionParser()
    parser.add_option('--dir', type='string', default='m5out/',
                      help='the path to the directory where the experiment\'s result files are located')
    (option, arg) = parser.parse_args()
    experiment = McPATEnabledExperiment(option.dir)
    experiment.gen_mcpat_xml_file()
| 46.037488
| 157
| 0.488926
| 5,469
| 49,122
| 4.190711
| 0.070397
| 0.120031
| 0.179982
| 0.217811
| 0.85187
| 0.831712
| 0.804442
| 0.746717
| 0.652777
| 0.58602
| 0
| 0.016887
| 0.369529
| 49,122
| 1,066
| 158
| 46.080675
| 0.723151
| 0.008326
| 0
| 0.643932
| 1
| 0
| 0.220907
| 0.054343
| 0
| 0
| 0
| 0.000938
| 0
| 0
| null | null | 0.392177
| 0.007021
| null | null | 0.007021
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
2017794e577255a230df08d39c7dc6286d341d0d
| 11,258
|
py
|
Python
|
model/model_ablation.py
|
z1021190674/GMAUResNeXt_RS
|
a8a7444bf30e509cefc01b3be4b0587d367cda2e
|
[
"MIT"
] | 1
|
2022-03-23T11:54:33.000Z
|
2022-03-23T11:54:33.000Z
|
model/model_ablation.py
|
z1021190674/GMAUResNeXt_RS
|
a8a7444bf30e509cefc01b3be4b0587d367cda2e
|
[
"MIT"
] | null | null | null |
model/model_ablation.py
|
z1021190674/GMAUResNeXt_RS
|
a8a7444bf30e509cefc01b3be4b0587d367cda2e
|
[
"MIT"
] | null | null | null |
"""
uresnext_nlocal with global attention
"""
from model.block import *
import torch.nn as nn
import torch.nn.functional as F
import torch
import torchvision.models as models
class AttUResNeXt_class_v2_3(nn.Module):
    """Attention U-ResNeXt ablation variant using only the level-4 attention.

    ResNeXt-101 (32x8d) encoder with a UNet-style decoder. A non-local block
    plus a global-average-pooled context vector feed a single AttBlock_v2 at
    decoder level 4; its output multiplies the final per-class map before
    log-softmax. (AttBlock_v2/NLBlockND/DoubleConv/BnConv come from
    model.block.)

    Parameters:
        args -- must expose ``is_pretrained`` (bool) for the encoder weights
        n_classes -- number of classes of the given dataset

    Tips:
        align_corners=True gives better semantic-segmentation upsampling
        (https://github.com/pytorch/vision/issues/1708)
    """
    def __init__(self, args, n_classes=6):
        super().__init__()
        self.n_classes = n_classes
        self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.gap = nn.AdaptiveAvgPool2d((1, 1))
        ### encoder: ResNeXt-101 backbone reused layer by layer ###
        resnext = models.resnext101_32x8d(pretrained=args.is_pretrained)
        self.firstconv = resnext.conv1
        self.firstbn = resnext.bn1
        self.firstrelu = resnext.relu
        self.firstmaxpool = resnext.maxpool
        self.encoder1 = resnext.layer1
        self.encoder2 = resnext.layer2
        self.encoder3 = resnext.layer3
        self.encoder4 = resnext.layer4
        ### decoder ###
        self.attblock4 = AttBlock_v2(256 + 256, 256, out_channels=n_classes)
        # level 1
        self.nlocal1 = NLBlockND(2048, inter_channels=1024) # half the inter channels for computational efficiency
        self.gconv1 = DoubleConv(2048,256,512) # reduces the channel count of the global context
        self.conv1 = BnConv(2048, 1024, 3, padding='same')
        # level 2
        self.dconv1 = DoubleConv(2048, 1024)
        self.conv2 = BnConv(1024, 512, 3, padding='same')
        # level 3
        self.dconv2 = DoubleConv(1024, 512)
        self.conv3 = BnConv(512, 256, 3, padding='same')
        # level 4
        self.dconv3 = DoubleConv(512, 256)
        self.conv4 = BnConv(256, 64, 3, padding='same')
        # level 5
        self.dconv4 = DoubleConv(128, 64)
        # level 6
        self.dconv5 = DoubleConv(64, 64)
        self.conv5 = BnConv(64, self.n_classes, 3, padding='same')
    def forward(self, img):
        # NOTE(review): both dims come from img.shape[2] (height) -- assumes
        # square inputs; confirm before feeding non-square images.
        shape = (img.shape[2], img.shape[2]) # target size for attblock
        ### encoder ###
        x1 = self.firstconv(img)
        x1 = self.firstbn(x1)
        e0 = self.firstrelu(x1)
        e1 = self.firstmaxpool(e0)
        e1 = self.encoder1(e1)
        e2 = self.encoder2(e1)
        e3 = self.encoder3(e2)
        e4 = self.encoder4(e3)
        ### decoder ###
        x = self.nlocal1(e4)
        # Global context: pooled to 1x1 and shared by the attention block.
        context = self.gap(self.gconv1(x))
        # level 1
        # level 2
        # interpolation -- mini-batch x channels x [optional depth] x [optional height] x width.
        x = self.up(x)
        x = self.conv1(x)
        x = torch.cat((e3, x), dim=1)
        x = self.dconv1(x)
        # level 3
        x = self.up(x)
        x = self.conv2(x)
        x = torch.cat((e2, x), dim=1)
        x = self.dconv2(x)
        # level 4
        x = self.up(x)
        x = self.conv3(x)
        x = torch.cat((e1, x), dim=1)
        x = self.dconv3(x)
        att4 = self.attblock4(x, context, shape)
        # level 5
        x = self.up(x)
        x = self.conv4(x)
        x = torch.cat((e0, x), dim=1)
        x = self.dconv4(x)
        # level 6
        x = self.up(x)
        x = self.dconv5(x)
        x = self.conv5(x)
        # att_sum = (self.a1*att1 + self.a2*att2 + self.a3*att3 + self.a4*att4) / 4.0 #weighted attention
        # This ablation keeps only the deepest attention map.
        att_sum = att4
        x = att_sum * x
        x = F.log_softmax(x, dim=1)
        return x
class AttUResNeXt_class_v2_2(nn.Module):
    """Attention U-ResNeXt ablation variant using level-3 and level-4 attention.

    Same encoder/decoder as the v2_3 variant, but two AttBlock_v2 modules
    (levels 3 and 4) produce attention maps that are blended with learnable
    scalar weights a3/a4 before gating the final per-class map.

    Parameters:
        args -- must expose ``is_pretrained`` (bool) for the encoder weights
        n_classes -- number of classes of the given dataset

    Tips:
        align_corners=True gives better semantic-segmentation upsampling
        (https://github.com/pytorch/vision/issues/1708)
    """
    def __init__(self, args, n_classes=6):
        super().__init__()
        self.n_classes = n_classes
        # Learnable blending weights for the two attention maps.
        self.a3 = torch.nn.Parameter(torch.tensor(1,dtype=torch.float32))
        self.a4 = torch.nn.Parameter(torch.tensor(1,dtype=torch.float32))
        self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.gap = nn.AdaptiveAvgPool2d((1, 1))
        ### encoder: ResNeXt-101 backbone reused layer by layer ###
        resnext = models.resnext101_32x8d(pretrained=args.is_pretrained)
        self.firstconv = resnext.conv1
        self.firstbn = resnext.bn1
        self.firstrelu = resnext.relu
        self.firstmaxpool = resnext.maxpool
        self.encoder1 = resnext.layer1
        self.encoder2 = resnext.layer2
        self.encoder3 = resnext.layer3
        self.encoder4 = resnext.layer4
        ### decoder ###
        self.attblock3 = AttBlock_v2(512 + 256, 512, out_channels=n_classes)
        self.attblock4 = AttBlock_v2(256 + 256, 256, out_channels=n_classes)
        # level 1
        self.nlocal1 = NLBlockND(2048, inter_channels=1024) # half the inter channels for computational efficiency
        self.gconv1 = DoubleConv(2048,256,512) # reduces the channel count of the global context
        self.conv1 = BnConv(2048, 1024, 3, padding='same')
        # level 2
        self.dconv1 = DoubleConv(2048, 1024)
        self.conv2 = BnConv(1024, 512, 3, padding='same')
        # level 3
        self.dconv2 = DoubleConv(1024, 512)
        self.conv3 = BnConv(512, 256, 3, padding='same')
        # level 4
        self.dconv3 = DoubleConv(512, 256)
        self.conv4 = BnConv(256, 64, 3, padding='same')
        # level 5
        self.dconv4 = DoubleConv(128, 64)
        # level 6
        self.dconv5 = DoubleConv(64, 64)
        self.conv5 = BnConv(64, self.n_classes, 3, padding='same')
    def forward(self, img):
        # NOTE(review): both dims come from img.shape[2] (height) -- assumes
        # square inputs; confirm before feeding non-square images.
        shape = (img.shape[2], img.shape[2]) # target size for attblock
        ### encoder ###
        x1 = self.firstconv(img)
        x1 = self.firstbn(x1)
        e0 = self.firstrelu(x1)
        e1 = self.firstmaxpool(e0)
        e1 = self.encoder1(e1)
        e2 = self.encoder2(e1)
        e3 = self.encoder3(e2)
        e4 = self.encoder4(e3)
        ### decoder ###
        x = self.nlocal1(e4)
        # Global context: pooled to 1x1 and shared by both attention blocks.
        context = self.gap(self.gconv1(x))
        # level 1
        # level 2
        # interpolation -- mini-batch x channels x [optional depth] x [optional height] x width.
        x = self.up(x)
        x = self.conv1(x)
        x = torch.cat((e3, x), dim=1)
        x = self.dconv1(x)
        # level 3
        x = self.up(x)
        x = self.conv2(x)
        x = torch.cat((e2, x), dim=1)
        x = self.dconv2(x)
        att3 = self.attblock3(x, context, shape)
        # level 4
        x = self.up(x)
        x = self.conv3(x)
        x = torch.cat((e1, x), dim=1)
        x = self.dconv3(x)
        att4 = self.attblock4(x, context, shape)
        # level 5
        x = self.up(x)
        x = self.conv4(x)
        x = torch.cat((e0, x), dim=1)
        x = self.dconv4(x)
        # level 6
        x = self.up(x)
        x = self.dconv5(x)
        x = self.conv5(x)
        # att_sum = (self.a1*att1 + self.a2*att2 + self.a3*att3 + self.a4*att4) / 4.0 #weighted attention
        # Learnable convex-style blend of the two attention maps.
        att_sum = (self.a3*att3 + self.a4*att4) / (self.a3 + self.a4)
        x = att_sum * x
        x = F.log_softmax(x, dim=1)
        return x
class AttUResNeXt_class_v2_1(nn.Module):
    """Attention U-ResNeXt ablation variant using level-2/3/4 attention.

    Same encoder/decoder as the v2_3 variant, but three AttBlock_v2 modules
    (levels 2, 3 and 4) produce attention maps that are blended with
    learnable scalar weights a2/a3/a4 before gating the final per-class map.

    Parameters:
        args -- must expose ``is_pretrained`` (bool) for the encoder weights
        n_classes -- number of classes of the given dataset

    Tips:
        align_corners=True gives better semantic-segmentation upsampling
        (https://github.com/pytorch/vision/issues/1708)
    """
    def __init__(self, args, n_classes=6):
        super().__init__()
        self.n_classes = n_classes
        # Learnable blending weights for the three attention maps.
        self.a2 = torch.nn.Parameter(torch.tensor(1,dtype=torch.float32))
        self.a3 = torch.nn.Parameter(torch.tensor(1,dtype=torch.float32))
        self.a4 = torch.nn.Parameter(torch.tensor(1,dtype=torch.float32))
        self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.gap = nn.AdaptiveAvgPool2d((1, 1))
        ### encoder: ResNeXt-101 backbone reused layer by layer ###
        resnext = models.resnext101_32x8d(pretrained=args.is_pretrained)
        self.firstconv = resnext.conv1
        self.firstbn = resnext.bn1
        self.firstrelu = resnext.relu
        self.firstmaxpool = resnext.maxpool
        self.encoder1 = resnext.layer1
        self.encoder2 = resnext.layer2
        self.encoder3 = resnext.layer3
        self.encoder4 = resnext.layer4
        ### decoder ###
        self.attblock2 = AttBlock_v2(1024 + 256, 1024, out_channels=n_classes)
        self.attblock3 = AttBlock_v2(512 + 256, 512, out_channels=n_classes)
        self.attblock4 = AttBlock_v2(256 + 256, 256, out_channels=n_classes)
        # level 1
        self.nlocal1 = NLBlockND(2048, inter_channels=1024) # half the inter channels for computational efficiency
        self.gconv1 = DoubleConv(2048,256,512) # reduces the channel count of the global context
        self.conv1 = BnConv(2048, 1024, 3, padding='same')
        # level 2
        self.dconv1 = DoubleConv(2048, 1024)
        self.conv2 = BnConv(1024, 512, 3, padding='same')
        # level 3
        self.dconv2 = DoubleConv(1024, 512)
        self.conv3 = BnConv(512, 256, 3, padding='same')
        # level 4
        self.dconv3 = DoubleConv(512, 256)
        self.conv4 = BnConv(256, 64, 3, padding='same')
        # level 5
        self.dconv4 = DoubleConv(128, 64)
        # level 6
        self.dconv5 = DoubleConv(64, 64)
        self.conv5 = BnConv(64, self.n_classes, 3, padding='same')
    def forward(self, img):
        # NOTE(review): both dims come from img.shape[2] (height) -- assumes
        # square inputs; confirm before feeding non-square images.
        shape = (img.shape[2], img.shape[2]) # target size for attblock
        ### encoder ###
        x1 = self.firstconv(img)
        x1 = self.firstbn(x1)
        e0 = self.firstrelu(x1)
        e1 = self.firstmaxpool(e0)
        e1 = self.encoder1(e1)
        e2 = self.encoder2(e1)
        e3 = self.encoder3(e2)
        e4 = self.encoder4(e3)
        ### decoder ###
        x = self.nlocal1(e4)
        # Global context: pooled to 1x1 and shared by all attention blocks.
        context = self.gap(self.gconv1(x))
        # level 1
        # level 2
        # interpolation -- mini-batch x channels x [optional depth] x [optional height] x width.
        x = self.up(x)
        x = self.conv1(x)
        x = torch.cat((e3, x), dim=1)
        x = self.dconv1(x)
        att2 = self.attblock2(x, context, shape)
        # level 3
        x = self.up(x)
        x = self.conv2(x)
        x = torch.cat((e2, x), dim=1)
        x = self.dconv2(x)
        att3 = self.attblock3(x, context, shape)
        # level 4
        x = self.up(x)
        x = self.conv3(x)
        x = torch.cat((e1, x), dim=1)
        x = self.dconv3(x)
        att4 = self.attblock4(x, context, shape)
        # level 5
        x = self.up(x)
        x = self.conv4(x)
        x = torch.cat((e0, x), dim=1)
        x = self.dconv4(x)
        # level 6
        x = self.up(x)
        x = self.dconv5(x)
        x = self.conv5(x)
        # att_sum = (self.a1*att1 + self.a2*att2 + self.a3*att3 + self.a4*att4) / 4.0 #weighted attention
        # Learnable convex-style blend of the three attention maps.
        att_sum = (self.a2*att2 + self.a3*att3 + self.a4*att4) / (self.a2 + self.a3 + self.a4)
        x = att_sum * x
        x = F.log_softmax(x, dim=1)
        return x
| 34.746914
| 128
| 0.575058
| 1,503
| 11,258
| 4.240852
| 0.111111
| 0.037653
| 0.016944
| 0.018826
| 0.963602
| 0.960621
| 0.960621
| 0.957484
| 0.957484
| 0.953404
| 0
| 0.085576
| 0.300409
| 11,258
| 324
| 129
| 34.746914
| 0.723718
| 0.190442
| 0
| 0.932692
| 0
| 0
| 0.009447
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028846
| false
| 0
| 0.024038
| 0
| 0.081731
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
647a2b880593676874afaef75a25554f96289425
| 185
|
py
|
Python
|
augmenty/tests/test_general.py
|
koaning/augmenty
|
13dbdbb5fd56b36c97678ae48d1e0d869987f6dd
|
[
"MIT"
] | null | null | null |
augmenty/tests/test_general.py
|
koaning/augmenty
|
13dbdbb5fd56b36c97678ae48d1e0d869987f6dd
|
[
"MIT"
] | 1
|
2022-03-12T02:25:00.000Z
|
2022-03-12T02:26:01.000Z
|
augmenty/tests/test_general.py
|
HishamKhdair/augmenty
|
a65a7beac410f53706bb7838026f2bac9b89d544
|
[
"MIT"
] | null | null | null |
import augmenty
def test_info():
    """The package metadata dunders exist and are strings."""
    metadata_attrs = ("__version__", "__download_url__", "__title__")
    for attr in metadata_attrs:
        assert isinstance(getattr(augmenty, attr), str)
| 23.125
| 53
| 0.772973
| 21
| 185
| 6.142857
| 0.571429
| 0.372093
| 0.55814
| 0.418605
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145946
| 185
| 7
| 54
| 26.428571
| 0.816456
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.6
| 1
| 0.2
| true
| 0
| 0.2
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
64beae3d982074c775c45c0c72521ee79d8bbc9e
| 4,428
|
py
|
Python
|
jhdf/src/test/resources/scripts/vlen_datasets.py
|
rdandekar-slb/jhdf
|
d47ae12e1f65352efd336c8961d77c747ce68978
|
[
"MIT"
] | null | null | null |
jhdf/src/test/resources/scripts/vlen_datasets.py
|
rdandekar-slb/jhdf
|
d47ae12e1f65352efd336c8961d77c747ce68978
|
[
"MIT"
] | null | null | null |
jhdf/src/test/resources/scripts/vlen_datasets.py
|
rdandekar-slb/jhdf
|
d47ae12e1f65352efd336c8961d77c747ce68978
|
[
"MIT"
] | null | null | null |
#-------------------------------------------------------------------------------
# This file is part of jHDF. A pure Java library for accessing HDF5 files.
#
# http://jhdf.io
#
# Copyright (c) 2020 James Mudd
#
# MIT License see 'LICENSE' file
#-------------------------------------------------------------------------------
import h5py
import numpy as np
# The idea of this test if to write variable length (ragged array) datasets
def write_vlen_datasets(f):
    """Write variable-length (ragged) test datasets into HDF5 file *f*.

    For each of the six numeric dtypes, two 3-element vlen datasets are
    created -- one contiguous ('vlen_<dtype>_data') and one chunked
    ('vlen_<dtype>_data_chunked') -- each holding the ragged rows
    [0], [1, 2], [3, 4, 5]. The file is flushed and closed afterwards.

    The original spelled out twelve near-identical stanzas; this builds them
    from data, preserving the creation order (all contiguous first, then all
    chunked, dtypes in ascending width/float order).
    """
    dtypes = (np.uint8, np.uint16, np.uint32, np.uint64, np.float32, np.float64)
    for chunks in (None, (3,)):  # None -> contiguous layout, (3,) -> chunked
        for dtype in dtypes:
            vlen_type = h5py.vlen_dtype(dtype)
            name = 'vlen_' + dtype.__name__ + '_data' + ('' if chunks is None else '_chunked')
            dataset = f.create_dataset(name, (3,), dtype=vlen_type, chunks=chunks)
            dataset[0] = [0]
            dataset[1] = [1, 2]
            dataset[2] = [3, 4, 5]
    f.flush()
    f.close()
if __name__ == '__main__':
    # Produce one test file per HDF5 library-version bound so jHDF can be
    # exercised against both the oldest and newest on-disk formats.
    print('Making variable length dataset test files...')
    for libver in ('earliest', 'latest'):
        file_name = 'test_vlen_datasets_' + libver + '.hdf5'
        hdf5_file = h5py.File(file_name, 'w', libver=libver)
        write_vlen_datasets(hdf5_file)
        print('created ' + file_name)
| 42.171429
| 124
| 0.699864
| 652
| 4,428
| 4.342025
| 0.111963
| 0.186507
| 0.254327
| 0.063582
| 0.858354
| 0.771459
| 0.734723
| 0.734723
| 0.734723
| 0.702225
| 0
| 0.085948
| 0.143406
| 4,428
| 104
| 125
| 42.576923
| 0.660427
| 0.088528
| 0
| 0.684932
| 0
| 0
| 0.111801
| 0.066832
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013699
| false
| 0
| 0.027397
| 0
| 0.041096
| 0.041096
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
64c5db3b0712ffed716f96fb13042832868c4ef5
| 4,440
|
py
|
Python
|
Protheus_WebApp/Modules/SIGATMS/TMSA021TESTCASE.py
|
98llm/tir-script-samples
|
0bff8393b79356aa562e9e6512c11ee6e039b177
|
[
"MIT"
] | 17
|
2018-09-24T17:27:08.000Z
|
2021-09-16T19:09:46.000Z
|
Protheus_WebApp/Modules/SIGATMS/TMSA021TESTCASE.py
|
98llm/tir-script-samples
|
0bff8393b79356aa562e9e6512c11ee6e039b177
|
[
"MIT"
] | 4
|
2018-09-24T17:30:32.000Z
|
2022-01-03T11:39:30.000Z
|
Protheus_WebApp/Modules/SIGATMS/TMSA021TESTCASE.py
|
98llm/tir-script-samples
|
0bff8393b79356aa562e9e6512c11ee6e039b177
|
[
"MIT"
] | 18
|
2019-06-07T17:41:34.000Z
|
2022-01-31T18:17:31.000Z
|
from tir import Webapp
import unittest
class TMSA021(unittest.TestCase):
@classmethod
def setUpClass(inst):
    """One-time TIR session setup shared by all test cases in this class."""
    # Log in to Protheus module SIGATMS and open program TMSA021.
    # Setup args: environment, base date, group, branch, module id --
    # presumably matching TIR's Setup(initial_program, date, group, branch,
    # module) signature; confirm against the TIR docs.
    inst.oHelper = Webapp()
    inst.oHelper.Setup("SIGATMS", "02/06/2020", "T1", "M SP 03", "43")
    inst.oHelper.Program("TMSA021")
def test_TMSA021_CT001(self):
    """Include an ONU classification record and verify all saved fields."""
    # Phase 1: create the record via 'Incluir' (Include).
    self.oHelper.WaitShow("Classificacao ONU")
    self.oHelper.SetButton('Incluir')
    self.oHelper.SetBranch("M SP 03")
    self.oHelper.SetValue("DY3_ONU", "4444")
    self.oHelper.SetValue("DY3_DESCRI", "TESTE COVID PARA CAVALARIA MARITIMA")
    # Grid fields: class, risk number, vehicle/packing limits, unit.
    self.oHelper.SetValue("DY3_CLASSE", "4",grid=True,grid_number=1,row=1)
    self.oHelper.SetValue("DY3_NRISCO", "4.1",grid=True,grid_number=1,row=1)
    self.oHelper.SetValue("DY3_LIMVEI", "12.000,00",grid=True,grid_number=1,row=1)
    self.oHelper.SetValue("DY3_LIMEMB", "1.000,00",grid=True,grid_number=1,row=1)
    self.oHelper.SetValue("DY3_UN", "CX",grid=True,grid_number=1,row=1)
    self.oHelper.LoadGrid()
    self.oHelper.SetButton("Salvar")
    self.oHelper.SetButton("Fechar")
    self.oHelper.SetButton("Fechar")
    # Phase 2: reopen the saved record via 'Visualizar' (View) and check
    # that every field round-tripped unchanged.
    self.oHelper.SearchBrowse("M SP 444401")
    self.oHelper.SetButton("Visualizar")
    self.oHelper.CheckResult("DY3_ONU", "4444")
    self.oHelper.CheckResult("DY3_DESCRI", "TESTE COVID PARA CAVALARIA MARITIMA")
    self.oHelper.CheckResult("DY3_CLASSE", "4",grid=True,grid_number=1)
    self.oHelper.CheckResult("DY3_NRISCO", "4.1",grid=True,grid_number=1)
    self.oHelper.CheckResult("DY3_LIMVEI", "12.000,00",grid=True,grid_number=1)
    self.oHelper.CheckResult("DY3_LIMEMB", "1.000,00",grid=True,grid_number=1)
    self.oHelper.CheckResult("DY3_UN", "CX",grid=True,grid_number=1)
    self.oHelper.LoadGrid()
    self.oHelper.SetButton("Fechar")
    self.oHelper.AssertTrue()
def test_TMSA021_CT002(self):
self.oHelper.SearchBrowse("M SP 444401")
self.oHelper.SetButton("Alterar")
self.oHelper.SetValue("DY3_ONU", "4444")
self.oHelper.SetValue("DY3_DESCRI", "TESTE COVID PARA CAVALARIA MARITIMA")
self.oHelper.SetValue("DY3_CLASSE", "4",grid=True,grid_number=1,row=1)
self.oHelper.SetValue("DY3_NRISCO", "4.1",grid=True,grid_number=1,row=1)
self.oHelper.SetValue("DY3_LIMVEI", "15.000,00",grid=True,grid_number=1,row=1)
self.oHelper.SetValue("DY3_LIMEMB", "1.500,00",grid=True,grid_number=1,row=1)
self.oHelper.SetValue("DY3_UN", "CX",grid=True,grid_number=1,row=1)
self.oHelper.LoadGrid()
self.oHelper.SetButton("Salvar")
self.oHelper.SetButton("Fechar")
self.oHelper.SetButton("Fechar")
self.oHelper.SearchBrowse("M SP 444401")
self.oHelper.SetButton("Visualizar")
self.oHelper.CheckResult("DY3_ONU", "4444")
self.oHelper.CheckResult("DY3_DESCRI", "TESTE COVID PARA CAVALARIA MARITIMA")
self.oHelper.CheckResult("DY3_CLASSE", "4",grid=True,grid_number=1)
self.oHelper.CheckResult("DY3_NRISCO", "4.1",grid=True,grid_number=1)
self.oHelper.CheckResult("DY3_LIMVEI", "15.000,00",grid=True,grid_number=1)
self.oHelper.CheckResult("DY3_LIMEMB", "1.500,00",grid=True,grid_number=1)
self.oHelper.CheckResult("DY3_UN", "CX",grid=True,grid_number=1)
self.oHelper.LoadGrid()
self.oHelper.SetButton("Fechar")
self.oHelper.AssertTrue()
def test_TMSA021_CT003(self):
self.oHelper.SearchBrowse("M SP 444401")
self.oHelper.SetButton("Outras Ações", "Excluir")
#self.oHelper.SetValue("DY3_ONU", "4444")
#self.oHelper.SetValue("DY3_DESCRI", "TESTE COVID PARA CAVALARIA MARITIMA")
#self.oHelper.SetValue("DY3_CLASSE", "4",grid=True,grid_number=1,row=1)
#self.oHelper.SetValue("DY3_NRISCO", "4.1",grid=True,grid_number=1,row=1)
#self.oHelper.SetValue("DY3_LIMVEI", "15.000,00",grid=True,grid_number=1,row=1)
#self.oHelper.SetValue("DY3_LIMEMB", "1.500,00",grid=True,grid_number=1,row=1)
#self.oHelper.SetValue("DY3_UN", "CX",grid=True,grid_number=1,row=1)
#self.oHelper.LoadGrid()
self.oHelper.SetButton("Confirmar")
self.oHelper.SetButton("Fechar")
self.oHelper.AssertTrue()
@classmethod
def tearDownClass(inst):
inst.oHelper.TearDown()
# Allow running this suite directly: `python TMSA021TESTCASE.py`.
if __name__ == "__main__":
    unittest.main()
| 39.642857
| 87
| 0.660811
| 591
| 4,440
| 4.839256
| 0.133672
| 0.246154
| 0.104895
| 0.157343
| 0.861189
| 0.861189
| 0.861189
| 0.844755
| 0.844755
| 0.844755
| 0
| 0.065457
| 0.181081
| 4,440
| 111
| 88
| 40
| 0.721122
| 0.112838
| 0
| 0.611111
| 0
| 0
| 0.187993
| 0
| 0
| 0
| 0
| 0
| 0.041667
| 1
| 0.069444
| false
| 0
| 0.027778
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b3f64abb54f9e5361e5d4fc0844357770e5cb41b
| 14,831
|
py
|
Python
|
reducedmodels/transition_to_turbulence.py
|
anton-pershin/reducedmodels
|
67d467d9226f0cad22c965414cda1a73b7852b70
|
[
"MIT"
] | null | null | null |
reducedmodels/transition_to_turbulence.py
|
anton-pershin/reducedmodels
|
67d467d9226f0cad22c965414cda1a73b7852b70
|
[
"MIT"
] | null | null | null |
reducedmodels/transition_to_turbulence.py
|
anton-pershin/reducedmodels
|
67d467d9226f0cad22c965414cda1a73b7852b70
|
[
"MIT"
] | null | null | null |
import numpy as np
from reducedmodels.dynamical_systems import DynamicalSystem
class MoehlisFaisstEckhardtModel(DynamicalSystem):
    def __init__(self, Re, L_x, L_z):
        """
        This class sets up the shear-flow model from Moehlis et al. 2004. Namely, it is the right-hand side of the
        amplitude equations.
        :param Re: Reynolds number
        :param L_x: domain wavelength in the x-direction
        :param L_z: domain wavelength in the z-direction
        """
        self.Re = float(Re)
        self.L_x = float(L_x)
        self.L_z = float(L_z)
        # Fundamental wavenumbers of the domain (beta is fixed by the wall-normal
        # box height of 2 in the Moehlis et al. formulation).
        self.alpha = 2.*np.pi/L_x
        self.beta = np.pi/2.
        self.gamma = 2.*np.pi/L_z
        # Normalization constant of mode 8 (N_8 in the paper).
        self.N_8 = 2.*np.sqrt(2) / np.sqrt((self.alpha**2 + self.gamma**2) * (4.*self.alpha**2 + 4.*self.gamma**2 + np.pi**2))
        # Frequently used wavenumber magnitudes.
        self.k_ag = np.sqrt(self.alpha**2 + self.gamma**2)
        self.k_bg = np.sqrt(self.beta**2 + self.gamma**2)
        self.k_abg = np.sqrt(self.alpha**2 + self.beta**2 + self.gamma**2)
        # Laminar fixed point: only the first amplitude is non-zero.
        self.laminar_state = np.array([1., 0., 0., 0., 0., 0., 0., 0., 0.])
        super().__init__(9)

    def f(self, u):
        """
        Right-hand side of the nine amplitude equations.
        In the paper, amplitudes, denoted a_1, a_2, etc., are enumerated from 1 to 9. Here they are denoted u[0], u[1],
        etc. and enumerated from 0 to 8.
        :param u: state vector of the 9 amplitudes
        :return: time derivative of u as a 1D array of length self.dim
        """
        f_ = np.zeros((self.dim,))
        f_[0] = self.beta**2/self.Re \
            - self.beta**2/self.Re * u[0]\
            - np.sqrt(3./2) * self.beta*self.gamma/self.k_abg * u[5] * u[7] \
            + np.sqrt(3./2) * self.beta*self.gamma/self.k_bg * u[1] * u[2]
        f_[1] = - (4.*self.beta**2/3. + self.gamma**2)/self.Re * u[1] \
            + (5*np.sqrt(2))/(3*np.sqrt(3))*self.gamma**2/self.k_ag * u[3] * u[5] \
            - self.gamma**2/(np.sqrt(6)*self.k_ag) * u[4] * u[6] \
            - self.alpha*self.beta*self.gamma/(np.sqrt(6)*self.k_ag*self.k_abg) * u[4] * u[7] \
            - np.sqrt(3./2)*self.beta*self.gamma/self.k_bg * u[0] * u[2] \
            - np.sqrt(3./2)*self.beta*self.gamma/self.k_bg * u[2] * u[8]
        f_[2] = - (self.beta**2 + self.gamma**2)/self.Re * u[2] \
            + (2./np.sqrt(6.)) * self.alpha*self.beta*self.gamma/(self.k_ag*self.k_bg) * (u[3]*u[6] + u[4]*u[5]) \
            + (self.beta**2*(3*self.alpha**2 + self.gamma**2) - 3*self.gamma**2*(self.alpha**2 + self.gamma**2))/(np.sqrt(6)*self.k_ag*self.k_abg*self.k_bg) * u[3]*u[7]
        f_[3] = - (3*self.alpha**2 + 4*self.beta**2)/(3*self.Re) * u[3] \
            - self.alpha/np.sqrt(6) * u[0]*u[4] \
            - 10./(3.*np.sqrt(6)) * self.alpha**2/self.k_ag * u[1]*u[5] \
            - np.sqrt(3./2) * self.alpha*self.beta*self.gamma/(self.k_ag*self.k_bg) * u[2]*u[6] \
            - np.sqrt(3./2) * self.alpha**2*self.beta**2/(self.k_abg*self.k_ag*self.k_bg) * u[2]*u[7] \
            - self.alpha/np.sqrt(6.) * u[4]*u[8]
        f_[4] = - (self.alpha**2 + self.beta**2)/self.Re * u[4] \
            + self.alpha/np.sqrt(6) * u[0]*u[3] \
            + self.alpha**2/(np.sqrt(6)*self.k_ag) * u[1]*u[6] \
            - self.alpha*self.beta*self.gamma/(np.sqrt(6)*self.k_abg*self.k_ag) * u[1]*u[7] \
            + self.alpha/np.sqrt(6) * u[3]*u[8] \
            + 2./np.sqrt(6.) * self.alpha*self.beta*self.gamma/(self.k_ag*self.k_bg) * u[2]*u[5]
        f_[5] = - (3*self.alpha**2 + 4*self.beta**2 + 3*self.gamma**2)/(3*self.Re) * u[5] \
            + self.alpha/np.sqrt(6) * u[0]*u[6] \
            + np.sqrt(3./2) * self.beta*self.gamma/self.k_abg * u[0]*u[7] \
            + 10./(3.*np.sqrt(6.)) * (self.alpha**2 - self.gamma**2)/self.k_ag * u[1]*u[3] \
            - 2.*np.sqrt(2./3)*self.alpha*self.beta*self.gamma/(self.k_ag*self.k_bg) * u[2]*u[4] \
            + self.alpha/np.sqrt(6) * u[6]*u[8] \
            + np.sqrt(3./2) * self.beta*self.gamma/self.k_abg * u[7]*u[8]
        f_[6] = - (self.alpha**2 + self.beta**2 + self.gamma**2)/self.Re * u[6] \
            - self.alpha/np.sqrt(6) * (u[0]*u[5] + u[5]*u[8]) \
            + 1./np.sqrt(6) * (self.gamma**2-self.alpha**2)/self.k_ag * u[1]*u[4] \
            + 1./np.sqrt(6) * (self.alpha*self.beta*self.gamma)/(self.k_ag*self.k_bg) * u[2]*u[3]
        f_[7] = - (self.alpha**2 + self.beta**2 + self.gamma**2)/self.Re * u[7] \
            + 2./np.sqrt(6.) * (self.alpha*self.beta*self.gamma)/(self.k_abg*self.k_ag) * u[1]*u[4] \
            + self.gamma**2*(3*self.alpha**2 - self.beta**2 + 3*self.gamma**2)/(np.sqrt(6)*self.k_ag*self.k_abg*self.k_bg) * u[2]*u[3]
        f_[8] = - 9*self.beta**2/self.Re * u[8] \
            + np.sqrt(3./2) * self.beta*self.gamma/self.k_bg * u[1]*u[2] \
            - np.sqrt(3./2) * self.beta*self.gamma/self.k_abg * u[5]*u[7]
        return f_

    def modes(self, x, y, z):
        """
        Evaluate the nine velocity-field basis modes at the point (x, y, z).
        In the paper, modes are enumerated from 1 to 9. Here from 0 to 8.
        :return: array of shape (self.dim, 3) with one (u, v, w) vector per mode
        """
        u = np.zeros((self.dim, 3))
        # NOTE(review): these two factors use self.gamma where the mode-9 term
        # below uses the coordinate y (np.sin(3.*np.pi*y/2.)); the paper's
        # modes depend on cos(pi*y/2) -- verify whether `self.gamma` here
        # should be `y`.
        cos_pi_y = np.cos(np.pi * self.gamma / 2.)
        sin_pi_y = np.sin(np.pi * self.gamma / 2.)
        cos_g_z = np.cos(self.gamma * z)
        sin_g_z = np.sin(self.gamma * z)
        cos_a_x = np.cos(self.alpha * x)
        sin_a_x = np.sin(self.alpha * x)
        u[0, :] = np.array([np.sqrt(2)*sin_pi_y, 0, 0])
        u[1, :] = np.array([4./np.sqrt(3)*cos_pi_y**2 * cos_g_z, 0, 0])
        u[2, :] = 2./np.sqrt(4.*self.gamma**2 + np.pi**2) * np.array([0, 2.*self.gamma*cos_pi_y * cos_g_z, np.pi * sin_pi_y * sin_g_z])
        u[3, :] = np.array([0, 0, 4./np.sqrt(3)*cos_a_x * cos_pi_y**2])
        u[4, :] = np.array([0, 0, 2.*sin_a_x * sin_pi_y])
        u[5, :] = 4.* np.sqrt(2)/np.sqrt(3.*(self.alpha**2 + self.gamma**2)) * np.array([-self.gamma*cos_a_x*cos_pi_y**2*sin_g_z, 0, self.alpha*sin_a_x*cos_pi_y**2*cos_g_z])
        u[6, :] = 2.* np.sqrt(2)/np.sqrt(self.alpha**2 + self.gamma**2) * np.array([self.gamma*sin_a_x*sin_pi_y*sin_g_z, 0, self.alpha*cos_a_x*sin_pi_y*cos_g_z])
        u[7, :] = self.N_8 * np.array([np.pi*sin_a_x*sin_pi_y*sin_g_z, 2.*(self.alpha**2 + self.gamma**2)*cos_a_x*cos_pi_y*sin_g_z, -np.pi*self.gamma*cos_a_x*sin_pi_y*cos_g_z])
        u[8, :] = np.array([np.sqrt(2)*np.sin(3.*np.pi*y/2.), 0, 0])
        # BUG FIX: the computed mode array was built but never returned, so the
        # method always yielded None.
        return u

    def kinetic_energy(self, u):
        """
        Kinetic energy of a state (or of each row of a 2D array of states).
        :param u: amplitude vector, or a 2D array with one state per row
        :return: scalar energy, or a 1D array of energies for 2D input
        """
        axis = 0
        if len(u.shape) == 2:
            axis = 1
        return (2.*np.pi)**2 / (self.alpha * self.gamma) * np.sum(u**2, axis=axis)
class MoehlisFaisstEckhardtPerturbationDynamicsModel(DynamicalSystem):
    def __init__(self, Re, L_x, L_z):
        r"""
        This class sets up the shear-flow model from Moehlis et al. 2004. Namely, it is the right-hand side of the
        amplitude equations. Here the equations are written with respect to the perturbation \tilde{a} in the flow
        decomposition a = a_{lam} + \tilde{a}, where a_{lam} = [1, 0, ..., 0] is the laminar solution.
        (Raw docstring so that \tilde is not interpreted as a tab escape.)
        :param Re: Reynolds number
        :param L_x: domain wavelength in the x-direction
        :param L_z: domain wavelength in the z-direction
        """
        self.Re = float(Re)
        self.L_x = float(L_x)
        self.L_z = float(L_z)
        # Fundamental wavenumbers of the domain.
        self.alpha = 2.*np.pi/L_x
        self.beta = np.pi/2.
        self.gamma = 2.*np.pi/L_z
        # Normalization constant of mode 8 (N_8 in the paper).
        self.N_8 = 2.*np.sqrt(2) / np.sqrt((self.alpha**2 + self.gamma**2) * (4.*self.alpha**2 + 4.*self.gamma**2 + np.pi**2))
        # Frequently used wavenumber magnitudes.
        self.k_ag = np.sqrt(self.alpha**2 + self.gamma**2)
        self.k_bg = np.sqrt(self.beta**2 + self.gamma**2)
        self.k_abg = np.sqrt(self.alpha**2 + self.beta**2 + self.gamma**2)
        # Laminar fixed point in the *original* (non-perturbation) variables.
        self.laminar_state = np.array([1., 0., 0., 0., 0., 0., 0., 0., 0.])
        super().__init__(9)

    def f(self, u):
        """
        Right-hand side of the amplitude equations in perturbation form: every
        occurrence of the first original amplitude is substituted by u[0] + 1.
        In the paper, amplitudes, denoted a_1, a_2, etc., are enumerated from 1 to 9. Here they are denoted u[0], u[1],
        etc. and enumerated from 0 to 8.
        :param u: perturbation state vector of the 9 amplitudes
        :return: time derivative of u as a 1D array of length self.dim
        """
        f_ = np.zeros((self.dim,))
        f_[0] = self.beta**2/self.Re \
            - self.beta**2/self.Re * (u[0] + 1.)\
            - np.sqrt(3./2) * self.beta*self.gamma/self.k_abg * u[5] * u[7] \
            + np.sqrt(3./2) * self.beta*self.gamma/self.k_bg * u[1] * u[2]
        f_[1] = - (4.*self.beta**2/3. + self.gamma**2)/self.Re * u[1] \
            + (5*np.sqrt(2))/(3*np.sqrt(3))*self.gamma**2/self.k_ag * u[3] * u[5] \
            - self.gamma**2/(np.sqrt(6)*self.k_ag) * u[4] * u[6] \
            - self.alpha*self.beta*self.gamma/(np.sqrt(6)*self.k_ag*self.k_abg) * u[4] * u[7] \
            - np.sqrt(3./2)*self.beta*self.gamma/self.k_bg * (u[0] + 1.) * u[2] \
            - np.sqrt(3./2)*self.beta*self.gamma/self.k_bg * u[2] * u[8]
        f_[2] = - (self.beta**2 + self.gamma**2)/self.Re * u[2] \
            + (2./np.sqrt(6.)) * self.alpha*self.beta*self.gamma/(self.k_ag*self.k_bg) * (u[3]*u[6] + u[4]*u[5]) \
            + (self.beta**2*(3*self.alpha**2 + self.gamma**2) - 3*self.gamma**2*(self.alpha**2 + self.gamma**2))/(np.sqrt(6)*self.k_ag*self.k_abg*self.k_bg) * u[3]*u[7]
        f_[3] = - (3*self.alpha**2 + 4*self.beta**2)/(3*self.Re) * u[3] \
            - self.alpha/np.sqrt(6) * (u[0] + 1.)*u[4] \
            - 10./(3.*np.sqrt(6)) * self.alpha**2/self.k_ag * u[1]*u[5] \
            - np.sqrt(3./2) * self.alpha*self.beta*self.gamma/(self.k_ag*self.k_bg) * u[2]*u[6] \
            - np.sqrt(3./2) * self.alpha**2*self.beta**2/(self.k_abg*self.k_ag*self.k_bg) * u[2]*u[7] \
            - self.alpha/np.sqrt(6.) * u[4]*u[8]
        f_[4] = - (self.alpha**2 + self.beta**2)/self.Re * u[4] \
            + self.alpha/np.sqrt(6) * (u[0] + 1.)*u[3] \
            + self.alpha**2/(np.sqrt(6)*self.k_ag) * u[1]*u[6] \
            - self.alpha*self.beta*self.gamma/(np.sqrt(6)*self.k_abg*self.k_ag) * u[1]*u[7] \
            + self.alpha/np.sqrt(6) * u[3]*u[8] \
            + 2./np.sqrt(6.) * self.alpha*self.beta*self.gamma/(self.k_ag*self.k_bg) * u[2]*u[5]
        f_[5] = - (3*self.alpha**2 + 4*self.beta**2 + 3*self.gamma**2)/(3*self.Re) * u[5] \
            + self.alpha/np.sqrt(6) * (u[0] + 1.)*u[6] \
            + np.sqrt(3./2) * self.beta*self.gamma/self.k_abg * (u[0] + 1.)*u[7] \
            + 10./(3.*np.sqrt(6.)) * (self.alpha**2 - self.gamma**2)/self.k_ag * u[1]*u[3] \
            - 2.*np.sqrt(2./3)*self.alpha*self.beta*self.gamma/(self.k_ag*self.k_bg) * u[2]*u[4] \
            + self.alpha/np.sqrt(6) * u[6]*u[8] \
            + np.sqrt(3./2) * self.beta*self.gamma/self.k_abg * u[7]*u[8]
        f_[6] = - (self.alpha**2 + self.beta**2 + self.gamma**2)/self.Re * u[6] \
            - self.alpha/np.sqrt(6) * ((u[0] + 1.)*u[5] + u[5]*u[8]) \
            + 1./np.sqrt(6) * (self.gamma**2-self.alpha**2)/self.k_ag * u[1]*u[4] \
            + 1./np.sqrt(6) * (self.alpha*self.beta*self.gamma)/(self.k_ag*self.k_bg) * u[2]*u[3]
        f_[7] = - (self.alpha**2 + self.beta**2 + self.gamma**2)/self.Re * u[7] \
            + 2./np.sqrt(6.) * (self.alpha*self.beta*self.gamma)/(self.k_abg*self.k_ag) * u[1]*u[4] \
            + self.gamma**2*(3*self.alpha**2 - self.beta**2 + 3*self.gamma**2)/(np.sqrt(6)*self.k_ag*self.k_abg*self.k_bg) * u[2]*u[3]
        f_[8] = - 9*self.beta**2/self.Re * u[8] \
            + np.sqrt(3./2) * self.beta*self.gamma/self.k_bg * u[1]*u[2] \
            - np.sqrt(3./2) * self.beta*self.gamma/self.k_abg * u[5]*u[7]
        return f_

    def modes(self, x, y, z):
        """
        Evaluate the nine velocity-field basis modes at the point (x, y, z).
        In the paper, modes are enumerated from 1 to 9. Here from 0 to 8.
        :return: array of shape (self.dim, 3) with one (u, v, w) vector per mode
        """
        u = np.zeros((self.dim, 3))
        # NOTE(review): these two factors use self.gamma where the mode-9 term
        # below uses the coordinate y -- verify whether `self.gamma` should be `y`.
        cos_pi_y = np.cos(np.pi * self.gamma / 2.)
        sin_pi_y = np.sin(np.pi * self.gamma / 2.)
        cos_g_z = np.cos(self.gamma * z)
        sin_g_z = np.sin(self.gamma * z)
        cos_a_x = np.cos(self.alpha * x)
        sin_a_x = np.sin(self.alpha * x)
        u[0, :] = np.array([np.sqrt(2)*sin_pi_y, 0, 0])
        u[1, :] = np.array([4./np.sqrt(3)*cos_pi_y**2 * cos_g_z, 0, 0])
        u[2, :] = 2./np.sqrt(4.*self.gamma**2 + np.pi**2) * np.array([0, 2.*self.gamma*cos_pi_y * cos_g_z, np.pi * sin_pi_y * sin_g_z])
        u[3, :] = np.array([0, 0, 4./np.sqrt(3)*cos_a_x * cos_pi_y**2])
        u[4, :] = np.array([0, 0, 2.*sin_a_x * sin_pi_y])
        u[5, :] = 4.* np.sqrt(2)/np.sqrt(3.*(self.alpha**2 + self.gamma**2)) * np.array([-self.gamma*cos_a_x*cos_pi_y**2*sin_g_z, 0, self.alpha*sin_a_x*cos_pi_y**2*cos_g_z])
        u[6, :] = 2.* np.sqrt(2)/np.sqrt(self.alpha**2 + self.gamma**2) * np.array([self.gamma*sin_a_x*sin_pi_y*sin_g_z, 0, self.alpha*cos_a_x*sin_pi_y*cos_g_z])
        u[7, :] = self.N_8 * np.array([np.pi*sin_a_x*sin_pi_y*sin_g_z, 2.*(self.alpha**2 + self.gamma**2)*cos_a_x*cos_pi_y*sin_g_z, -np.pi*self.gamma*cos_a_x*sin_pi_y*cos_g_z])
        u[8, :] = np.array([np.sqrt(2)*np.sin(3.*np.pi*y/2.), 0, 0])
        # BUG FIX: the computed mode array was built but never returned, so the
        # method always yielded None.
        return u

    def kinetic_energy(self, u):
        """
        Kinetic energy of the *total* state: the laminar solution is added back
        to the perturbation before summing squared amplitudes.
        :param u: perturbation amplitude vector, or a 2D array of states (rows)
        :return: scalar energy, or a 1D array of energies for 2D input
        """
        axis = 0
        if len(u.shape) == 2:
            axis = 1
        return (2.*np.pi)**2 / (self.alpha * self.gamma) * np.sum((u + self.laminar_state)**2, axis=axis)
class BarkleyPipeModel(DynamicalSystem):
    def __init__(self, x_dim, delta_x, r, zeta, D, U_0, U_bar, delta, epsilon_1, epsilon_2, sigma):
        """
        This class sets up the Barkley's pipe model as described in Barkley, 2016.
        Periodic boundary conditions are assumed.
        The state vector has length 2*x_dim: the first x_dim entries are the
        turbulence intensity field q, the last x_dim entries are the local
        mean-shear field u.
        :param x_dim: number of grid points in the streamwise direction
        :param delta_x: grid spacing
        :param r: control parameter of the model (see Barkley, 2016)
        :param zeta: advection-offset parameter in the q equation
        :param D: diffusion coefficient of q
        :param U_0: velocity scale appearing in the local reaction terms
        :param U_bar: velocity scale in the epsilon_2 relaxation term
        :param delta: parameter of the local q nonlinearity
        :param epsilon_1: relaxation rate of u towards U_0
        :param epsilon_2: q-weighted relaxation rate of u towards U_bar
        :param sigma: NOTE(review) stored but unused in this class -- presumably
            a noise amplitude for a stochastic variant; confirm against callers
        """
        self.x_dim = x_dim
        self.delta_x = delta_x
        self.r = r
        self.zeta = zeta
        self.D = D
        self.U_0 = U_0
        self.U_bar = U_bar
        self.delta = delta
        self.epsilon_1 = epsilon_1
        self.epsilon_2 = epsilon_2
        self.sigma = sigma
        super().__init__(2*self.x_dim)

    def f(self, u):
        """
        Right-hand side of the discretized (q, u) equations: upwind advection,
        local reaction terms and central-difference diffusion of q.
        :param u: full state vector of length 2*x_dim (q followed by u)
        :return: time derivative as a 1D array of length self.dim
        """
        f_ = np.zeros(self.dim)
        for i in range(self.x_dim):
            f_[i] = - 1./self.delta_x * (self._u(u, i) - self.zeta) * (self._q(u, i) - self._q(u, i-1)) \
                + self._f_local(u, i) \
                + self.D / self.delta_x**2 * (self._q(u, i-1) - 2*self._q(u, i) + self._q(u, i+1))
            f_[self.x_dim + i] = - 1./self.delta_x * self._u(u, i) * (self._u(u, i) - self._u(u, i-1)) \
                + self._g_local(u, i)
        return f_

    def _q(self, u, i):
        # q-field accessor with periodic wrap.
        # BUG FIX: without the modulo, i == x_dim (right neighbour of the last
        # cell) read u[x_dim], i.e. the first element of the u-field half of
        # the state vector, and i == -1 read the last element of the whole
        # vector (the u-field) instead of q[x_dim-1].
        return u[i % self.x_dim]

    def _u(self, u, i):
        # u-field accessor with periodic wrap.
        # BUG FIX: without the modulo, i == -1 read u[x_dim - 1], i.e. the last
        # element of the q-field, instead of the last element of the u-field.
        return u[self.x_dim + (i % self.x_dim)]

    def _f_local(self, u, i):
        # Local (reaction) part of the q equation.
        return self._q(u, i)*(self.r + self._u(u, i) - self.U_0 - (self.r + self.delta)*(self._q(u, i) - 1.)**2)

    def _g_local(self, u, i):
        # Local (relaxation) part of the u equation.
        return self.epsilon_1 * (self.U_0 - self._u(u, i)) \
            + self.epsilon_2 * (self.U_bar - self._u(u, i)) * self._q(u, i)
| 52.221831
| 176
| 0.513249
| 2,775
| 14,831
| 2.605045
| 0.047207
| 0.076774
| 0.071932
| 0.061973
| 0.874948
| 0.863467
| 0.852262
| 0.852262
| 0.850878
| 0.846175
| 0
| 0.060074
| 0.267076
| 14,831
| 283
| 177
| 52.40636
| 0.604968
| 0.094599
| 0
| 0.744898
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.010204
| 0.020408
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
377ba0c22e53abc4d4a74dcfb58dad003c79edf5
| 849,922
|
py
|
Python
|
pyboto3/redshift.py
|
gehad-shaat/pyboto3
|
4a0c2851a8bc04fb1c71c36086f7bb257e48181d
|
[
"MIT"
] | 91
|
2016-12-31T11:38:37.000Z
|
2021-09-16T19:33:23.000Z
|
pyboto3/redshift.py
|
gehad-shaat/pyboto3
|
4a0c2851a8bc04fb1c71c36086f7bb257e48181d
|
[
"MIT"
] | 7
|
2017-01-02T18:54:23.000Z
|
2020-08-11T13:54:02.000Z
|
pyboto3/redshift.py
|
gehad-shaat/pyboto3
|
4a0c2851a8bc04fb1c71c36086f7bb257e48181d
|
[
"MIT"
] | 26
|
2016-12-31T13:11:00.000Z
|
2022-03-03T21:01:12.000Z
|
'''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
# Auto-generated pyboto3 documentation stub for the AWS Redshift API: the body
# is intentionally `pass` -- this module only carries docstrings so IDEs can
# offer signature help for the real boto3 client methods.
def accept_reserved_node_exchange(ReservedNodeId=None, TargetReservedNodeOfferingId=None):
    """
    Exchanges a DC1 Reserved Node for a DC2 Reserved Node with no changes to the configuration (term, payment type, or number of nodes) and no additional costs.
    See also: AWS API Documentation

    Exceptions

    :example: response = client.accept_reserved_node_exchange(
        ReservedNodeId='string',
        TargetReservedNodeOfferingId='string'
    )

    :type ReservedNodeId: string
    :param ReservedNodeId: [REQUIRED]\nA string representing the node identifier of the DC1 Reserved Node to be exchanged.\n

    :type TargetReservedNodeOfferingId: string
    :param TargetReservedNodeOfferingId: [REQUIRED]\nThe unique identifier of the DC2 Reserved Node offering to be used for the exchange. You can obtain the value for the parameter by calling GetReservedNodeExchangeOfferings\n

    :rtype: dict
    ReturnsResponse Syntax
    {
        'ExchangedReservedNode': {
            'ReservedNodeId': 'string',
            'ReservedNodeOfferingId': 'string',
            'NodeType': 'string',
            'StartTime': datetime(2015, 1, 1),
            'Duration': 123,
            'FixedPrice': 123.0,
            'UsagePrice': 123.0,
            'CurrencyCode': 'string',
            'NodeCount': 123,
            'State': 'string',
            'OfferingType': 'string',
            'RecurringCharges': [
                {
                    'RecurringChargeAmount': 123.0,
                    'RecurringChargeFrequency': 'string'
                },
            ],
            'ReservedNodeOfferingType': 'Regular'|'Upgradable'
        }
    }

    Response Structure
    (dict) --
    ExchangedReservedNode (dict) --
    ReservedNodeId (string) --
    The unique identifier for the reservation.
    ReservedNodeOfferingId (string) --
    The identifier for the reserved node offering.
    NodeType (string) --
    The node type of the reserved node.
    StartTime (datetime) --
    The time the reservation started. You purchase a reserved node offering for a duration. This is the start time of that duration.
    Duration (integer) --
    The duration of the node reservation in seconds.
    FixedPrice (float) --
    The fixed cost Amazon Redshift charges you for this reserved node.
    UsagePrice (float) --
    The hourly rate Amazon Redshift charges you for this reserved node.
    CurrencyCode (string) --
    The currency code for the reserved cluster.
    NodeCount (integer) --
    The number of reserved compute nodes.
    State (string) --
    The state of the reserved compute node.
    Possible Values:
    pending-payment-This reserved node has recently been purchased, and the sale has been approved, but payment has not yet been confirmed.
    active-This reserved node is owned by the caller and is available for use.
    payment-failed-Payment failed for the purchase attempt.
    retired-The reserved node is no longer available.
    exchanging-The owner is exchanging the reserved node for another reserved node.
    OfferingType (string) --
    The anticipated utilization of the reserved node, as defined in the reserved node offering.
    RecurringCharges (list) --
    The recurring charges for the reserved node.
    (dict) --
    Describes a recurring charge.
    RecurringChargeAmount (float) --
    The amount charged per the period of time specified by the recurring charge frequency.
    RecurringChargeFrequency (string) --
    The frequency at which the recurring charge amount is applied.
    ReservedNodeOfferingType (string) --

    Exceptions
    Redshift.Client.exceptions.ReservedNodeNotFoundFault
    Redshift.Client.exceptions.InvalidReservedNodeStateFault
    Redshift.Client.exceptions.ReservedNodeAlreadyMigratedFault
    Redshift.Client.exceptions.ReservedNodeOfferingNotFoundFault
    Redshift.Client.exceptions.UnsupportedOperationFault
    Redshift.Client.exceptions.DependentServiceUnavailableFault
    Redshift.Client.exceptions.ReservedNodeAlreadyExistsFault

    :return: {
        'ExchangedReservedNode': {
            'ReservedNodeId': 'string',
            'ReservedNodeOfferingId': 'string',
            'NodeType': 'string',
            'StartTime': datetime(2015, 1, 1),
            'Duration': 123,
            'FixedPrice': 123.0,
            'UsagePrice': 123.0,
            'CurrencyCode': 'string',
            'NodeCount': 123,
            'State': 'string',
            'OfferingType': 'string',
            'RecurringCharges': [
                {
                    'RecurringChargeAmount': 123.0,
                    'RecurringChargeFrequency': 'string'
                },
            ],
            'ReservedNodeOfferingType': 'Regular'|'Upgradable'
        }
    }

    :returns:
    pending-payment-This reserved node has recently been purchased, and the sale has been approved, but payment has not yet been confirmed.
    active-This reserved node is owned by the caller and is available for use.
    payment-failed-Payment failed for the purchase attempt.
    retired-The reserved node is no longer available.
    exchanging-The owner is exchanging the reserved node for another reserved node.
    """
    pass
# Auto-generated pyboto3 documentation stub for the AWS Redshift API: the body
# is intentionally `pass` -- this module only carries docstrings so IDEs can
# offer signature help for the real boto3 client methods.
def authorize_cluster_security_group_ingress(ClusterSecurityGroupName=None, CIDRIP=None, EC2SecurityGroupName=None, EC2SecurityGroupOwnerId=None):
    """
    Adds an inbound (ingress) rule to an Amazon Redshift security group. Depending on whether the application accessing your cluster is running on the Internet or an Amazon EC2 instance, you can authorize inbound access to either a Classless Interdomain Routing (CIDR)/Internet Protocol (IP) range or to an Amazon EC2 security group. You can add as many as 20 ingress rules to an Amazon Redshift security group.
    If you authorize access to an Amazon EC2 security group, specify EC2SecurityGroupName and EC2SecurityGroupOwnerId . The Amazon EC2 security group and Amazon Redshift cluster must be in the same AWS Region.
    If you authorize access to a CIDR/IP address range, specify CIDRIP . For an overview of CIDR blocks, see the Wikipedia article on Classless Inter-Domain Routing .
    You must also associate the security group with a cluster so that clients running on these IP addresses or the EC2 instance are authorized to connect to the cluster. For information about managing security groups, go to Working with Security Groups in the Amazon Redshift Cluster Management Guide .
    See also: AWS API Documentation

    Exceptions

    :example: response = client.authorize_cluster_security_group_ingress(
        ClusterSecurityGroupName='string',
        CIDRIP='string',
        EC2SecurityGroupName='string',
        EC2SecurityGroupOwnerId='string'
    )

    :type ClusterSecurityGroupName: string
    :param ClusterSecurityGroupName: [REQUIRED]\nThe name of the security group to which the ingress rule is added.\n

    :type CIDRIP: string
    :param CIDRIP: The IP range to be added the Amazon Redshift security group.

    :type EC2SecurityGroupName: string
    :param EC2SecurityGroupName: The EC2 security group to be added the Amazon Redshift security group.

    :type EC2SecurityGroupOwnerId: string
    :param EC2SecurityGroupOwnerId: The AWS account number of the owner of the security group specified by the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value.\nExample: 111122223333\n

    :rtype: dict
    ReturnsResponse Syntax
    {
        'ClusterSecurityGroup': {
            'ClusterSecurityGroupName': 'string',
            'Description': 'string',
            'EC2SecurityGroups': [
                {
                    'Status': 'string',
                    'EC2SecurityGroupName': 'string',
                    'EC2SecurityGroupOwnerId': 'string',
                    'Tags': [
                        {
                            'Key': 'string',
                            'Value': 'string'
                        },
                    ]
                },
            ],
            'IPRanges': [
                {
                    'Status': 'string',
                    'CIDRIP': 'string',
                    'Tags': [
                        {
                            'Key': 'string',
                            'Value': 'string'
                        },
                    ]
                },
            ],
            'Tags': [
                {
                    'Key': 'string',
                    'Value': 'string'
                },
            ]
        }
    }

    Response Structure
    (dict) --
    ClusterSecurityGroup (dict) --
    Describes a security group.
    ClusterSecurityGroupName (string) --
    The name of the cluster security group to which the operation was applied.
    Description (string) --
    A description of the security group.
    EC2SecurityGroups (list) --
    A list of EC2 security groups that are permitted to access clusters associated with this cluster security group.
    (dict) --
    Describes an Amazon EC2 security group.
    Status (string) --
    The status of the EC2 security group.
    EC2SecurityGroupName (string) --
    The name of the EC2 Security Group.
    EC2SecurityGroupOwnerId (string) --
    The AWS ID of the owner of the EC2 security group specified in the EC2SecurityGroupName field.
    Tags (list) --
    The list of tags for the EC2 security group.
    (dict) --
    A tag consisting of a name/value pair for a resource.
    Key (string) --
    The key, or name, for the resource tag.
    Value (string) --
    The value for the resource tag.
    IPRanges (list) --
    A list of IP ranges (CIDR blocks) that are permitted to access clusters associated with this cluster security group.
    (dict) --
    Describes an IP range used in a security group.
    Status (string) --
    The status of the IP range, for example, "authorized".
    CIDRIP (string) --
    The IP range in Classless Inter-Domain Routing (CIDR) notation.
    Tags (list) --
    The list of tags for the IP range.
    (dict) --
    A tag consisting of a name/value pair for a resource.
    Key (string) --
    The key, or name, for the resource tag.
    Value (string) --
    The value for the resource tag.
    Tags (list) --
    The list of tags for the cluster security group.
    (dict) --
    A tag consisting of a name/value pair for a resource.
    Key (string) --
    The key, or name, for the resource tag.
    Value (string) --
    The value for the resource tag.

    Exceptions
    Redshift.Client.exceptions.ClusterSecurityGroupNotFoundFault
    Redshift.Client.exceptions.InvalidClusterSecurityGroupStateFault
    Redshift.Client.exceptions.AuthorizationAlreadyExistsFault
    Redshift.Client.exceptions.AuthorizationQuotaExceededFault

    :return: {
        'ClusterSecurityGroup': {
            'ClusterSecurityGroupName': 'string',
            'Description': 'string',
            'EC2SecurityGroups': [
                {
                    'Status': 'string',
                    'EC2SecurityGroupName': 'string',
                    'EC2SecurityGroupOwnerId': 'string',
                    'Tags': [
                        {
                            'Key': 'string',
                            'Value': 'string'
                        },
                    ]
                },
            ],
            'IPRanges': [
                {
                    'Status': 'string',
                    'CIDRIP': 'string',
                    'Tags': [
                        {
                            'Key': 'string',
                            'Value': 'string'
                        },
                    ]
                },
            ],
            'Tags': [
                {
                    'Key': 'string',
                    'Value': 'string'
                },
            ]
        }
    }

    :returns:
    Redshift.Client.exceptions.ClusterSecurityGroupNotFoundFault
    Redshift.Client.exceptions.InvalidClusterSecurityGroupStateFault
    Redshift.Client.exceptions.AuthorizationAlreadyExistsFault
    Redshift.Client.exceptions.AuthorizationQuotaExceededFault
    """
    pass
def authorize_snapshot_access(SnapshotIdentifier=None, SnapshotClusterIdentifier=None, AccountWithRestoreAccess=None):
"""
Authorizes the specified AWS customer account to restore the specified snapshot.
For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide .
See also: AWS API Documentation
Exceptions
:example: response = client.authorize_snapshot_access(
SnapshotIdentifier='string',
SnapshotClusterIdentifier='string',
AccountWithRestoreAccess='string'
)
:type SnapshotIdentifier: string
:param SnapshotIdentifier: [REQUIRED]\nThe identifier of the snapshot the account is authorized to restore.\n
:type SnapshotClusterIdentifier: string
:param SnapshotClusterIdentifier: The identifier of the cluster the snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.
:type AccountWithRestoreAccess: string
:param AccountWithRestoreAccess: [REQUIRED]\nThe identifier of the AWS customer account authorized to restore the specified snapshot.\nTo share a snapshot with AWS support, specify amazon-redshift-support.\n
:rtype: dict
ReturnsResponse Syntax
{
'Snapshot': {
'SnapshotIdentifier': 'string',
'ClusterIdentifier': 'string',
'SnapshotCreateTime': datetime(2015, 1, 1),
'Status': 'string',
'Port': 123,
'AvailabilityZone': 'string',
'ClusterCreateTime': datetime(2015, 1, 1),
'MasterUsername': 'string',
'ClusterVersion': 'string',
'SnapshotType': 'string',
'NodeType': 'string',
'NumberOfNodes': 123,
'DBName': 'string',
'VpcId': 'string',
'Encrypted': True|False,
'KmsKeyId': 'string',
'EncryptedWithHSM': True|False,
'AccountsWithRestoreAccess': [
{
'AccountId': 'string',
'AccountAlias': 'string'
},
],
'OwnerAccount': 'string',
'TotalBackupSizeInMegaBytes': 123.0,
'ActualIncrementalBackupSizeInMegaBytes': 123.0,
'BackupProgressInMegaBytes': 123.0,
'CurrentBackupRateInMegaBytesPerSecond': 123.0,
'EstimatedSecondsToCompletion': 123,
'ElapsedTimeInSeconds': 123,
'SourceRegion': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'RestorableNodeTypes': [
'string',
],
'EnhancedVpcRouting': True|False,
'MaintenanceTrackName': 'string',
'ManualSnapshotRetentionPeriod': 123,
'ManualSnapshotRemainingDays': 123,
'SnapshotRetentionStartTime': datetime(2015, 1, 1)
}
}
Response Structure
(dict) --
Snapshot (dict) --
Describes a snapshot.
SnapshotIdentifier (string) --
The snapshot identifier that is provided in the request.
ClusterIdentifier (string) --
The identifier of the cluster for which the snapshot was taken.
SnapshotCreateTime (datetime) --
The time (in UTC format) when Amazon Redshift began the snapshot. A snapshot contains a copy of the cluster data as of this exact time.
Status (string) --
The snapshot status. The value of the status depends on the API operation used:
CreateClusterSnapshot and CopyClusterSnapshot returns status as "creating".
DescribeClusterSnapshots returns status as "creating", "available", "final snapshot", or "failed".
DeleteClusterSnapshot returns status as "deleted".
Port (integer) --
The port that the cluster is listening on.
AvailabilityZone (string) --
The Availability Zone in which the cluster was created.
ClusterCreateTime (datetime) --
The time (UTC) when the cluster was originally created.
MasterUsername (string) --
The master user name for the cluster.
ClusterVersion (string) --
The version ID of the Amazon Redshift engine that is running on the cluster.
SnapshotType (string) --
The snapshot type. Snapshots created using CreateClusterSnapshot and CopyClusterSnapshot are of type "manual".
NodeType (string) --
The node type of the nodes in the cluster.
NumberOfNodes (integer) --
The number of nodes in the cluster.
DBName (string) --
The name of the database that was created when the cluster was created.
VpcId (string) --
The VPC identifier of the cluster if the snapshot is from a cluster in a VPC. Otherwise, this field is not in the output.
Encrypted (boolean) --
If true , the data in the snapshot is encrypted at rest.
KmsKeyId (string) --
The AWS Key Management Service (KMS) key ID of the encryption key that was used to encrypt data in the cluster from which the snapshot was taken.
EncryptedWithHSM (boolean) --
A boolean that indicates whether the snapshot data is encrypted using the HSM keys of the source cluster. true indicates that the data is encrypted using HSM keys.
AccountsWithRestoreAccess (list) --
A list of the AWS customer accounts authorized to restore the snapshot. Returns null if no accounts are authorized. Visible only to the snapshot owner.
(dict) --
Describes an AWS customer account authorized to restore a snapshot.
AccountId (string) --
The identifier of an AWS customer account authorized to restore a snapshot.
AccountAlias (string) --
The identifier of an AWS support account authorized to restore a snapshot. For AWS support, the identifier is amazon-redshift-support .
OwnerAccount (string) --
For manual snapshots, the AWS customer account used to create or copy the snapshot. For automatic snapshots, the owner of the cluster. The owner can perform all snapshot actions, such as sharing a manual snapshot.
TotalBackupSizeInMegaBytes (float) --
The size of the complete set of backup data that would be used to restore the cluster.
ActualIncrementalBackupSizeInMegaBytes (float) --
The size of the incremental backup.
BackupProgressInMegaBytes (float) --
The number of megabytes that have been transferred to the snapshot backup.
CurrentBackupRateInMegaBytesPerSecond (float) --
The number of megabytes per second being transferred to the snapshot backup. Returns 0 for a completed backup.
EstimatedSecondsToCompletion (integer) --
The estimate of the time remaining before the snapshot backup will complete. Returns 0 for a completed backup.
ElapsedTimeInSeconds (integer) --
The amount of time an in-progress snapshot backup has been running, or the amount of time it took a completed backup to finish.
SourceRegion (string) --
The source region from which the snapshot was copied.
Tags (list) --
The list of tags for the cluster snapshot.
(dict) --
A tag consisting of a name/value pair for a resource.
Key (string) --
The key, or name, for the resource tag.
Value (string) --
The value for the resource tag.
RestorableNodeTypes (list) --
The list of node types that this cluster snapshot is able to restore into.
(string) --
EnhancedVpcRouting (boolean) --
An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.
If this option is true , enhanced VPC routing is enabled.
Default: false
MaintenanceTrackName (string) --
The name of the maintenance track for the snapshot.
ManualSnapshotRetentionPeriod (integer) --
The number of days that a manual snapshot is retained. If the value is -1, the manual snapshot is retained indefinitely.
The value must be either -1 or an integer between 1 and 3,653.
ManualSnapshotRemainingDays (integer) --
The number of days until a manual snapshot will pass its retention period.
SnapshotRetentionStartTime (datetime) --
A timestamp representing the start of the retention period for the snapshot.
Exceptions
Redshift.Client.exceptions.ClusterSnapshotNotFoundFault
Redshift.Client.exceptions.AuthorizationAlreadyExistsFault
Redshift.Client.exceptions.AuthorizationQuotaExceededFault
Redshift.Client.exceptions.DependentServiceRequestThrottlingFault
Redshift.Client.exceptions.InvalidClusterSnapshotStateFault
Redshift.Client.exceptions.LimitExceededFault
:return: {
'Snapshot': {
'SnapshotIdentifier': 'string',
'ClusterIdentifier': 'string',
'SnapshotCreateTime': datetime(2015, 1, 1),
'Status': 'string',
'Port': 123,
'AvailabilityZone': 'string',
'ClusterCreateTime': datetime(2015, 1, 1),
'MasterUsername': 'string',
'ClusterVersion': 'string',
'SnapshotType': 'string',
'NodeType': 'string',
'NumberOfNodes': 123,
'DBName': 'string',
'VpcId': 'string',
'Encrypted': True|False,
'KmsKeyId': 'string',
'EncryptedWithHSM': True|False,
'AccountsWithRestoreAccess': [
{
'AccountId': 'string',
'AccountAlias': 'string'
},
],
'OwnerAccount': 'string',
'TotalBackupSizeInMegaBytes': 123.0,
'ActualIncrementalBackupSizeInMegaBytes': 123.0,
'BackupProgressInMegaBytes': 123.0,
'CurrentBackupRateInMegaBytesPerSecond': 123.0,
'EstimatedSecondsToCompletion': 123,
'ElapsedTimeInSeconds': 123,
'SourceRegion': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'RestorableNodeTypes': [
'string',
],
'EnhancedVpcRouting': True|False,
'MaintenanceTrackName': 'string',
'ManualSnapshotRetentionPeriod': 123,
'ManualSnapshotRemainingDays': 123,
'SnapshotRetentionStartTime': datetime(2015, 1, 1)
}
}
:returns:
CreateClusterSnapshot and CopyClusterSnapshot returns status as "creating".
DescribeClusterSnapshots returns status as "creating", "available", "final snapshot", or "failed".
DeleteClusterSnapshot returns status as "deleted".
"""
pass
def batch_delete_cluster_snapshots(Identifiers=None):
    """Delete a set of cluster snapshots in a single request.

    See also: AWS API Documentation.

    Example::

        response = client.batch_delete_cluster_snapshots(
            Identifiers=[
                {
                    'SnapshotIdentifier': 'string',
                    'SnapshotClusterIdentifier': 'string'
                },
            ]
        )

    :type Identifiers: list
    :param Identifiers: [REQUIRED] Identifiers of the snapshots to delete.
        Each element is a dict with:

        - ``SnapshotIdentifier`` (string) -- [REQUIRED] The unique identifier
          of the manual snapshot to be deleted. Constraints: must be the name
          of an existing snapshot that is in the ``available``, ``failed``,
          or ``cancelled`` state.
        - ``SnapshotClusterIdentifier`` (string) -- The unique identifier of
          the cluster the snapshot was created from. Required if your IAM
          user has a policy containing a snapshot resource element that
          specifies anything other than ``*`` for the cluster name.
          Constraints: must be the name of a valid cluster.

    :rtype: dict
    :return: Response syntax::

        {
            'Resources': [      # snapshot identifiers that were deleted
                'string',
            ],
            'Errors': [         # any per-snapshot errors returned
                {
                    'SnapshotIdentifier': 'string',         # snapshot returning the error
                    'SnapshotClusterIdentifier': 'string',  # cluster of that snapshot
                    'FailureCode': 'string',                # failure code for the error
                    'FailureReason': 'string'               # text message describing the error
                },
            ]
        }

    Exceptions:

    - ``Redshift.Client.exceptions.BatchDeleteRequestSizeExceededFault``
    """
    pass
def batch_modify_cluster_snapshots(SnapshotIdentifierList=None, ManualSnapshotRetentionPeriod=None, Force=None):
    """Modify the settings for a set of cluster snapshots.

    See also: AWS API Documentation.

    Example::

        response = client.batch_modify_cluster_snapshots(
            SnapshotIdentifierList=[
                'string',
            ],
            ManualSnapshotRetentionPeriod=123,
            Force=True|False
        )

    :type SnapshotIdentifierList: list
    :param SnapshotIdentifierList: [REQUIRED] A list of snapshot identifiers
        (strings) you want to modify.

    :type ManualSnapshotRetentionPeriod: integer
    :param ManualSnapshotRetentionPeriod: The number of days that a manual
        snapshot is retained. If you specify the value -1, the manual
        snapshot is retained indefinitely. The number must be either -1 or
        an integer between 1 and 3,653.

        If you decrease the manual snapshot retention period from its
        current value, existing manual snapshots that fall outside of the
        new retention period will return an error. If you want to suppress
        the errors and delete the snapshots, use the force option.

    :type Force: boolean
    :param Force: A boolean value indicating whether to override an
        exception if the retention period has passed.

    :rtype: dict
    :return: Response syntax::

        {
            'Resources': [      # snapshot identifiers that were modified
                'string',
            ],
            'Errors': [         # any per-snapshot errors returned
                {
                    'SnapshotIdentifier': 'string',         # snapshot returning the error
                    'SnapshotClusterIdentifier': 'string',  # cluster of that snapshot
                    'FailureCode': 'string',                # failure code for the error
                    'FailureReason': 'string'               # text message describing the error
                },
            ]
        }

    Exceptions:

    - ``Redshift.Client.exceptions.InvalidRetentionPeriodFault``
    - ``Redshift.Client.exceptions.BatchModifyClusterSnapshotsLimitExceededFault``
    """
    pass
def can_paginate(operation_name=None):
    """Check if an operation can be paginated.

    :type operation_name: string
    :param operation_name: The operation name. This is the same name as the
        method name on the client. For example, if the method name is
        ``create_foo``, and you'd normally invoke the operation as
        ``client.create_foo(**kwargs)``, if the ``create_foo`` operation can
        be paginated, you can use the call
        ``client.get_paginator('create_foo')``.
    """
    pass
def cancel_resize(ClusterIdentifier=None):
    """Cancel a resize operation for a cluster.

    See also: AWS API Documentation.

    Example::

        response = client.cancel_resize(
            ClusterIdentifier='string'
        )

    :type ClusterIdentifier: string
    :param ClusterIdentifier: [REQUIRED] The unique identifier for the
        cluster that you want to cancel a resize operation for.

    :rtype: dict
    :return: A dict describing the result of the cluster resize operation::

        {
            'TargetNodeType': 'string',        # node type after resize completes
            'TargetNumberOfNodes': 123,        # node count after resize completes
            'TargetClusterType': 'string',     # valid values: multi-node | single-node
            'Status': 'string',                # NONE | IN_PROGRESS | FAILED | SUCCEEDED | CANCELLING
            'ImportTablesCompleted': [         # names of tables completely imported
                'string',
            ],
            'ImportTablesInProgress': [        # names of tables currently being imported
                'string',
            ],
            'ImportTablesNotStarted': [        # names of tables not yet imported
                'string',
            ],
            # Average rate over the last few minutes, in MB/s; after
            # completion, the average rate of the entire resize operation.
            'AvgResizeRateInMegaBytesPerSecond': 123.0,
            # Estimated total data, in MB, on the cluster before the resize began.
            'TotalResizeDataInMegaBytes': 123,
            # Data processed so far, in MB; on completion, the total amount of
            # data on the cluster (may differ from TotalResizeDataInMegaBytes).
            'ProgressInMegaBytes': 123,
            # Seconds elapsed since the resize began; total actual time once complete.
            'ElapsedTimeInSeconds': 123,
            # Estimated seconds remaining, based on average rate and data left;
            # 0 once the resize operation is complete.
            'EstimatedTimeToCompletionInSeconds': 123,
            'ResizeType': 'string',            # ClassicResize or ElasticResize
            'Message': 'string',               # optional additional details
            # Encryption type after the resize; possible values are KMS and
            # None (in the China region: Legacy and None).
            'TargetEncryptionType': 'string',
            # Percent of data transferred from source cluster to target cluster.
            'DataTransferProgressPercent': 123.0
        }

    Exceptions:

    - ``Redshift.Client.exceptions.ClusterNotFoundFault``
    - ``Redshift.Client.exceptions.ResizeNotFoundFault``
    - ``Redshift.Client.exceptions.InvalidClusterStateFault``
    - ``Redshift.Client.exceptions.UnsupportedOperationFault``
    """
    pass
def copy_cluster_snapshot(SourceSnapshotIdentifier=None, SourceSnapshotClusterIdentifier=None, TargetSnapshotIdentifier=None, ManualSnapshotRetentionPeriod=None):
    """Copy the specified automated cluster snapshot to a new manual cluster snapshot.

    The source must be an automated snapshot and it must be in the available
    state.

    When you delete a cluster, Amazon Redshift deletes any automated
    snapshots of the cluster. Also, when the retention period of the
    snapshot expires, Amazon Redshift automatically deletes it. If you want
    to keep an automated snapshot for a longer period, you can make a manual
    copy of the snapshot. Manual snapshots are retained until you delete
    them.

    For more information about working with snapshots, go to Amazon Redshift
    Snapshots in the Amazon Redshift Cluster Management Guide.

    See also: AWS API Documentation.

    Example::

        response = client.copy_cluster_snapshot(
            SourceSnapshotIdentifier='string',
            SourceSnapshotClusterIdentifier='string',
            TargetSnapshotIdentifier='string',
            ManualSnapshotRetentionPeriod=123
        )

    :type SourceSnapshotIdentifier: string
    :param SourceSnapshotIdentifier: [REQUIRED] The identifier for the
        source snapshot. Constraints: must be the identifier for a valid
        automated snapshot whose state is ``available``.

    :type SourceSnapshotClusterIdentifier: string
    :param SourceSnapshotClusterIdentifier: The identifier of the cluster
        the source snapshot was created from. This parameter is required if
        your IAM user has a policy containing a snapshot resource element
        that specifies anything other than ``*`` for the cluster name.
        Constraints: must be the identifier for a valid cluster.

    :type TargetSnapshotIdentifier: string
    :param TargetSnapshotIdentifier: [REQUIRED] The identifier given to the
        new manual snapshot. Constraints:

        - Cannot be null, empty, or blank.
        - Must contain from 1 to 255 alphanumeric characters or hyphens.
        - First character must be a letter.
        - Cannot end with a hyphen or contain two consecutive hyphens.
        - Must be unique for the AWS account that is making the request.

    :type ManualSnapshotRetentionPeriod: integer
    :param ManualSnapshotRetentionPeriod: The number of days that a manual
        snapshot is retained. If the value is -1, the manual snapshot is
        retained indefinitely. The value must be either -1 or an integer
        between 1 and 3,653. The default value is -1.

    :rtype: dict
    :return: A dict with a single ``Snapshot`` key describing the new snapshot::

        {
            'Snapshot': {
                'SnapshotIdentifier': 'string',     # identifier provided in the request
                'ClusterIdentifier': 'string',      # cluster the snapshot was taken from
                # Time (UTC) when Amazon Redshift began the snapshot; the
                # snapshot contains a copy of the cluster data as of this time.
                'SnapshotCreateTime': datetime(2015, 1, 1),
                # Snapshot status; value depends on the API operation used:
                #   CreateClusterSnapshot / CopyClusterSnapshot return "creating";
                #   DescribeClusterSnapshots returns "creating", "available",
                #   "final snapshot", or "failed";
                #   DeleteClusterSnapshot returns "deleted".
                'Status': 'string',
                'Port': 123,                        # port the cluster listens on
                'AvailabilityZone': 'string',       # AZ in which the cluster was created
                'ClusterCreateTime': datetime(2015, 1, 1),  # when the cluster was originally created (UTC)
                'MasterUsername': 'string',         # master user name for the cluster
                'ClusterVersion': 'string',         # Redshift engine version on the cluster
                # Snapshots created via CreateClusterSnapshot and
                # CopyClusterSnapshot are of type "manual".
                'SnapshotType': 'string',
                'NodeType': 'string',               # node type of the nodes in the cluster
                'NumberOfNodes': 123,               # number of nodes in the cluster
                'DBName': 'string',                 # database created when the cluster was created
                # VPC identifier if the snapshot is from a cluster in a VPC;
                # otherwise this field is not in the output.
                'VpcId': 'string',
                'Encrypted': True|False,            # True if snapshot data is encrypted at rest
                # KMS key ID used to encrypt data in the source cluster.
                'KmsKeyId': 'string',
                # True if the snapshot data is encrypted using the HSM keys
                # of the source cluster.
                'EncryptedWithHSM': True|False,
                # AWS customer accounts authorized to restore the snapshot;
                # null if none. Visible only to the snapshot owner. For AWS
                # support the AccountAlias is amazon-redshift-support.
                'AccountsWithRestoreAccess': [
                    {
                        'AccountId': 'string',
                        'AccountAlias': 'string'
                    },
                ],
                # For manual snapshots, the account that created/copied the
                # snapshot; for automatic snapshots, the cluster owner.
                'OwnerAccount': 'string',
                'TotalBackupSizeInMegaBytes': 123.0,             # full backup-set size needed to restore
                'ActualIncrementalBackupSizeInMegaBytes': 123.0, # size of the incremental backup
                'BackupProgressInMegaBytes': 123.0,              # MB transferred to the snapshot backup
                'CurrentBackupRateInMegaBytesPerSecond': 123.0,  # transfer rate; 0 for a completed backup
                'EstimatedSecondsToCompletion': 123,             # time remaining; 0 for a completed backup
                # Running time of an in-progress backup, or total time of a
                # completed one.
                'ElapsedTimeInSeconds': 123,
                'SourceRegion': 'string',           # source region the snapshot was copied from
                'Tags': [                           # tags for the cluster snapshot
                    {
                        'Key': 'string',            # key, or name, for the resource tag
                        'Value': 'string'           # value for the resource tag
                    },
                ],
                'RestorableNodeTypes': [            # node types this snapshot can restore into
                    'string',
                ],
                # Whether the cluster was created with enhanced VPC routing
                # enabled (requires the cluster to be in a VPC). Default: false.
                'EnhancedVpcRouting': True|False,
                'MaintenanceTrackName': 'string',   # maintenance track for the snapshot
                # Days a manual snapshot is retained; -1 means indefinitely.
                # Must be -1 or an integer between 1 and 3,653.
                'ManualSnapshotRetentionPeriod': 123,
                'ManualSnapshotRemainingDays': 123, # days until the retention period passes
                'SnapshotRetentionStartTime': datetime(2015, 1, 1)  # start of the retention period
            }
        }

    Exceptions:

    - ``Redshift.Client.exceptions.ClusterSnapshotAlreadyExistsFault``
    - ``Redshift.Client.exceptions.ClusterSnapshotNotFoundFault``
    - ``Redshift.Client.exceptions.InvalidClusterSnapshotStateFault``
    - ``Redshift.Client.exceptions.ClusterSnapshotQuotaExceededFault``
    - ``Redshift.Client.exceptions.InvalidRetentionPeriodFault``
    """
    pass
def create_cluster(DBName=None, ClusterIdentifier=None, ClusterType=None, NodeType=None, MasterUsername=None, MasterUserPassword=None, ClusterSecurityGroups=None, VpcSecurityGroupIds=None, ClusterSubnetGroupName=None, AvailabilityZone=None, PreferredMaintenanceWindow=None, ClusterParameterGroupName=None, AutomatedSnapshotRetentionPeriod=None, ManualSnapshotRetentionPeriod=None, Port=None, ClusterVersion=None, AllowVersionUpgrade=None, NumberOfNodes=None, PubliclyAccessible=None, Encrypted=None, HsmClientCertificateIdentifier=None, HsmConfigurationIdentifier=None, ElasticIp=None, Tags=None, KmsKeyId=None, EnhancedVpcRouting=None, AdditionalInfo=None, IamRoles=None, MaintenanceTrackName=None, SnapshotScheduleIdentifier=None):
    """
    Creates a new Amazon Redshift cluster with the specified parameters.

    To create a cluster in Virtual Private Cloud (VPC), you must provide a
    cluster subnet group name. The cluster subnet group identifies the subnets
    of your VPC that Amazon Redshift uses when creating the cluster. For more
    information about managing clusters, go to Amazon Redshift Clusters in the
    Amazon Redshift Cluster Management Guide.

    See also: AWS API Documentation (Redshift CreateCluster).

    :example: response = client.create_cluster(
        ClusterIdentifier='string',
        NodeType='string',
        MasterUsername='string',
        MasterUserPassword='string',
        ClusterType='string',
        DBName='string',
        NumberOfNodes=123,
        Port=123,
        Tags=[{'Key': 'string', 'Value': 'string'}],
    )

    :type DBName: string
    :param DBName: The name of the first database to be created when the
        cluster is created. To create additional databases after the cluster
        is created, connect to the cluster with a SQL client and use SQL
        commands to create a database.
        Default: dev
        Constraints: must contain 1 to 64 alphanumeric characters; must
        contain only lowercase letters; cannot be a reserved word.
    :type ClusterIdentifier: string
    :param ClusterIdentifier: [REQUIRED] A unique identifier for the cluster,
        used for all subsequent cluster operations such as deleting or
        modifying. It also appears in the Amazon Redshift console.
        Constraints: 1 to 63 alphanumeric characters or hyphens; alphabetic
        characters must be lowercase; first character must be a letter; cannot
        end with a hyphen or contain two consecutive hyphens; must be unique
        for all clusters within an AWS account.
        Example: myexamplecluster
    :type ClusterType: string
    :param ClusterType: The type of the cluster. With 'single-node' the
        NumberOfNodes parameter is not required; with 'multi-node' it is
        required.
        Valid Values: multi-node | single-node. Default: multi-node
    :type NodeType: string
    :param NodeType: [REQUIRED] The node type to be provisioned for the
        cluster.
        Valid Values: ds2.xlarge | ds2.8xlarge | dc1.large | dc1.8xlarge |
        dc2.large | dc2.8xlarge | ra3.4xlarge | ra3.16xlarge
    :type MasterUsername: string
    :param MasterUsername: [REQUIRED] The user name associated with the master
        user account for the cluster being created.
        Constraints: 1 - 128 alphanumeric characters; cannot be PUBLIC; first
        character must be a letter; cannot be a reserved word.
    :type MasterUserPassword: string
    :param MasterUserPassword: [REQUIRED] The password associated with the
        master user account.
        Constraints: 8 to 64 characters in length; at least one uppercase
        letter, one lowercase letter, and one number; any printable ASCII
        character (code 33 to 126) except single quote, double quote, \\, /,
        @, or space.
    :type ClusterSecurityGroups: list
    :param ClusterSecurityGroups: A list of security groups to be associated
        with this cluster. Default: the default cluster security group.
    :type VpcSecurityGroupIds: list
    :param VpcSecurityGroupIds: A list of Virtual Private Cloud (VPC) security
        groups to be associated with the cluster. Default: the default VPC
        security group.
    :type ClusterSubnetGroupName: string
    :param ClusterSubnetGroupName: The name of a cluster subnet group to be
        associated with this cluster. If not provided, the cluster is deployed
        outside a VPC.
    :type AvailabilityZone: string
    :param AvailabilityZone: The EC2 Availability Zone (AZ) in which to
        provision the cluster. Must be in the same region as the current
        endpoint. Default: a random, system-chosen AZ. Example: us-east-2d
    :type PreferredMaintenanceWindow: string
    :param PreferredMaintenanceWindow: The weekly time range (in UTC) during
        which automated cluster maintenance can occur.
        Format: ddd:hh24:mi-ddd:hh24:mi. Minimum 30-minute window.
        Default: a random 30-minute window from an 8-hour block per region.
    :type ClusterParameterGroupName: string
    :param ClusterParameterGroupName: The name of the parameter group to
        associate with this cluster. Default: the default Amazon Redshift
        cluster parameter group.
        Constraints: 1 to 255 alphanumeric characters or hyphens; first
        character must be a letter; cannot end with a hyphen or contain two
        consecutive hyphens.
    :type AutomatedSnapshotRetentionPeriod: integer
    :param AutomatedSnapshotRetentionPeriod: The number of days that automated
        snapshots are retained. If 0, automated snapshots are disabled (manual
        snapshots may still be created with CreateClusterSnapshot).
        Default: 1. Constraints: 0 to 35.
    :type ManualSnapshotRetentionPeriod: integer
    :param ManualSnapshotRetentionPeriod: The default number of days to retain
        a manual snapshot. If -1, the snapshot is retained indefinitely. This
        setting doesn't change the retention period of existing snapshots.
        Must be either -1 or an integer between 1 and 3,653.
    :type Port: integer
    :param Port: The port number on which the cluster accepts incoming (JDBC
        and ODBC) connections. Default: 5439. Valid Values: 1150-65535.
    :type ClusterVersion: string
    :param ClusterVersion: The version of the Amazon Redshift engine software
        to deploy on the cluster. Only version 1.0 is currently available.
    :type AllowVersionUpgrade: boolean
    :param AllowVersionUpgrade: If true, major engine version upgrades can be
        applied automatically during the maintenance window. Default: true.
    :type NumberOfNodes: integer
    :param NumberOfNodes: The number of compute nodes in the cluster. Required
        when ClusterType is 'multi-node'; omitting it yields a single-node
        cluster. Default: 1. Constraints: 1 to 100.
    :type PubliclyAccessible: boolean
    :param PubliclyAccessible: If true, the cluster can be accessed from a
        public network.
    :type Encrypted: boolean
    :param Encrypted: If true, the data in the cluster is encrypted at rest.
        Default: false.
    :type HsmClientCertificateIdentifier: string
    :param HsmClientCertificateIdentifier: The name of the HSM client
        certificate the cluster uses to retrieve data encryption keys stored
        in an HSM.
    :type HsmConfigurationIdentifier: string
    :param HsmConfigurationIdentifier: The name of the HSM configuration that
        contains the information the cluster can use to retrieve and store
        keys in an HSM.
    :type ElasticIp: string
    :param ElasticIp: The Elastic IP (EIP) address for the cluster. The
        cluster must be provisioned in EC2-VPC and publicly accessible through
        an Internet gateway.
    :type Tags: list
    :param Tags: A list of tag instances; each is a dict with 'Key' (string)
        and 'Value' (string).
    :type KmsKeyId: string
    :param KmsKeyId: The AWS Key Management Service (KMS) key ID of the
        encryption key used to encrypt data in the cluster.
    :type EnhancedVpcRouting: boolean
    :param EnhancedVpcRouting: Whether to create the cluster with enhanced VPC
        routing enabled (the cluster must be in a VPC). Default: false.
    :type AdditionalInfo: string
    :param AdditionalInfo: Reserved.
    :type IamRoles: list
    :param IamRoles: A list of AWS Identity and Access Management (IAM) role
        ARNs the cluster can use to access other AWS services. Up to 10 IAM
        roles per request; a cluster can have up to 10 IAM roles associated
        at any time.
    :type MaintenanceTrackName: string
    :param MaintenanceTrackName: Optional name of the maintenance track for
        the cluster. If omitted, the cluster is assigned to the current track.
    :type SnapshotScheduleIdentifier: string
    :param SnapshotScheduleIdentifier: A unique identifier for the snapshot
        schedule.

    :rtype: dict
    :return: {'Cluster': {...}} describing the new cluster, including:

        - ClusterIdentifier, NodeType, DBName, MasterUsername (string)
        - ClusterStatus (string) -- one of: available |
          available, prep-for-resize | available, resize-cleanup |
          cancelling-resize | creating | deleting | final-snapshot |
          hardware-failure | incompatible-hsm | incompatible-network |
          incompatible-parameters | incompatible-restore | modifying |
          paused | rebooting | renaming | resizing | rotating-keys |
          storage-full | updating-hsm
        - ClusterAvailabilityStatus (string) -- Available | Unavailable |
          Maintenance | Modifying | Failed
        - Endpoint (dict) -- {'Address': string, 'Port': integer}
        - ClusterCreateTime (datetime)
        - AutomatedSnapshotRetentionPeriod,
          ManualSnapshotRetentionPeriod (integer)
        - ClusterSecurityGroups, VpcSecurityGroups,
          ClusterParameterGroups (list of dict)
        - ClusterSubnetGroupName, VpcId, AvailabilityZone,
          PreferredMaintenanceWindow (string)
        - PendingModifiedValues (dict) -- pending or in-progress changes
          (MasterUserPassword, NodeType, NumberOfNodes, ClusterType,
          ClusterVersion, AutomatedSnapshotRetentionPeriod,
          ClusterIdentifier, PubliclyAccessible, EnhancedVpcRouting,
          MaintenanceTrackName, EncryptionType)
        - ClusterVersion (string), AllowVersionUpgrade (boolean),
          NumberOfNodes (integer), PubliclyAccessible (boolean),
          Encrypted (boolean)
        - RestoreStatus (dict) -- status of a restore action, or null if the
          cluster was not created by restoring a snapshot
        - DataTransferProgress (dict) -- data-transfer status and rates
        - HsmStatus (dict) -- whether HSM settings changes have been applied
          (values: active, applying)
        - ClusterSnapshotCopyStatus (dict) -- cross-region snapshot copy
          configuration: DestinationRegion (string), RetentionPeriod
          (integer, days automated snapshots are retained in the destination
          region), ManualSnapshotRetentionPeriod (integer, days manual
          snapshots are retained in the destination region; -1 means
          indefinitely, otherwise 1 to 3,653), SnapshotCopyGrantName (string)
        - ClusterPublicKey (string), ClusterNodes (list of dict),
          ElasticIpStatus (dict), ClusterRevisionNumber (string)
        - Tags (list of {'Key', 'Value'}), KmsKeyId (string),
          EnhancedVpcRouting (boolean)
        - IamRoles (list of {'IamRoleArn', 'ApplyStatus'}) -- ApplyStatus is
          in-sync | adding | removing
        - PendingActions (list of string), MaintenanceTrackName (string),
          ElasticResizeNumberOfNodeOptions (string)
        - DeferredMaintenanceWindows (list of dict) --
          DeferMaintenanceIdentifier, DeferMaintenanceStartTime,
          DeferMaintenanceEndTime
        - SnapshotScheduleIdentifier (string), SnapshotScheduleState
          ('MODIFYING'|'ACTIVE'|'FAILED'),
          ExpectedNextSnapshotScheduleTime (datetime),
          ExpectedNextSnapshotScheduleTimeStatus (string) -- OnTrack |
          Pending, NextMaintenanceWindowStartTime (datetime)
        - ResizeInfo (dict) -- {'ResizeType': string ('ClassicResize'),
          'AllowCancelResize': boolean}

    :raises: Redshift.Client.exceptions.ClusterAlreadyExistsFault
        Redshift.Client.exceptions.InsufficientClusterCapacityFault
        Redshift.Client.exceptions.ClusterParameterGroupNotFoundFault
        Redshift.Client.exceptions.ClusterSecurityGroupNotFoundFault
        Redshift.Client.exceptions.ClusterQuotaExceededFault
        Redshift.Client.exceptions.NumberOfNodesQuotaExceededFault
        Redshift.Client.exceptions.NumberOfNodesPerClusterLimitExceededFault
        Redshift.Client.exceptions.ClusterSubnetGroupNotFoundFault
        Redshift.Client.exceptions.InvalidVPCNetworkStateFault
        Redshift.Client.exceptions.InvalidClusterSubnetGroupStateFault
        Redshift.Client.exceptions.InvalidSubnet
        Redshift.Client.exceptions.UnauthorizedOperation
        Redshift.Client.exceptions.HsmClientCertificateNotFoundFault
        Redshift.Client.exceptions.HsmConfigurationNotFoundFault
        Redshift.Client.exceptions.InvalidElasticIpFault
        Redshift.Client.exceptions.TagLimitExceededFault
        Redshift.Client.exceptions.InvalidTagFault
        Redshift.Client.exceptions.LimitExceededFault
        Redshift.Client.exceptions.DependentServiceRequestThrottlingFault
        Redshift.Client.exceptions.InvalidClusterTrackFault
        Redshift.Client.exceptions.SnapshotScheduleNotFoundFault
        Redshift.Client.exceptions.InvalidRetentionPeriodFault
    """
    # Stub only: this module documents the boto3 Redshift client API; the
    # real implementation lives in botocore.
    pass
def create_cluster_parameter_group(ParameterGroupName=None, ParameterGroupFamily=None, Description=None, Tags=None):
    """
    Creates an Amazon Redshift parameter group.
    Creating parameter groups is independent of creating clusters. You can associate a cluster with a parameter group when you create the cluster. You can also associate an existing cluster with a parameter group after the cluster is created by using ModifyCluster .
    Parameters in the parameter group define specific behavior that applies to the databases you create on the cluster. For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide .
    See also: AWS API Documentation
    Exceptions
    :example: response = client.create_cluster_parameter_group(
        ParameterGroupName='string',
        ParameterGroupFamily='string',
        Description='string',
        Tags=[
            {
                'Key': 'string',
                'Value': 'string'
            },
        ]
    )
    :type ParameterGroupName: string
    :param ParameterGroupName: [REQUIRED]\nThe name of the cluster parameter group.\nConstraints:\n\nMust be 1 to 255 alphanumeric characters or hyphens\nFirst character must be a letter.\nCannot end with a hyphen or contain two consecutive hyphens.\nMust be unique within your AWS account.\n\n\nNote\nThis value is stored as a lower-case string.\n\n
    :type ParameterGroupFamily: string
    :param ParameterGroupFamily: [REQUIRED]\nThe Amazon Redshift engine version to which the cluster parameter group applies. The cluster engine version determines the set of parameters.\nTo get a list of valid parameter group family names, you can call DescribeClusterParameterGroups . By default, Amazon Redshift returns a list of all the parameter groups that are owned by your AWS account, including the default parameter groups for each Amazon Redshift engine version. The parameter group family names associated with the default parameter groups provide you the valid values. For example, a valid family name is 'redshift-1.0'.\n
    :type Description: string
    :param Description: [REQUIRED]\nA description of the parameter group.\n
    :type Tags: list
    :param Tags: A list of tag instances.\n\n(dict) --A tag consisting of a name/value pair for a resource.\n\nKey (string) --The key, or name, for the resource tag.\n\nValue (string) --The value for the resource tag.\n\n\n\n\n
    :rtype: dict
    ReturnsResponse Syntax
    {
        'ClusterParameterGroup': {
            'ParameterGroupName': 'string',
            'ParameterGroupFamily': 'string',
            'Description': 'string',
            'Tags': [
                {
                    'Key': 'string',
                    'Value': 'string'
                },
            ]
        }
    }
    Response Structure
    (dict) --
    ClusterParameterGroup (dict) --
    Describes a parameter group.
    ParameterGroupName (string) --
    The name of the cluster parameter group.
    ParameterGroupFamily (string) --
    The name of the cluster parameter group family that this cluster parameter group is compatible with.
    Description (string) --
    The description of the parameter group.
    Tags (list) --
    The list of tags for the cluster parameter group.
    (dict) --
    A tag consisting of a name/value pair for a resource.
    Key (string) --
    The key, or name, for the resource tag.
    Value (string) --
    The value for the resource tag.
    Exceptions
    Redshift.Client.exceptions.ClusterParameterGroupQuotaExceededFault
    Redshift.Client.exceptions.ClusterParameterGroupAlreadyExistsFault
    Redshift.Client.exceptions.TagLimitExceededFault
    Redshift.Client.exceptions.InvalidTagFault
    :return: {
        'ClusterParameterGroup': {
            'ParameterGroupName': 'string',
            'ParameterGroupFamily': 'string',
            'Description': 'string',
            'Tags': [
                {
                    'Key': 'string',
                    'Value': 'string'
                },
            ]
        }
    }
    :returns:
    Redshift.Client.exceptions.ClusterParameterGroupQuotaExceededFault
    Redshift.Client.exceptions.ClusterParameterGroupAlreadyExistsFault
    Redshift.Client.exceptions.TagLimitExceededFault
    Redshift.Client.exceptions.InvalidTagFault
    """
    # Documentation-only stub: the body is intentionally empty; the docstring
    # above records the API contract of the Redshift CreateClusterParameterGroup call.
    pass
def create_cluster_security_group(ClusterSecurityGroupName=None, Description=None, Tags=None):
    """
    Creates a new Amazon Redshift security group. You use security groups to control access to non-VPC clusters.
    For information about managing security groups, go to Amazon Redshift Cluster Security Groups in the Amazon Redshift Cluster Management Guide .
    See also: AWS API Documentation
    Exceptions
    :example: response = client.create_cluster_security_group(
        ClusterSecurityGroupName='string',
        Description='string',
        Tags=[
            {
                'Key': 'string',
                'Value': 'string'
            },
        ]
    )
    :type ClusterSecurityGroupName: string
    :param ClusterSecurityGroupName: [REQUIRED]\nThe name for the security group. Amazon Redshift stores the value as a lowercase string.\nConstraints:\n\nMust contain no more than 255 alphanumeric characters or hyphens.\nMust not be 'Default'.\nMust be unique for all security groups that are created by your AWS account.\n\nExample: examplesecuritygroup\n
    :type Description: string
    :param Description: [REQUIRED]\nA description for the security group.\n
    :type Tags: list
    :param Tags: A list of tag instances.\n\n(dict) --A tag consisting of a name/value pair for a resource.\n\nKey (string) --The key, or name, for the resource tag.\n\nValue (string) --The value for the resource tag.\n\n\n\n\n
    :rtype: dict
    ReturnsResponse Syntax
    {
        'ClusterSecurityGroup': {
            'ClusterSecurityGroupName': 'string',
            'Description': 'string',
            'EC2SecurityGroups': [
                {
                    'Status': 'string',
                    'EC2SecurityGroupName': 'string',
                    'EC2SecurityGroupOwnerId': 'string',
                    'Tags': [
                        {
                            'Key': 'string',
                            'Value': 'string'
                        },
                    ]
                },
            ],
            'IPRanges': [
                {
                    'Status': 'string',
                    'CIDRIP': 'string',
                    'Tags': [
                        {
                            'Key': 'string',
                            'Value': 'string'
                        },
                    ]
                },
            ],
            'Tags': [
                {
                    'Key': 'string',
                    'Value': 'string'
                },
            ]
        }
    }
    Response Structure
    (dict) --
    ClusterSecurityGroup (dict) --
    Describes a security group.
    ClusterSecurityGroupName (string) --
    The name of the cluster security group to which the operation was applied.
    Description (string) --
    A description of the security group.
    EC2SecurityGroups (list) --
    A list of EC2 security groups that are permitted to access clusters associated with this cluster security group.
    (dict) --
    Describes an Amazon EC2 security group.
    Status (string) --
    The status of the EC2 security group.
    EC2SecurityGroupName (string) --
    The name of the EC2 Security Group.
    EC2SecurityGroupOwnerId (string) --
    The AWS ID of the owner of the EC2 security group specified in the EC2SecurityGroupName field.
    Tags (list) --
    The list of tags for the EC2 security group.
    (dict) --
    A tag consisting of a name/value pair for a resource.
    Key (string) --
    The key, or name, for the resource tag.
    Value (string) --
    The value for the resource tag.
    IPRanges (list) --
    A list of IP ranges (CIDR blocks) that are permitted to access clusters associated with this cluster security group.
    (dict) --
    Describes an IP range used in a security group.
    Status (string) --
    The status of the IP range, for example, "authorized".
    CIDRIP (string) --
    The IP range in Classless Inter-Domain Routing (CIDR) notation.
    Tags (list) --
    The list of tags for the IP range.
    (dict) --
    A tag consisting of a name/value pair for a resource.
    Key (string) --
    The key, or name, for the resource tag.
    Value (string) --
    The value for the resource tag.
    Tags (list) --
    The list of tags for the cluster security group.
    (dict) --
    A tag consisting of a name/value pair for a resource.
    Key (string) --
    The key, or name, for the resource tag.
    Value (string) --
    The value for the resource tag.
    Exceptions
    Redshift.Client.exceptions.ClusterSecurityGroupAlreadyExistsFault
    Redshift.Client.exceptions.ClusterSecurityGroupQuotaExceededFault
    Redshift.Client.exceptions.TagLimitExceededFault
    Redshift.Client.exceptions.InvalidTagFault
    :return: {
        'ClusterSecurityGroup': {
            'ClusterSecurityGroupName': 'string',
            'Description': 'string',
            'EC2SecurityGroups': [
                {
                    'Status': 'string',
                    'EC2SecurityGroupName': 'string',
                    'EC2SecurityGroupOwnerId': 'string',
                    'Tags': [
                        {
                            'Key': 'string',
                            'Value': 'string'
                        },
                    ]
                },
            ],
            'IPRanges': [
                {
                    'Status': 'string',
                    'CIDRIP': 'string',
                    'Tags': [
                        {
                            'Key': 'string',
                            'Value': 'string'
                        },
                    ]
                },
            ],
            'Tags': [
                {
                    'Key': 'string',
                    'Value': 'string'
                },
            ]
        }
    }
    :returns:
    Redshift.Client.exceptions.ClusterSecurityGroupAlreadyExistsFault
    Redshift.Client.exceptions.ClusterSecurityGroupQuotaExceededFault
    Redshift.Client.exceptions.TagLimitExceededFault
    Redshift.Client.exceptions.InvalidTagFault
    """
    # Documentation-only stub: the body is intentionally empty; the docstring
    # above records the API contract of the Redshift CreateClusterSecurityGroup call.
    pass
def create_cluster_snapshot(SnapshotIdentifier=None, ClusterIdentifier=None, ManualSnapshotRetentionPeriod=None, Tags=None):
    """
    Creates a manual snapshot of the specified cluster. The cluster must be in the available state.
    For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide .
    See also: AWS API Documentation
    Exceptions
    :example: response = client.create_cluster_snapshot(
        SnapshotIdentifier='string',
        ClusterIdentifier='string',
        ManualSnapshotRetentionPeriod=123,
        Tags=[
            {
                'Key': 'string',
                'Value': 'string'
            },
        ]
    )
    :type SnapshotIdentifier: string
    :param SnapshotIdentifier: [REQUIRED]\nA unique identifier for the snapshot that you are requesting. This identifier must be unique for all snapshots within the AWS account.\nConstraints:\n\nCannot be null, empty, or blank\nMust contain from 1 to 255 alphanumeric characters or hyphens\nFirst character must be a letter\nCannot end with a hyphen or contain two consecutive hyphens\n\nExample: my-snapshot-id\n
    :type ClusterIdentifier: string
    :param ClusterIdentifier: [REQUIRED]\nThe cluster identifier for which you want a snapshot.\n
    :type ManualSnapshotRetentionPeriod: integer
    :param ManualSnapshotRetentionPeriod: The number of days that a manual snapshot is retained. If the value is -1, the manual snapshot is retained indefinitely.\nThe value must be either -1 or an integer between 1 and 3,653.\nThe default value is -1.\n
    :type Tags: list
    :param Tags: A list of tag instances.\n\n(dict) --A tag consisting of a name/value pair for a resource.\n\nKey (string) --The key, or name, for the resource tag.\n\nValue (string) --The value for the resource tag.\n\n\n\n\n
    :rtype: dict
    ReturnsResponse Syntax
    {
        'Snapshot': {
            'SnapshotIdentifier': 'string',
            'ClusterIdentifier': 'string',
            'SnapshotCreateTime': datetime(2015, 1, 1),
            'Status': 'string',
            'Port': 123,
            'AvailabilityZone': 'string',
            'ClusterCreateTime': datetime(2015, 1, 1),
            'MasterUsername': 'string',
            'ClusterVersion': 'string',
            'SnapshotType': 'string',
            'NodeType': 'string',
            'NumberOfNodes': 123,
            'DBName': 'string',
            'VpcId': 'string',
            'Encrypted': True|False,
            'KmsKeyId': 'string',
            'EncryptedWithHSM': True|False,
            'AccountsWithRestoreAccess': [
                {
                    'AccountId': 'string',
                    'AccountAlias': 'string'
                },
            ],
            'OwnerAccount': 'string',
            'TotalBackupSizeInMegaBytes': 123.0,
            'ActualIncrementalBackupSizeInMegaBytes': 123.0,
            'BackupProgressInMegaBytes': 123.0,
            'CurrentBackupRateInMegaBytesPerSecond': 123.0,
            'EstimatedSecondsToCompletion': 123,
            'ElapsedTimeInSeconds': 123,
            'SourceRegion': 'string',
            'Tags': [
                {
                    'Key': 'string',
                    'Value': 'string'
                },
            ],
            'RestorableNodeTypes': [
                'string',
            ],
            'EnhancedVpcRouting': True|False,
            'MaintenanceTrackName': 'string',
            'ManualSnapshotRetentionPeriod': 123,
            'ManualSnapshotRemainingDays': 123,
            'SnapshotRetentionStartTime': datetime(2015, 1, 1)
        }
    }
    Response Structure
    (dict) --
    Snapshot (dict) --
    Describes a snapshot.
    SnapshotIdentifier (string) --
    The snapshot identifier that is provided in the request.
    ClusterIdentifier (string) --
    The identifier of the cluster for which the snapshot was taken.
    SnapshotCreateTime (datetime) --
    The time (in UTC format) when Amazon Redshift began the snapshot. A snapshot contains a copy of the cluster data as of this exact time.
    Status (string) --
    The snapshot status. The value of the status depends on the API operation used:
    CreateClusterSnapshot and CopyClusterSnapshot returns status as "creating".
    DescribeClusterSnapshots returns status as "creating", "available", "final snapshot", or "failed".
    DeleteClusterSnapshot returns status as "deleted".
    Port (integer) --
    The port that the cluster is listening on.
    AvailabilityZone (string) --
    The Availability Zone in which the cluster was created.
    ClusterCreateTime (datetime) --
    The time (UTC) when the cluster was originally created.
    MasterUsername (string) --
    The master user name for the cluster.
    ClusterVersion (string) --
    The version ID of the Amazon Redshift engine that is running on the cluster.
    SnapshotType (string) --
    The snapshot type. Snapshots created using CreateClusterSnapshot and CopyClusterSnapshot are of type "manual".
    NodeType (string) --
    The node type of the nodes in the cluster.
    NumberOfNodes (integer) --
    The number of nodes in the cluster.
    DBName (string) --
    The name of the database that was created when the cluster was created.
    VpcId (string) --
    The VPC identifier of the cluster if the snapshot is from a cluster in a VPC. Otherwise, this field is not in the output.
    Encrypted (boolean) --
    If true , the data in the snapshot is encrypted at rest.
    KmsKeyId (string) --
    The AWS Key Management Service (KMS) key ID of the encryption key that was used to encrypt data in the cluster from which the snapshot was taken.
    EncryptedWithHSM (boolean) --
    A boolean that indicates whether the snapshot data is encrypted using the HSM keys of the source cluster. true indicates that the data is encrypted using HSM keys.
    AccountsWithRestoreAccess (list) --
    A list of the AWS customer accounts authorized to restore the snapshot. Returns null if no accounts are authorized. Visible only to the snapshot owner.
    (dict) --
    Describes an AWS customer account authorized to restore a snapshot.
    AccountId (string) --
    The identifier of an AWS customer account authorized to restore a snapshot.
    AccountAlias (string) --
    The identifier of an AWS support account authorized to restore a snapshot. For AWS support, the identifier is amazon-redshift-support .
    OwnerAccount (string) --
    For manual snapshots, the AWS customer account used to create or copy the snapshot. For automatic snapshots, the owner of the cluster. The owner can perform all snapshot actions, such as sharing a manual snapshot.
    TotalBackupSizeInMegaBytes (float) --
    The size of the complete set of backup data that would be used to restore the cluster.
    ActualIncrementalBackupSizeInMegaBytes (float) --
    The size of the incremental backup.
    BackupProgressInMegaBytes (float) --
    The number of megabytes that have been transferred to the snapshot backup.
    CurrentBackupRateInMegaBytesPerSecond (float) --
    The number of megabytes per second being transferred to the snapshot backup. Returns 0 for a completed backup.
    EstimatedSecondsToCompletion (integer) --
    The estimate of the time remaining before the snapshot backup will complete. Returns 0 for a completed backup.
    ElapsedTimeInSeconds (integer) --
    The amount of time an in-progress snapshot backup has been running, or the amount of time it took a completed backup to finish.
    SourceRegion (string) --
    The source region from which the snapshot was copied.
    Tags (list) --
    The list of tags for the cluster snapshot.
    (dict) --
    A tag consisting of a name/value pair for a resource.
    Key (string) --
    The key, or name, for the resource tag.
    Value (string) --
    The value for the resource tag.
    RestorableNodeTypes (list) --
    The list of node types that this cluster snapshot is able to restore into.
    (string) --
    EnhancedVpcRouting (boolean) --
    An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.
    If this option is true , enhanced VPC routing is enabled.
    Default: false
    MaintenanceTrackName (string) --
    The name of the maintenance track for the snapshot.
    ManualSnapshotRetentionPeriod (integer) --
    The number of days that a manual snapshot is retained. If the value is -1, the manual snapshot is retained indefinitely.
    The value must be either -1 or an integer between 1 and 3,653.
    ManualSnapshotRemainingDays (integer) --
    The number of days until a manual snapshot will pass its retention period.
    SnapshotRetentionStartTime (datetime) --
    A timestamp representing the start of the retention period for the snapshot.
    Exceptions
    Redshift.Client.exceptions.ClusterSnapshotAlreadyExistsFault
    Redshift.Client.exceptions.InvalidClusterStateFault
    Redshift.Client.exceptions.ClusterNotFoundFault
    Redshift.Client.exceptions.ClusterSnapshotQuotaExceededFault
    Redshift.Client.exceptions.TagLimitExceededFault
    Redshift.Client.exceptions.InvalidTagFault
    Redshift.Client.exceptions.InvalidRetentionPeriodFault
    :return: {
        'Snapshot': {
            'SnapshotIdentifier': 'string',
            'ClusterIdentifier': 'string',
            'SnapshotCreateTime': datetime(2015, 1, 1),
            'Status': 'string',
            'Port': 123,
            'AvailabilityZone': 'string',
            'ClusterCreateTime': datetime(2015, 1, 1),
            'MasterUsername': 'string',
            'ClusterVersion': 'string',
            'SnapshotType': 'string',
            'NodeType': 'string',
            'NumberOfNodes': 123,
            'DBName': 'string',
            'VpcId': 'string',
            'Encrypted': True|False,
            'KmsKeyId': 'string',
            'EncryptedWithHSM': True|False,
            'AccountsWithRestoreAccess': [
                {
                    'AccountId': 'string',
                    'AccountAlias': 'string'
                },
            ],
            'OwnerAccount': 'string',
            'TotalBackupSizeInMegaBytes': 123.0,
            'ActualIncrementalBackupSizeInMegaBytes': 123.0,
            'BackupProgressInMegaBytes': 123.0,
            'CurrentBackupRateInMegaBytesPerSecond': 123.0,
            'EstimatedSecondsToCompletion': 123,
            'ElapsedTimeInSeconds': 123,
            'SourceRegion': 'string',
            'Tags': [
                {
                    'Key': 'string',
                    'Value': 'string'
                },
            ],
            'RestorableNodeTypes': [
                'string',
            ],
            'EnhancedVpcRouting': True|False,
            'MaintenanceTrackName': 'string',
            'ManualSnapshotRetentionPeriod': 123,
            'ManualSnapshotRemainingDays': 123,
            'SnapshotRetentionStartTime': datetime(2015, 1, 1)
        }
    }
    :returns:
    CreateClusterSnapshot and CopyClusterSnapshot returns status as "creating".
    DescribeClusterSnapshots returns status as "creating", "available", "final snapshot", or "failed".
    DeleteClusterSnapshot returns status as "deleted".
    """
    # Documentation-only stub: the body is intentionally empty; the docstring
    # above records the API contract of the Redshift CreateClusterSnapshot call.
    pass
def create_cluster_subnet_group(ClusterSubnetGroupName=None, Description=None, SubnetIds=None, Tags=None):
    """
    Creates a new Amazon Redshift subnet group. You must provide a list of one or more subnets in your existing Amazon Virtual Private Cloud (Amazon VPC) when creating Amazon Redshift subnet group.
    For information about subnet groups, go to Amazon Redshift Cluster Subnet Groups in the Amazon Redshift Cluster Management Guide .
    See also: AWS API Documentation
    Exceptions
    :example: response = client.create_cluster_subnet_group(
        ClusterSubnetGroupName='string',
        Description='string',
        SubnetIds=[
            'string',
        ],
        Tags=[
            {
                'Key': 'string',
                'Value': 'string'
            },
        ]
    )
    :type ClusterSubnetGroupName: string
    :param ClusterSubnetGroupName: [REQUIRED]\nThe name for the subnet group. Amazon Redshift stores the value as a lowercase string.\nConstraints:\n\nMust contain no more than 255 alphanumeric characters or hyphens.\nMust not be 'Default'.\nMust be unique for all subnet groups that are created by your AWS account.\n\nExample: examplesubnetgroup\n
    :type Description: string
    :param Description: [REQUIRED]\nA description for the subnet group.\n
    :type SubnetIds: list
    :param SubnetIds: [REQUIRED]\nAn array of VPC subnet IDs. A maximum of 20 subnets can be modified in a single request.\n\n(string) --\n\n
    :type Tags: list
    :param Tags: A list of tag instances.\n\n(dict) --A tag consisting of a name/value pair for a resource.\n\nKey (string) --The key, or name, for the resource tag.\n\nValue (string) --The value for the resource tag.\n\n\n\n\n
    :rtype: dict
    ReturnsResponse Syntax
    {
        'ClusterSubnetGroup': {
            'ClusterSubnetGroupName': 'string',
            'Description': 'string',
            'VpcId': 'string',
            'SubnetGroupStatus': 'string',
            'Subnets': [
                {
                    'SubnetIdentifier': 'string',
                    'SubnetAvailabilityZone': {
                        'Name': 'string',
                        'SupportedPlatforms': [
                            {
                                'Name': 'string'
                            },
                        ]
                    },
                    'SubnetStatus': 'string'
                },
            ],
            'Tags': [
                {
                    'Key': 'string',
                    'Value': 'string'
                },
            ]
        }
    }
    Response Structure
    (dict) --
    ClusterSubnetGroup (dict) --
    Describes a subnet group.
    ClusterSubnetGroupName (string) --
    The name of the cluster subnet group.
    Description (string) --
    The description of the cluster subnet group.
    VpcId (string) --
    The VPC ID of the cluster subnet group.
    SubnetGroupStatus (string) --
    The status of the cluster subnet group. Possible values are Complete , Incomplete and Invalid .
    Subnets (list) --
    A list of the VPC Subnet elements.
    (dict) --
    Describes a subnet.
    SubnetIdentifier (string) --
    The identifier of the subnet.
    SubnetAvailabilityZone (dict) --
    Name (string) --
    The name of the availability zone.
    SupportedPlatforms (list) --
    (dict) --
    A list of supported platforms for orderable clusters.
    Name (string) --
    SubnetStatus (string) --
    The status of the subnet.
    Tags (list) --
    The list of tags for the cluster subnet group.
    (dict) --
    A tag consisting of a name/value pair for a resource.
    Key (string) --
    The key, or name, for the resource tag.
    Value (string) --
    The value for the resource tag.
    Exceptions
    Redshift.Client.exceptions.ClusterSubnetGroupAlreadyExistsFault
    Redshift.Client.exceptions.ClusterSubnetGroupQuotaExceededFault
    Redshift.Client.exceptions.ClusterSubnetQuotaExceededFault
    Redshift.Client.exceptions.InvalidSubnet
    Redshift.Client.exceptions.UnauthorizedOperation
    Redshift.Client.exceptions.TagLimitExceededFault
    Redshift.Client.exceptions.InvalidTagFault
    Redshift.Client.exceptions.DependentServiceRequestThrottlingFault
    :return: {
        'ClusterSubnetGroup': {
            'ClusterSubnetGroupName': 'string',
            'Description': 'string',
            'VpcId': 'string',
            'SubnetGroupStatus': 'string',
            'Subnets': [
                {
                    'SubnetIdentifier': 'string',
                    'SubnetAvailabilityZone': {
                        'Name': 'string',
                        'SupportedPlatforms': [
                            {
                                'Name': 'string'
                            },
                        ]
                    },
                    'SubnetStatus': 'string'
                },
            ],
            'Tags': [
                {
                    'Key': 'string',
                    'Value': 'string'
                },
            ]
        }
    }
    :returns:
    Name (string) --
    """
    # Documentation-only stub: the body is intentionally empty; the docstring
    # above records the API contract of the Redshift CreateClusterSubnetGroup call.
    pass
def create_event_subscription(SubscriptionName=None, SnsTopicArn=None, SourceType=None, SourceIds=None, EventCategories=None, Severity=None, Enabled=None, Tags=None):
    """
    Creates an Amazon Redshift event notification subscription. This action requires an ARN (Amazon Resource Name) of an Amazon SNS topic created by either the Amazon Redshift console, the Amazon SNS console, or the Amazon SNS API. To obtain an ARN with Amazon SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.
    You can specify the source type, and lists of Amazon Redshift source IDs, event categories, and event severities. Notifications will be sent for all events you want that match those criteria. For example, you can specify source type = cluster, source ID = my-cluster-1 and mycluster2, event categories = Availability, Backup, and severity = ERROR. The subscription will only send notifications for those ERROR events in the Availability and Backup categories for the specified clusters.
    If you specify both the source type and source IDs, such as source type = cluster and source identifier = my-cluster-1, notifications will be sent for all the cluster events for my-cluster-1. If you specify a source type but do not specify a source identifier, you will receive notice of the events for the objects of that type in your AWS account. If you do not specify either the SourceType or the SourceIdentifier, you will be notified of events generated from all Amazon Redshift sources belonging to your AWS account. You must specify a source type if you specify a source ID.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.create_event_subscription(
        SubscriptionName='string',
        SnsTopicArn='string',
        SourceType='string',
        SourceIds=[
            'string',
        ],
        EventCategories=[
            'string',
        ],
        Severity='string',
        Enabled=True|False,
        Tags=[
            {
                'Key': 'string',
                'Value': 'string'
            },
        ]
    )
    :type SubscriptionName: string
    :param SubscriptionName: [REQUIRED]\nThe name of the event subscription to be created.\nConstraints:\n\nCannot be null, empty, or blank.\nMust contain from 1 to 255 alphanumeric characters or hyphens.\nFirst character must be a letter.\nCannot end with a hyphen or contain two consecutive hyphens.\n\n
    :type SnsTopicArn: string
    :param SnsTopicArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the Amazon SNS topic used to transmit the event notifications. The ARN is created by Amazon SNS when you create a topic and subscribe to it.\n
    :type SourceType: string
    :param SourceType: The type of source that will be generating the events. For example, if you want to be notified of events generated by a cluster, you would set this parameter to cluster. If this value is not specified, events are returned for all Amazon Redshift objects in your AWS account. You must specify a source type in order to specify source IDs.\nValid values: cluster, cluster-parameter-group, cluster-security-group, cluster-snapshot, and scheduled-action.\n
    :type SourceIds: list
    :param SourceIds: A list of one or more identifiers of Amazon Redshift source objects. All of the objects must be of the same type as was specified in the source type parameter. The event subscription will return only events generated by the specified objects. If not specified, then events are returned for all objects within the source type specified.\nExample: my-cluster-1, my-cluster-2\nExample: my-snapshot-20131010\n\n(string) --\n\n
    :type EventCategories: list
    :param EventCategories: Specifies the Amazon Redshift event categories to be published by the event notification subscription.\nValues: configuration, management, monitoring, security\n\n(string) --\n\n
    :type Severity: string
    :param Severity: Specifies the Amazon Redshift event severity to be published by the event notification subscription.\nValues: ERROR, INFO\n
    :type Enabled: boolean
    :param Enabled: A boolean value; set to true to activate the subscription, and set to false to create the subscription but not activate it.
    :type Tags: list
    :param Tags: A list of tag instances.\n\n(dict) --A tag consisting of a name/value pair for a resource.\n\nKey (string) --The key, or name, for the resource tag.\n\nValue (string) --The value for the resource tag.\n\n\n\n\n
    :rtype: dict
    ReturnsResponse Syntax
    {
        'EventSubscription': {
            'CustomerAwsId': 'string',
            'CustSubscriptionId': 'string',
            'SnsTopicArn': 'string',
            'Status': 'string',
            'SubscriptionCreationTime': datetime(2015, 1, 1),
            'SourceType': 'string',
            'SourceIdsList': [
                'string',
            ],
            'EventCategoriesList': [
                'string',
            ],
            'Severity': 'string',
            'Enabled': True|False,
            'Tags': [
                {
                    'Key': 'string',
                    'Value': 'string'
                },
            ]
        }
    }
    Response Structure
    (dict) --
    EventSubscription (dict) --
    Describes event subscriptions.
    CustomerAwsId (string) --
    The AWS customer account associated with the Amazon Redshift event notification subscription.
    CustSubscriptionId (string) --
    The name of the Amazon Redshift event notification subscription.
    SnsTopicArn (string) --
    The Amazon Resource Name (ARN) of the Amazon SNS topic used by the event notification subscription.
    Status (string) --
    The status of the Amazon Redshift event notification subscription.
    Constraints:
    Can be one of the following: active | no-permission | topic-not-exist
    The status "no-permission" indicates that Amazon Redshift no longer has permission to post to the Amazon SNS topic. The status "topic-not-exist" indicates that the topic was deleted after the subscription was created.
    SubscriptionCreationTime (datetime) --
    The date and time the Amazon Redshift event notification subscription was created.
    SourceType (string) --
    The source type of the events returned by the Amazon Redshift event notification, such as cluster, cluster-snapshot, cluster-parameter-group, cluster-security-group, or scheduled-action.
    SourceIdsList (list) --
    A list of the sources that publish events to the Amazon Redshift event notification subscription.
    (string) --
    EventCategoriesList (list) --
    The list of Amazon Redshift event categories specified in the event notification subscription.
    Values: Configuration, Management, Monitoring, Security
    (string) --
    Severity (string) --
    The event severity specified in the Amazon Redshift event notification subscription.
    Values: ERROR, INFO
    Enabled (boolean) --
    A boolean value indicating whether the subscription is enabled; true indicates that the subscription is enabled.
    Tags (list) --
    The list of tags for the event subscription.
    (dict) --
    A tag consisting of a name/value pair for a resource.
    Key (string) --
    The key, or name, for the resource tag.
    Value (string) --
    The value for the resource tag.
    Exceptions
    Redshift.Client.exceptions.EventSubscriptionQuotaExceededFault
    Redshift.Client.exceptions.SubscriptionAlreadyExistFault
    Redshift.Client.exceptions.SNSInvalidTopicFault
    Redshift.Client.exceptions.SNSNoAuthorizationFault
    Redshift.Client.exceptions.SNSTopicArnNotFoundFault
    Redshift.Client.exceptions.SubscriptionEventIdNotFoundFault
    Redshift.Client.exceptions.SubscriptionCategoryNotFoundFault
    Redshift.Client.exceptions.SubscriptionSeverityNotFoundFault
    Redshift.Client.exceptions.SourceNotFoundFault
    Redshift.Client.exceptions.TagLimitExceededFault
    Redshift.Client.exceptions.InvalidTagFault
    :return: {
        'EventSubscription': {
            'CustomerAwsId': 'string',
            'CustSubscriptionId': 'string',
            'SnsTopicArn': 'string',
            'Status': 'string',
            'SubscriptionCreationTime': datetime(2015, 1, 1),
            'SourceType': 'string',
            'SourceIdsList': [
                'string',
            ],
            'EventCategoriesList': [
                'string',
            ],
            'Severity': 'string',
            'Enabled': True|False,
            'Tags': [
                {
                    'Key': 'string',
                    'Value': 'string'
                },
            ]
        }
    }
    :returns:
    Can be one of the following: active | no-permission | topic-not-exist
    The status "no-permission" indicates that Amazon Redshift no longer has permission to post to the Amazon SNS topic. The status "topic-not-exist" indicates that the topic was deleted after the subscription was created.
    """
    # Documentation-only stub: the body is intentionally empty; the docstring
    # above records the API contract of the Redshift CreateEventSubscription call.
    pass
def create_hsm_client_certificate(HsmClientCertificateIdentifier=None, Tags=None):
    """
    Creates an HSM client certificate that an Amazon Redshift cluster will use to connect to the client\'s HSM in order to store and retrieve the keys used to encrypt the cluster databases.
    The command returns a public key, which you must store in the HSM. In addition to creating the HSM certificate, you must create an Amazon Redshift HSM configuration that provides a cluster the information needed to store and use encryption keys in the HSM. For more information, go to Hardware Security Modules in the Amazon Redshift Cluster Management Guide.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.create_hsm_client_certificate(
        HsmClientCertificateIdentifier='string',
        Tags=[
            {
                'Key': 'string',
                'Value': 'string'
            },
        ]
    )
    :type HsmClientCertificateIdentifier: string
    :param HsmClientCertificateIdentifier: [REQUIRED]\nThe identifier to be assigned to the new HSM client certificate that the cluster will use to connect to the HSM to use the database encryption keys.\n
    :type Tags: list
    :param Tags: A list of tag instances.\n\n(dict) --A tag consisting of a name/value pair for a resource.\n\nKey (string) --The key, or name, for the resource tag.\n\nValue (string) --The value for the resource tag.\n\n\n\n\n
    :rtype: dict
    ReturnsResponse Syntax
    {
        'HsmClientCertificate': {
            'HsmClientCertificateIdentifier': 'string',
            'HsmClientCertificatePublicKey': 'string',
            'Tags': [
                {
                    'Key': 'string',
                    'Value': 'string'
                },
            ]
        }
    }
    Response Structure
    (dict) --
    HsmClientCertificate (dict) --
    Returns information about an HSM client certificate. The certificate is stored in a secure Hardware Storage Module (HSM), and used by the Amazon Redshift cluster to encrypt data files.
    HsmClientCertificateIdentifier (string) --
    The identifier of the HSM client certificate.
    HsmClientCertificatePublicKey (string) --
    The public key that the Amazon Redshift cluster will use to connect to the HSM. You must register the public key in the HSM.
    Tags (list) --
    The list of tags for the HSM client certificate.
    (dict) --
    A tag consisting of a name/value pair for a resource.
    Key (string) --
    The key, or name, for the resource tag.
    Value (string) --
    The value for the resource tag.
    Exceptions
    Redshift.Client.exceptions.HsmClientCertificateAlreadyExistsFault
    Redshift.Client.exceptions.HsmClientCertificateQuotaExceededFault
    Redshift.Client.exceptions.TagLimitExceededFault
    Redshift.Client.exceptions.InvalidTagFault
    :return: {
        'HsmClientCertificate': {
            'HsmClientCertificateIdentifier': 'string',
            'HsmClientCertificatePublicKey': 'string',
            'Tags': [
                {
                    'Key': 'string',
                    'Value': 'string'
                },
            ]
        }
    }
    :returns:
    Redshift.Client.exceptions.HsmClientCertificateAlreadyExistsFault
    Redshift.Client.exceptions.HsmClientCertificateQuotaExceededFault
    Redshift.Client.exceptions.TagLimitExceededFault
    Redshift.Client.exceptions.InvalidTagFault
    """
    # Documentation-only stub: the body is intentionally empty; the docstring
    # above records the API contract of the Redshift CreateHsmClientCertificate call.
    pass
def create_hsm_configuration(HsmConfigurationIdentifier=None, Description=None, HsmIpAddress=None, HsmPartitionName=None, HsmPartitionPassword=None, HsmServerPublicCertificate=None, Tags=None):
    """Create an Amazon Redshift HSM configuration.

    The configuration stores the information a cluster needs to keep and use
    its database encryption keys in a Hardware Security Module (HSM). After
    it is created, the configuration can be passed as a parameter when
    creating a cluster, and the cluster will then store its encryption keys
    in the HSM. An HSM client certificate must also be created; for details,
    see "Hardware Security Modules" in the Amazon Redshift Cluster
    Management Guide.

    See also: AWS API Documentation

    :example: response = client.create_hsm_configuration(
        HsmConfigurationIdentifier='string',
        Description='string',
        HsmIpAddress='string',
        HsmPartitionName='string',
        HsmPartitionPassword='string',
        HsmServerPublicCertificate='string',
        Tags=[{'Key': 'string', 'Value': 'string'}]
    )

    :type HsmConfigurationIdentifier: string
    :param HsmConfigurationIdentifier: [REQUIRED] Identifier to assign to the
        new Amazon Redshift HSM configuration.
    :type Description: string
    :param Description: [REQUIRED] Text description of the HSM configuration.
    :type HsmIpAddress: string
    :param HsmIpAddress: [REQUIRED] IP address the cluster must use to reach
        the HSM.
    :type HsmPartitionName: string
    :param HsmPartitionName: [REQUIRED] Name of the HSM partition in which
        clusters will store their database encryption keys.
    :type HsmPartitionPassword: string
    :param HsmPartitionPassword: [REQUIRED] Password needed to access the HSM
        partition.
    :type HsmServerPublicCertificate: string
    :param HsmServerPublicCertificate: [REQUIRED] The HSM's public certificate
        file. With Cloud HSM the file name is server.pem.
    :type Tags: list
    :param Tags: Optional list of tag dicts, each with ``Key`` (string) and
        ``Value`` (string) describing a name/value pair for the resource.

    :rtype: dict
    :return: {
        'HsmConfiguration': {
            'HsmConfigurationIdentifier': 'string',
            'Description': 'string',
            'HsmIpAddress': 'string',
            'HsmPartitionName': 'string',
            'Tags': [{'Key': 'string', 'Value': 'string'}]
        }
    }

    :raises Redshift.Client.exceptions.HsmConfigurationAlreadyExistsFault:
    :raises Redshift.Client.exceptions.HsmConfigurationQuotaExceededFault:
    :raises Redshift.Client.exceptions.TagLimitExceededFault:
    :raises Redshift.Client.exceptions.InvalidTagFault:
    """
    pass
def create_scheduled_action(ScheduledActionName=None, TargetAction=None, Schedule=None, IamRole=None, ScheduledActionDescription=None, StartTime=None, EndTime=None, Enable=None):
    """Create a scheduled action.

    A scheduled action pairs a schedule with a single Amazon Redshift API
    action — for example, a schedule on which to run the ResizeCluster API
    operation.

    See also: AWS API Documentation

    :example: response = client.create_scheduled_action(
        ScheduledActionName='string',
        TargetAction={
            'ResizeCluster': {
                'ClusterIdentifier': 'string',
                'ClusterType': 'string',
                'NodeType': 'string',
                'NumberOfNodes': 123,
                'Classic': True|False
            },
            'PauseCluster': {'ClusterIdentifier': 'string'},
            'ResumeCluster': {'ClusterIdentifier': 'string'}
        },
        Schedule='string',
        IamRole='string',
        ScheduledActionDescription='string',
        StartTime=datetime(2015, 1, 1),
        EndTime=datetime(2015, 1, 1),
        Enable=True|False
    )

    :type ScheduledActionName: string
    :param ScheduledActionName: [REQUIRED] Name of the scheduled action;
        must be unique within the account. See ScheduledAction.
    :type TargetAction: dict
    :param TargetAction: [REQUIRED] The Amazon Redshift API operation to run,
        with its input parameters. Exactly one of:

        - ``ResizeCluster`` (dict): runs a ResizeCluster operation.
          ``ClusterIdentifier`` (string, REQUIRED) — cluster to resize;
          ``ClusterType`` (string) — new cluster type;
          ``NodeType`` (string) — new node type (defaults to the current one);
          ``NumberOfNodes`` (integer) — new node count;
          ``Classic`` (boolean) — use the classic resize process; when omitted
          or false the resize is elastic.
        - ``PauseCluster`` (dict): runs a PauseCluster operation.
          ``ClusterIdentifier`` (string, REQUIRED) — cluster to pause.
        - ``ResumeCluster`` (dict): runs a ResumeCluster operation.
          ``ClusterIdentifier`` (string, REQUIRED) — cluster to resume.
    :type Schedule: string
    :param Schedule: [REQUIRED] Schedule in at( ) or cron( ) format. See
        ScheduledAction.
    :type IamRole: string
    :param IamRole: [REQUIRED] IAM role to assume to run the target action.
        See ScheduledAction.
    :type ScheduledActionDescription: string
    :param ScheduledActionDescription: Description of the scheduled action.
    :type StartTime: datetime
    :param StartTime: UTC start time; the action does not trigger before it.
    :type EndTime: datetime
    :param EndTime: UTC end time; the action does not trigger after it.
    :type Enable: boolean
    :param Enable: If true the schedule is enabled; if false the scheduled
        action does not trigger.

    :rtype: dict
    :return: {
        'ScheduledActionName': 'string',
        'TargetAction': {
            'ResizeCluster': {
                'ClusterIdentifier': 'string',
                'ClusterType': 'string',
                'NodeType': 'string',
                'NumberOfNodes': 123,
                'Classic': True|False
            },
            'PauseCluster': {'ClusterIdentifier': 'string'},
            'ResumeCluster': {'ClusterIdentifier': 'string'}
        },
        'Schedule': 'string',
        'IamRole': 'string',
        'ScheduledActionDescription': 'string',
        'State': 'ACTIVE'|'DISABLED',
        'NextInvocations': [datetime(2015, 1, 1)],
        'StartTime': datetime(2015, 1, 1),
        'EndTime': datetime(2015, 1, 1)
    }

    Notes on the response: ``Schedule`` is either a one-time
    "at(yyyy-mm-ddThh:mm:ss)" expression or a recurring
    "cron(Minutes Hours Day-of-month Month Day-of-week Year)" expression
    (invocations must be at least one hour apart; see Cron Expressions in
    the Amazon CloudWatch Events User Guide). ``IamRole`` must permit the
    scheduled API operation and allow the Amazon Redshift scheduler
    (Principal scheduler.redshift.amazonaws.com) to assume permissions on
    your behalf. ``NextInvocations`` lists the upcoming run times.

    :raises Redshift.Client.exceptions.ScheduledActionAlreadyExistsFault:
    :raises Redshift.Client.exceptions.ScheduledActionQuotaExceededFault:
    :raises Redshift.Client.exceptions.ScheduledActionTypeUnsupportedFault:
    :raises Redshift.Client.exceptions.InvalidScheduleFault:
    :raises Redshift.Client.exceptions.InvalidScheduledActionFault:
    :raises Redshift.Client.exceptions.UnauthorizedOperation:
    """
    pass
def create_snapshot_copy_grant(SnapshotCopyGrantName=None, KmsKeyId=None, Tags=None):
    """Create a snapshot copy grant.

    The grant permits Amazon Redshift to use a customer master key (CMK)
    from AWS Key Management Service (AWS KMS) to encrypt copied snapshots in
    a destination region. For more information about managing snapshot copy
    grants, see "Amazon Redshift Database Encryption" in the Amazon Redshift
    Cluster Management Guide.

    See also: AWS API Documentation

    :example: response = client.create_snapshot_copy_grant(
        SnapshotCopyGrantName='string',
        KmsKeyId='string',
        Tags=[{'Key': 'string', 'Value': 'string'}]
    )

    :type SnapshotCopyGrantName: string
    :param SnapshotCopyGrantName: [REQUIRED] Name of the snapshot copy grant,
        unique in the region for the AWS account. Constraints: 1 to 63
        alphanumeric characters or hyphens; alphabetic characters lowercase;
        first character a letter; cannot end with a hyphen or contain two
        consecutive hyphens; must be unique for all clusters within an AWS
        account.
    :type KmsKeyId: string
    :param KmsKeyId: Unique identifier of the CMK to which to grant Amazon
        Redshift permission. When omitted the default key is used.
    :type Tags: list
    :param Tags: Optional list of tag dicts, each with ``Key`` (string) and
        ``Value`` (string) describing a name/value pair for the resource.

    :rtype: dict
    :return: {
        'SnapshotCopyGrant': {
            'SnapshotCopyGrantName': 'string',
            'KmsKeyId': 'string',
            'Tags': [{'Key': 'string', 'Value': 'string'}]
        }
    }

    :raises Redshift.Client.exceptions.SnapshotCopyGrantAlreadyExistsFault:
    :raises Redshift.Client.exceptions.SnapshotCopyGrantQuotaExceededFault:
    :raises Redshift.Client.exceptions.LimitExceededFault:
    :raises Redshift.Client.exceptions.TagLimitExceededFault:
    :raises Redshift.Client.exceptions.InvalidTagFault:
    :raises Redshift.Client.exceptions.DependentServiceRequestThrottlingFault:
    """
    pass
def create_snapshot_schedule(ScheduleDefinitions=None, ScheduleIdentifier=None, ScheduleDescription=None, Tags=None, DryRun=None, NextInvocations=None):
    """Create a snapshot schedule.

    The schedule can be associated with a cluster, overriding the default
    system backup schedule.

    See also: AWS API Documentation

    :example: response = client.create_snapshot_schedule(
        ScheduleDefinitions=['string'],
        ScheduleIdentifier='string',
        ScheduleDescription='string',
        Tags=[{'Key': 'string', 'Value': 'string'}],
        DryRun=True|False,
        NextInvocations=123
    )

    :type ScheduleDefinitions: list
    :param ScheduleDefinitions: Definition of the snapshot schedule as a list
        of schedule-expression strings, e.g. 'cron(30 12 *)' or
        'rate(12 hours)'.
    :type ScheduleIdentifier: string
    :param ScheduleIdentifier: Unique identifier for the schedule; only
        alphanumeric characters are allowed.
    :type ScheduleDescription: string
    :param ScheduleDescription: Description of the snapshot schedule.
    :type Tags: list
    :param Tags: Optional set of tags used to search for the schedule; each
        tag is a dict with ``Key`` (string) and ``Value`` (string).
    :type DryRun: boolean
    :param DryRun:
    :type NextInvocations: integer
    :param NextInvocations:

    :rtype: dict
    :return: {
        'ScheduleDefinitions': ['string'],
        'ScheduleIdentifier': 'string',
        'ScheduleDescription': 'string',
        'Tags': [{'Key': 'string', 'Value': 'string'}],
        'NextInvocations': [datetime(2015, 1, 1)],
        'AssociatedClusterCount': 123,
        'AssociatedClusters': [
            {
                'ClusterIdentifier': 'string',
                'ScheduleAssociationState': 'MODIFYING'|'ACTIVE'|'FAILED'
            },
        ]
    }

    The response describes the created schedule: its definitions,
    identifier, description and tags, the upcoming invocation times, the
    number of clusters associated with the schedule, and up to 100
    associated clusters with their association state.

    :raises Redshift.Client.exceptions.SnapshotScheduleAlreadyExistsFault:
    :raises Redshift.Client.exceptions.InvalidScheduleFault:
    :raises Redshift.Client.exceptions.SnapshotScheduleQuotaExceededFault:
    :raises Redshift.Client.exceptions.TagLimitExceededFault:
    :raises Redshift.Client.exceptions.ScheduleDefinitionTypeUnsupportedFault:
    """
    pass
def create_tags(ResourceName=None, Tags=None):
    """Add tags to a cluster.

    A resource can carry at most 50 tags; attempting to create more returns
    an error and the attempt fails. If a specified key already exists on the
    resource, its value is updated with the new value.

    See also: AWS API Documentation

    :example: response = client.create_tags(
        ResourceName='string',
        Tags=[{'Key': 'string', 'Value': 'string'}]
    )

    :type ResourceName: string
    :param ResourceName: [REQUIRED] Amazon Resource Name (ARN) to tag, e.g.
        arn:aws:redshift:us-east-2:123456789:cluster:t1.
    :type Tags: list
    :param Tags: [REQUIRED] One or more name/value pairs to add as tags. Each
        tag is a dict with ``Key`` (string) and ``Value`` (string). On the
        CLI, tags are passed as Key/Value parameter pairs separated by a
        comma, with multiple tags separated by spaces, e.g.
        --tags 'Key'='owner','Value'='admin' 'Key'='environment','Value'='test' 'Key'='version','Value'='1.0'.

    :raises Redshift.Client.exceptions.TagLimitExceededFault:
    :raises Redshift.Client.exceptions.ResourceNotFoundFault:
    :raises Redshift.Client.exceptions.InvalidTagFault:
    """
    pass
def create_usage_limit(ClusterIdentifier=None, FeatureType=None, LimitType=None, Amount=None, Period=None, BreachAction=None, Tags=None):
    """Create a usage limit for an Amazon Redshift feature on a cluster.

    The created limit is identified by the usage limit identifier returned
    in the response.

    See also: AWS API Documentation

    :example: response = client.create_usage_limit(
        ClusterIdentifier='string',
        FeatureType='spectrum'|'concurrency-scaling',
        LimitType='time'|'data-scanned',
        Amount=123,
        Period='daily'|'weekly'|'monthly',
        BreachAction='log'|'emit-metric'|'disable',
        Tags=[{'Key': 'string', 'Value': 'string'}]
    )

    :type ClusterIdentifier: string
    :param ClusterIdentifier: [REQUIRED] Identifier of the cluster whose
        usage you want to limit.
    :type FeatureType: string
    :param FeatureType: [REQUIRED] The Amazon Redshift feature to limit.
    :type LimitType: string
    :param LimitType: [REQUIRED] Type of limit — a time duration or a data
        size, depending on the feature. If FeatureType is spectrum, LimitType
        must be data-scanned; if FeatureType is concurrency-scaling,
        LimitType must be time.
    :type Amount: integer
    :param Amount: [REQUIRED] The limit amount: minutes if time-based,
        terabytes (TB) if data-based. Must be a positive number.
    :type Period: string
    :param Period: Time period the amount applies to. A weekly period begins
        on Sunday. Default: monthly.
    :type BreachAction: string
    :param BreachAction: Action Amazon Redshift takes when the limit is
        reached. Default: log. See UsageLimit.
    :type Tags: list
    :param Tags: Optional list of tag dicts, each with ``Key`` (string) and
        ``Value`` (string) describing a name/value pair for the resource.

    :rtype: dict
    :return: {
        'UsageLimitId': 'string',
        'ClusterIdentifier': 'string',
        'FeatureType': 'spectrum'|'concurrency-scaling',
        'LimitType': 'time'|'data-scanned',
        'Amount': 123,
        'Period': 'daily'|'weekly'|'monthly',
        'BreachAction': 'log'|'emit-metric'|'disable',
        'Tags': [{'Key': 'string', 'Value': 'string'}]
    }

    Possible ``BreachAction`` values in the response:

    - log — log an event in a system table (the default);
    - emit-metric — emit CloudWatch metrics;
    - disable — disable the feature until the next usage period begins.

    :raises Redshift.Client.exceptions.ClusterNotFoundFault:
    :raises Redshift.Client.exceptions.InvalidClusterStateFault:
    :raises Redshift.Client.exceptions.LimitExceededFault:
    :raises Redshift.Client.exceptions.UsageLimitAlreadyExistsFault:
    :raises Redshift.Client.exceptions.InvalidUsageLimitFault:
    :raises Redshift.Client.exceptions.TagLimitExceededFault:
    :raises Redshift.Client.exceptions.UnsupportedOperationFault:
    """
    pass
def delete_cluster(ClusterIdentifier=None, SkipFinalClusterSnapshot=None, FinalClusterSnapshotIdentifier=None, FinalClusterSnapshotRetentionPeriod=None):
"""
Deletes a previously provisioned cluster without its final snapshot being created. A successful response from the web service indicates that the request was received correctly. Use DescribeClusters to monitor the status of the deletion. The delete operation cannot be canceled or reverted once submitted. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide .
If you want to shut down the cluster and retain it for future use, set SkipFinalClusterSnapshot to false and specify a name for FinalClusterSnapshotIdentifier . You can later restore this snapshot to resume using the cluster. If a final cluster snapshot is requested, the status of the cluster will be "final-snapshot" while the snapshot is being taken, then it\'s "deleting" once Amazon Redshift begins deleting the cluster.
For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide .
See also: AWS API Documentation
Exceptions
:example: response = client.delete_cluster(
ClusterIdentifier='string',
SkipFinalClusterSnapshot=True|False,
FinalClusterSnapshotIdentifier='string',
FinalClusterSnapshotRetentionPeriod=123
)
:type ClusterIdentifier: string
:param ClusterIdentifier: [REQUIRED]\nThe identifier of the cluster to be deleted.\nConstraints:\n\nMust contain lowercase characters.\nMust contain from 1 to 63 alphanumeric characters or hyphens.\nFirst character must be a letter.\nCannot end with a hyphen or contain two consecutive hyphens.\n\n
:type SkipFinalClusterSnapshot: boolean
:param SkipFinalClusterSnapshot: Determines whether a final snapshot of the cluster is created before Amazon Redshift deletes the cluster. If true , a final cluster snapshot is not created. If false , a final cluster snapshot is created before the cluster is deleted.\n\nNote\nThe FinalClusterSnapshotIdentifier parameter must be specified if SkipFinalClusterSnapshot is false .\n\nDefault: false\n
:type FinalClusterSnapshotIdentifier: string
:param FinalClusterSnapshotIdentifier: The identifier of the final snapshot that is to be created immediately before deleting the cluster. If this parameter is provided, SkipFinalClusterSnapshot must be false .\nConstraints:\n\nMust be 1 to 255 alphanumeric characters.\nFirst character must be a letter.\nCannot end with a hyphen or contain two consecutive hyphens.\n\n
:type FinalClusterSnapshotRetentionPeriod: integer
:param FinalClusterSnapshotRetentionPeriod: The number of days that a manual snapshot is retained. If the value is -1, the manual snapshot is retained indefinitely.\nThe value must be either -1 or an integer between 1 and 3,653.\nThe default value is -1.\n
:rtype: dict
ReturnsResponse Syntax
{
'Cluster': {
'ClusterIdentifier': 'string',
'NodeType': 'string',
'ClusterStatus': 'string',
'ClusterAvailabilityStatus': 'string',
'ModifyStatus': 'string',
'MasterUsername': 'string',
'DBName': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'ClusterCreateTime': datetime(2015, 1, 1),
'AutomatedSnapshotRetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'ClusterSecurityGroups': [
{
'ClusterSecurityGroupName': 'string',
'Status': 'string'
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'ClusterParameterGroups': [
{
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'ClusterParameterStatusList': [
{
'ParameterName': 'string',
'ParameterApplyStatus': 'string',
'ParameterApplyErrorDescription': 'string'
},
]
},
],
'ClusterSubnetGroupName': 'string',
'VpcId': 'string',
'AvailabilityZone': 'string',
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'MasterUserPassword': 'string',
'NodeType': 'string',
'NumberOfNodes': 123,
'ClusterType': 'string',
'ClusterVersion': 'string',
'AutomatedSnapshotRetentionPeriod': 123,
'ClusterIdentifier': 'string',
'PubliclyAccessible': True|False,
'EnhancedVpcRouting': True|False,
'MaintenanceTrackName': 'string',
'EncryptionType': 'string'
},
'ClusterVersion': 'string',
'AllowVersionUpgrade': True|False,
'NumberOfNodes': 123,
'PubliclyAccessible': True|False,
'Encrypted': True|False,
'RestoreStatus': {
'Status': 'string',
'CurrentRestoreRateInMegaBytesPerSecond': 123.0,
'SnapshotSizeInMegaBytes': 123,
'ProgressInMegaBytes': 123,
'ElapsedTimeInSeconds': 123,
'EstimatedTimeToCompletionInSeconds': 123
},
'DataTransferProgress': {
'Status': 'string',
'CurrentRateInMegaBytesPerSecond': 123.0,
'TotalDataInMegaBytes': 123,
'DataTransferredInMegaBytes': 123,
'EstimatedTimeToCompletionInSeconds': 123,
'ElapsedTimeInSeconds': 123
},
'HsmStatus': {
'HsmClientCertificateIdentifier': 'string',
'HsmConfigurationIdentifier': 'string',
'Status': 'string'
},
'ClusterSnapshotCopyStatus': {
'DestinationRegion': 'string',
'RetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'SnapshotCopyGrantName': 'string'
},
'ClusterPublicKey': 'string',
'ClusterNodes': [
{
'NodeRole': 'string',
'PrivateIPAddress': 'string',
'PublicIPAddress': 'string'
},
],
'ElasticIpStatus': {
'ElasticIp': 'string',
'Status': 'string'
},
'ClusterRevisionNumber': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'KmsKeyId': 'string',
'EnhancedVpcRouting': True|False,
'IamRoles': [
{
'IamRoleArn': 'string',
'ApplyStatus': 'string'
},
],
'PendingActions': [
'string',
],
'MaintenanceTrackName': 'string',
'ElasticResizeNumberOfNodeOptions': 'string',
'DeferredMaintenanceWindows': [
{
'DeferMaintenanceIdentifier': 'string',
'DeferMaintenanceStartTime': datetime(2015, 1, 1),
'DeferMaintenanceEndTime': datetime(2015, 1, 1)
},
],
'SnapshotScheduleIdentifier': 'string',
'SnapshotScheduleState': 'MODIFYING'|'ACTIVE'|'FAILED',
'ExpectedNextSnapshotScheduleTime': datetime(2015, 1, 1),
'ExpectedNextSnapshotScheduleTimeStatus': 'string',
'NextMaintenanceWindowStartTime': datetime(2015, 1, 1),
'ResizeInfo': {
'ResizeType': 'string',
'AllowCancelResize': True|False
}
}
}
Response Structure
(dict) --
Cluster (dict) --
Describes a cluster.
ClusterIdentifier (string) --
The unique identifier of the cluster.
NodeType (string) --
The node type for the nodes in the cluster.
ClusterStatus (string) --
The current state of the cluster. Possible values are the following:
available
available, prep-for-resize
available, resize-cleanup
cancelling-resize
creating
deleting
final-snapshot
hardware-failure
incompatible-hsm
incompatible-network
incompatible-parameters
incompatible-restore
modifying
paused
rebooting
renaming
resizing
rotating-keys
storage-full
updating-hsm
ClusterAvailabilityStatus (string) --
The availability status of the cluster for queries. Possible values are the following:
Available - The cluster is available for queries.
Unavailable - The cluster is not available for queries.
Maintenance - The cluster is intermittently available for queries due to maintenance activities.
Modifying - The cluster is intermittently available for queries due to changes that modify the cluster.
Failed - The cluster failed and is not available for queries.
ModifyStatus (string) --
The status of a modify operation, if any, initiated for the cluster.
MasterUsername (string) --
The master user name for the cluster. This name is used to connect to the database that is specified in the DBName parameter.
DBName (string) --
The name of the initial database that was created when the cluster was created. This same name is returned for the life of the cluster. If an initial database was not specified, a database named dev dev was created by default.
Endpoint (dict) --
The connection endpoint.
Address (string) --
The DNS address of the Cluster.
Port (integer) --
The port that the database engine is listening on.
ClusterCreateTime (datetime) --
The date and time that the cluster was created.
AutomatedSnapshotRetentionPeriod (integer) --
The number of days that automatic cluster snapshots are retained.
ManualSnapshotRetentionPeriod (integer) --
The default number of days to retain a manual snapshot. If the value is -1, the snapshot is retained indefinitely. This setting doesn\'t change the retention period of existing snapshots.
The value must be either -1 or an integer between 1 and 3,653.
ClusterSecurityGroups (list) --
A list of cluster security group that are associated with the cluster. Each security group is represented by an element that contains ClusterSecurityGroup.Name and ClusterSecurityGroup.Status subelements.
Cluster security groups are used when the cluster is not created in an Amazon Virtual Private Cloud (VPC). Clusters that are created in a VPC use VPC security groups, which are listed by the VpcSecurityGroups parameter.
(dict) --
Describes a cluster security group.
ClusterSecurityGroupName (string) --
The name of the cluster security group.
Status (string) --
The status of the cluster security group.
VpcSecurityGroups (list) --
A list of Amazon Virtual Private Cloud (Amazon VPC) security groups that are associated with the cluster. This parameter is returned only if the cluster is in a VPC.
(dict) --
Describes the members of a VPC security group.
VpcSecurityGroupId (string) --
The identifier of the VPC security group.
Status (string) --
The status of the VPC security group.
ClusterParameterGroups (list) --
The list of cluster parameter groups that are associated with this cluster. Each parameter group in the list is returned with its status.
(dict) --
Describes the status of a parameter group.
ParameterGroupName (string) --
The name of the cluster parameter group.
ParameterApplyStatus (string) --
The status of parameter updates.
ClusterParameterStatusList (list) --
The list of parameter statuses.
For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide .
(dict) --
Describes the status of a parameter group.
ParameterName (string) --
The name of the parameter.
ParameterApplyStatus (string) --
The status of the parameter that indicates whether the parameter is in sync with the database, waiting for a cluster reboot, or encountered an error when being applied.
The following are possible statuses and descriptions.
in-sync : The parameter value is in sync with the database.
pending-reboot : The parameter value will be applied after the cluster reboots.
applying : The parameter value is being applied to the database.
invalid-parameter : Cannot apply the parameter value because it has an invalid value or syntax.
apply-deferred : The parameter contains static property changes. The changes are deferred until the cluster reboots.
apply-error : Cannot connect to the cluster. The parameter change will be applied after the cluster reboots.
unknown-error : Cannot apply the parameter change right now. The change will be applied after the cluster reboots.
ParameterApplyErrorDescription (string) --
The error that prevented the parameter from being applied to the database.
ClusterSubnetGroupName (string) --
The name of the subnet group that is associated with the cluster. This parameter is valid only when the cluster is in a VPC.
VpcId (string) --
The identifier of the VPC the cluster is in, if the cluster is in a VPC.
AvailabilityZone (string) --
The name of the Availability Zone in which the cluster is located.
PreferredMaintenanceWindow (string) --
The weekly time range, in Universal Coordinated Time (UTC), during which system maintenance can occur.
PendingModifiedValues (dict) --
A value that, if present, indicates that changes to the cluster are pending. Specific pending changes are identified by subelements.
MasterUserPassword (string) --
The pending or in-progress change of the master user password for the cluster.
NodeType (string) --
The pending or in-progress change of the cluster\'s node type.
NumberOfNodes (integer) --
The pending or in-progress change of the number of nodes in the cluster.
ClusterType (string) --
The pending or in-progress change of the cluster type.
ClusterVersion (string) --
The pending or in-progress change of the service version.
AutomatedSnapshotRetentionPeriod (integer) --
The pending or in-progress change of the automated snapshot retention period.
ClusterIdentifier (string) --
The pending or in-progress change of the new identifier for the cluster.
PubliclyAccessible (boolean) --
The pending or in-progress change of the ability to connect to the cluster from the public network.
EnhancedVpcRouting (boolean) --
An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.
If this option is true , enhanced VPC routing is enabled.
Default: false
MaintenanceTrackName (string) --
The name of the maintenance track that the cluster will change to during the next maintenance window.
EncryptionType (string) --
The encryption type for a cluster. Possible values are: KMS and None. For the China region the possible values are None, and Legacy.
ClusterVersion (string) --
The version ID of the Amazon Redshift engine that is running on the cluster.
AllowVersionUpgrade (boolean) --
A boolean value that, if true , indicates that major version upgrades will be applied automatically to the cluster during the maintenance window.
NumberOfNodes (integer) --
The number of compute nodes in the cluster.
PubliclyAccessible (boolean) --
A boolean value that, if true , indicates that the cluster can be accessed from a public network.
Encrypted (boolean) --
A boolean value that, if true , indicates that data in the cluster is encrypted at rest.
RestoreStatus (dict) --
A value that describes the status of a cluster restore action. This parameter returns null if the cluster was not created by restoring a snapshot.
Status (string) --
The status of the restore action. Returns starting, restoring, completed, or failed.
CurrentRestoreRateInMegaBytesPerSecond (float) --
The number of megabytes per second being transferred from the backup storage. Returns the average rate for a completed backup. This field is only updated when you restore to DC2 and DS2 node types.
SnapshotSizeInMegaBytes (integer) --
The size of the set of snapshot data used to restore the cluster. This field is only updated when you restore to DC2 and DS2 node types.
ProgressInMegaBytes (integer) --
The number of megabytes that have been transferred from snapshot storage. This field is only updated when you restore to DC2 and DS2 node types.
ElapsedTimeInSeconds (integer) --
The amount of time an in-progress restore has been running, or the amount of time it took a completed restore to finish. This field is only updated when you restore to DC2 and DS2 node types.
EstimatedTimeToCompletionInSeconds (integer) --
The estimate of the time remaining before the restore will complete. Returns 0 for a completed restore. This field is only updated when you restore to DC2 and DS2 node types.
DataTransferProgress (dict) --
Status (string) --
Describes the status of the cluster. While the transfer is in progress the status is transferringdata .
CurrentRateInMegaBytesPerSecond (float) --
Describes the data transfer rate in MB\'s per second.
TotalDataInMegaBytes (integer) --
Describes the total amount of data to be transfered in megabytes.
DataTransferredInMegaBytes (integer) --
Describes the total amount of data that has been transfered in MB\'s.
EstimatedTimeToCompletionInSeconds (integer) --
Describes the estimated number of seconds remaining to complete the transfer.
ElapsedTimeInSeconds (integer) --
Describes the number of seconds that have elapsed during the data transfer.
HsmStatus (dict) --
A value that reports whether the Amazon Redshift cluster has finished applying any hardware security module (HSM) settings changes specified in a modify cluster command.
Values: active, applying
HsmClientCertificateIdentifier (string) --
Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM.
HsmConfigurationIdentifier (string) --
Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM.
Status (string) --
Reports whether the Amazon Redshift cluster has finished applying any HSM settings changes specified in a modify cluster command.
Values: active, applying
ClusterSnapshotCopyStatus (dict) --
A value that returns the destination region and retention period that are configured for cross-region snapshot copy.
DestinationRegion (string) --
The destination region that snapshots are automatically copied to when cross-region snapshot copy is enabled.
RetentionPeriod (integer) --
The number of days that automated snapshots are retained in the destination region after they are copied from a source region.
ManualSnapshotRetentionPeriod (integer) --
The number of days that automated snapshots are retained in the destination region after they are copied from a source region. If the value is -1, the manual snapshot is retained indefinitely.
The value must be either -1 or an integer between 1 and 3,653.
SnapshotCopyGrantName (string) --
The name of the snapshot copy grant.
ClusterPublicKey (string) --
The public key for the cluster.
ClusterNodes (list) --
The nodes in the cluster.
(dict) --
The identifier of a node in a cluster.
NodeRole (string) --
Whether the node is a leader node or a compute node.
PrivateIPAddress (string) --
The private IP address of a node within a cluster.
PublicIPAddress (string) --
The public IP address of a node within a cluster.
ElasticIpStatus (dict) --
The status of the elastic IP (EIP) address.
ElasticIp (string) --
The elastic IP (EIP) address for the cluster.
Status (string) --
The status of the elastic IP (EIP) address.
ClusterRevisionNumber (string) --
The specific revision number of the database in the cluster.
Tags (list) --
The list of tags for the cluster.
(dict) --
A tag consisting of a name/value pair for a resource.
Key (string) --
The key, or name, for the resource tag.
Value (string) --
The value for the resource tag.
KmsKeyId (string) --
The AWS Key Management Service (AWS KMS) key ID of the encryption key used to encrypt data in the cluster.
EnhancedVpcRouting (boolean) --
An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.
If this option is true , enhanced VPC routing is enabled.
Default: false
IamRoles (list) --
A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services.
(dict) --
An AWS Identity and Access Management (IAM) role that can be used by the associated Amazon Redshift cluster to access other AWS services.
IamRoleArn (string) --
The Amazon Resource Name (ARN) of the IAM role, for example, arn:aws:iam::123456789012:role/RedshiftCopyUnload .
ApplyStatus (string) --
A value that describes the status of the IAM role\'s association with an Amazon Redshift cluster.
The following are possible statuses and descriptions.
in-sync : The role is available for use by the cluster.
adding : The role is in the process of being associated with the cluster.
removing : The role is in the process of being disassociated with the cluster.
PendingActions (list) --
Cluster operations that are waiting to be started.
(string) --
MaintenanceTrackName (string) --
The name of the maintenance track for the cluster.
ElasticResizeNumberOfNodeOptions (string) --
The number of nodes that you can resize the cluster to with the elastic resize method.
DeferredMaintenanceWindows (list) --
Describes a group of DeferredMaintenanceWindow objects.
(dict) --
Describes a deferred maintenance window
DeferMaintenanceIdentifier (string) --
A unique identifier for the maintenance window.
DeferMaintenanceStartTime (datetime) --
A timestamp for the beginning of the time period when we defer maintenance.
DeferMaintenanceEndTime (datetime) --
A timestamp for the end of the time period when we defer maintenance.
SnapshotScheduleIdentifier (string) --
A unique identifier for the cluster snapshot schedule.
SnapshotScheduleState (string) --
The current state of the cluster snapshot schedule.
ExpectedNextSnapshotScheduleTime (datetime) --
The date and time when the next snapshot is expected to be taken for clusters with a valid snapshot schedule and backups enabled.
ExpectedNextSnapshotScheduleTimeStatus (string) --
The status of next expected snapshot for clusters having a valid snapshot schedule and backups enabled. Possible values are the following:
OnTrack - The next snapshot is expected to be taken on time.
Pending - The next snapshot is pending to be taken.
NextMaintenanceWindowStartTime (datetime) --
The date and time in UTC when system maintenance can begin.
ResizeInfo (dict) --
Returns the following:
AllowCancelResize: a boolean value indicating if the resize operation can be cancelled.
ResizeType: Returns ClassicResize
ResizeType (string) --
Returns the value ClassicResize .
AllowCancelResize (boolean) --
A boolean value indicating if the resize operation can be cancelled.
Exceptions
Redshift.Client.exceptions.ClusterNotFoundFault
Redshift.Client.exceptions.InvalidClusterStateFault
Redshift.Client.exceptions.ClusterSnapshotAlreadyExistsFault
Redshift.Client.exceptions.ClusterSnapshotQuotaExceededFault
Redshift.Client.exceptions.InvalidRetentionPeriodFault
:return: {
'Cluster': {
'ClusterIdentifier': 'string',
'NodeType': 'string',
'ClusterStatus': 'string',
'ClusterAvailabilityStatus': 'string',
'ModifyStatus': 'string',
'MasterUsername': 'string',
'DBName': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'ClusterCreateTime': datetime(2015, 1, 1),
'AutomatedSnapshotRetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'ClusterSecurityGroups': [
{
'ClusterSecurityGroupName': 'string',
'Status': 'string'
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'ClusterParameterGroups': [
{
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'ClusterParameterStatusList': [
{
'ParameterName': 'string',
'ParameterApplyStatus': 'string',
'ParameterApplyErrorDescription': 'string'
},
]
},
],
'ClusterSubnetGroupName': 'string',
'VpcId': 'string',
'AvailabilityZone': 'string',
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'MasterUserPassword': 'string',
'NodeType': 'string',
'NumberOfNodes': 123,
'ClusterType': 'string',
'ClusterVersion': 'string',
'AutomatedSnapshotRetentionPeriod': 123,
'ClusterIdentifier': 'string',
'PubliclyAccessible': True|False,
'EnhancedVpcRouting': True|False,
'MaintenanceTrackName': 'string',
'EncryptionType': 'string'
},
'ClusterVersion': 'string',
'AllowVersionUpgrade': True|False,
'NumberOfNodes': 123,
'PubliclyAccessible': True|False,
'Encrypted': True|False,
'RestoreStatus': {
'Status': 'string',
'CurrentRestoreRateInMegaBytesPerSecond': 123.0,
'SnapshotSizeInMegaBytes': 123,
'ProgressInMegaBytes': 123,
'ElapsedTimeInSeconds': 123,
'EstimatedTimeToCompletionInSeconds': 123
},
'DataTransferProgress': {
'Status': 'string',
'CurrentRateInMegaBytesPerSecond': 123.0,
'TotalDataInMegaBytes': 123,
'DataTransferredInMegaBytes': 123,
'EstimatedTimeToCompletionInSeconds': 123,
'ElapsedTimeInSeconds': 123
},
'HsmStatus': {
'HsmClientCertificateIdentifier': 'string',
'HsmConfigurationIdentifier': 'string',
'Status': 'string'
},
'ClusterSnapshotCopyStatus': {
'DestinationRegion': 'string',
'RetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'SnapshotCopyGrantName': 'string'
},
'ClusterPublicKey': 'string',
'ClusterNodes': [
{
'NodeRole': 'string',
'PrivateIPAddress': 'string',
'PublicIPAddress': 'string'
},
],
'ElasticIpStatus': {
'ElasticIp': 'string',
'Status': 'string'
},
'ClusterRevisionNumber': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'KmsKeyId': 'string',
'EnhancedVpcRouting': True|False,
'IamRoles': [
{
'IamRoleArn': 'string',
'ApplyStatus': 'string'
},
],
'PendingActions': [
'string',
],
'MaintenanceTrackName': 'string',
'ElasticResizeNumberOfNodeOptions': 'string',
'DeferredMaintenanceWindows': [
{
'DeferMaintenanceIdentifier': 'string',
'DeferMaintenanceStartTime': datetime(2015, 1, 1),
'DeferMaintenanceEndTime': datetime(2015, 1, 1)
},
],
'SnapshotScheduleIdentifier': 'string',
'SnapshotScheduleState': 'MODIFYING'|'ACTIVE'|'FAILED',
'ExpectedNextSnapshotScheduleTime': datetime(2015, 1, 1),
'ExpectedNextSnapshotScheduleTimeStatus': 'string',
'NextMaintenanceWindowStartTime': datetime(2015, 1, 1),
'ResizeInfo': {
'ResizeType': 'string',
'AllowCancelResize': True|False
}
}
}
:returns:
available
available, prep-for-resize
available, resize-cleanup
cancelling-resize
creating
deleting
final-snapshot
hardware-failure
incompatible-hsm
incompatible-network
incompatible-parameters
incompatible-restore
modifying
paused
rebooting
renaming
resizing
rotating-keys
storage-full
updating-hsm
"""
pass
def delete_cluster_parameter_group(ParameterGroupName=None):
    """Delete a specified Amazon Redshift parameter group.

    See also: AWS API Documentation

    :example: response = client.delete_cluster_parameter_group(
        ParameterGroupName='string'
    )

    :type ParameterGroupName: string
    :param ParameterGroupName: [REQUIRED] The name of the parameter group to
        be deleted. Constraints: must be the name of an existing cluster
        parameter group; cannot delete a default cluster parameter group.

    Raises:
        Redshift.Client.exceptions.InvalidClusterParameterGroupStateFault
        Redshift.Client.exceptions.ClusterParameterGroupNotFoundFault
    """
    pass
def delete_cluster_security_group(ClusterSecurityGroupName=None):
    """Delete an Amazon Redshift security group.

    For information about managing security groups, go to Amazon Redshift
    Cluster Security Groups in the Amazon Redshift Cluster Management Guide .

    See also: AWS API Documentation

    :example: response = client.delete_cluster_security_group(
        ClusterSecurityGroupName='string'
    )

    :type ClusterSecurityGroupName: string
    :param ClusterSecurityGroupName: [REQUIRED] The name of the cluster
        security group to be deleted.
    """
    pass
def delete_cluster_snapshot(SnapshotIdentifier=None, SnapshotClusterIdentifier=None):
    """Delete the specified manual snapshot.

    The snapshot must be in the available state, with no other users
    authorized to access the snapshot.

    Unlike automated snapshots, manual snapshots are retained even after you
    delete your cluster. Amazon Redshift does not delete your manual
    snapshots; you must delete a manual snapshot explicitly to avoid getting
    charged. If other accounts are authorized to access the snapshot, you
    must revoke all of the authorizations before you can delete the snapshot.

    See also: AWS API Documentation

    :example: response = client.delete_cluster_snapshot(
        SnapshotIdentifier='string',
        SnapshotClusterIdentifier='string'
    )

    :type SnapshotIdentifier: string
    :param SnapshotIdentifier: [REQUIRED] The unique identifier of the manual
        snapshot to be deleted. Constraints: must be the name of an existing
        snapshot that is in the available , failed , or cancelled state.

    :type SnapshotClusterIdentifier: string
    :param SnapshotClusterIdentifier: The unique identifier of the cluster
        the snapshot was created from. This parameter is required if your IAM
        user has a policy containing a snapshot resource element that
        specifies anything other than * for the cluster name. Constraints:
        must be the name of a valid cluster.

    :rtype: dict
    :return: {
        'Snapshot': {
            'SnapshotIdentifier': 'string',
            'ClusterIdentifier': 'string',
            'SnapshotCreateTime': datetime(2015, 1, 1),
            'Status': 'string',
            'Port': 123,
            'AvailabilityZone': 'string',
            'ClusterCreateTime': datetime(2015, 1, 1),
            'MasterUsername': 'string',
            'ClusterVersion': 'string',
            'SnapshotType': 'string',
            'NodeType': 'string',
            'NumberOfNodes': 123,
            'DBName': 'string',
            'VpcId': 'string',
            'Encrypted': True|False,
            'KmsKeyId': 'string',
            'EncryptedWithHSM': True|False,
            'AccountsWithRestoreAccess': [
                {
                    'AccountId': 'string',
                    'AccountAlias': 'string'
                },
            ],
            'OwnerAccount': 'string',
            'TotalBackupSizeInMegaBytes': 123.0,
            'ActualIncrementalBackupSizeInMegaBytes': 123.0,
            'BackupProgressInMegaBytes': 123.0,
            'CurrentBackupRateInMegaBytesPerSecond': 123.0,
            'EstimatedSecondsToCompletion': 123,
            'ElapsedTimeInSeconds': 123,
            'SourceRegion': 'string',
            'Tags': [
                {
                    'Key': 'string',
                    'Value': 'string'
                },
            ],
            'RestorableNodeTypes': [
                'string',
            ],
            'EnhancedVpcRouting': True|False,
            'MaintenanceTrackName': 'string',
            'ManualSnapshotRetentionPeriod': 123,
            'ManualSnapshotRemainingDays': 123,
            'SnapshotRetentionStartTime': datetime(2015, 1, 1)
        }
    }

    Response structure (Snapshot, dict -- describes a snapshot):
        SnapshotIdentifier (string) -- the snapshot identifier provided in
            the request.
        ClusterIdentifier (string) -- the identifier of the cluster for
            which the snapshot was taken.
        SnapshotCreateTime (datetime) -- the time (in UTC format) when
            Amazon Redshift began the snapshot; the snapshot contains a copy
            of the cluster data as of this exact time.
        Status (string) -- the snapshot status; the value depends on the API
            operation used: CreateClusterSnapshot and CopyClusterSnapshot
            return "creating"; DescribeClusterSnapshots returns "creating",
            "available", "final snapshot", or "failed";
            DeleteClusterSnapshot returns "deleted".
        Port (integer) -- the port that the cluster is listening on.
        AvailabilityZone (string) -- the Availability Zone in which the
            cluster was created.
        ClusterCreateTime (datetime) -- the time (UTC) when the cluster was
            originally created.
        MasterUsername (string) -- the master user name for the cluster.
        ClusterVersion (string) -- the version ID of the Amazon Redshift
            engine that is running on the cluster.
        SnapshotType (string) -- the snapshot type; snapshots created using
            CreateClusterSnapshot and CopyClusterSnapshot are of type
            "manual".
        NodeType (string) -- the node type of the nodes in the cluster.
        NumberOfNodes (integer) -- the number of nodes in the cluster.
        DBName (string) -- the name of the database that was created when
            the cluster was created.
        VpcId (string) -- the VPC identifier of the cluster if the snapshot
            is from a cluster in a VPC; otherwise, this field is not in the
            output.
        Encrypted (boolean) -- if true , the data in the snapshot is
            encrypted at rest.
        KmsKeyId (string) -- the AWS Key Management Service (KMS) key ID of
            the encryption key that was used to encrypt data in the cluster
            from which the snapshot was taken.
        EncryptedWithHSM (boolean) -- indicates whether the snapshot data is
            encrypted using the HSM keys of the source cluster.
        AccountsWithRestoreAccess (list) -- the AWS customer accounts
            authorized to restore the snapshot (AccountId / AccountAlias;
            for AWS support, the alias identifier is
            amazon-redshift-support ); null if no accounts are authorized;
            visible only to the snapshot owner.
        OwnerAccount (string) -- for manual snapshots, the AWS customer
            account used to create or copy the snapshot; for automatic
            snapshots, the owner of the cluster. The owner can perform all
            snapshot actions, such as sharing a manual snapshot.
        TotalBackupSizeInMegaBytes (float) -- the size of the complete set
            of backup data that would be used to restore the cluster.
        ActualIncrementalBackupSizeInMegaBytes (float) -- the size of the
            incremental backup.
        BackupProgressInMegaBytes (float) -- the number of megabytes that
            have been transferred to the snapshot backup.
        CurrentBackupRateInMegaBytesPerSecond (float) -- the number of
            megabytes per second being transferred to the snapshot backup;
            returns 0 for a completed backup.
        EstimatedSecondsToCompletion (integer) -- the estimate of the time
            remaining before the snapshot backup will complete; returns 0
            for a completed backup.
        ElapsedTimeInSeconds (integer) -- the amount of time an in-progress
            snapshot backup has been running, or the amount of time it took
            a completed backup to finish.
        SourceRegion (string) -- the source region from which the snapshot
            was copied.
        Tags (list) -- the list of tags for the cluster snapshot; each tag
            is a Key / Value name-value pair for the resource.
        RestorableNodeTypes (list of string) -- the node types that this
            cluster snapshot is able to restore into.
        EnhancedVpcRouting (boolean) -- whether the cluster was created with
            enhanced VPC routing enabled (requires the cluster to be in a
            VPC; see Enhanced VPC Routing in the Amazon Redshift Cluster
            Management Guide). Default: false
        MaintenanceTrackName (string) -- the name of the maintenance track
            for the snapshot.
        ManualSnapshotRetentionPeriod (integer) -- the number of days that a
            manual snapshot is retained; -1 means the manual snapshot is
            retained indefinitely. The value must be either -1 or an integer
            between 1 and 3,653.
        ManualSnapshotRemainingDays (integer) -- the number of days until a
            manual snapshot will pass its retention period.
        SnapshotRetentionStartTime (datetime) -- a timestamp representing
            the start of the retention period for the snapshot.

    Raises:
        Redshift.Client.exceptions.InvalidClusterSnapshotStateFault
        Redshift.Client.exceptions.ClusterSnapshotNotFoundFault
    """
    pass
def delete_cluster_subnet_group(ClusterSubnetGroupName=None):
    """Delete the specified cluster subnet group.

    See also: AWS API Documentation

    :example: response = client.delete_cluster_subnet_group(
        ClusterSubnetGroupName='string'
    )

    :type ClusterSubnetGroupName: string
    :param ClusterSubnetGroupName: [REQUIRED] The name of the cluster subnet
        group name to be deleted.
    """
    pass
def delete_event_subscription(SubscriptionName=None):
    """Delete an Amazon Redshift event notification subscription.

    See also: AWS API Documentation

    :example: response = client.delete_event_subscription(
        SubscriptionName='string'
    )

    :type SubscriptionName: string
    :param SubscriptionName: [REQUIRED] The name of the Amazon Redshift event
        notification subscription to be deleted.
    """
    pass
def delete_hsm_client_certificate(HsmClientCertificateIdentifier=None):
    """Delete the specified HSM client certificate.

    See also: AWS API Documentation

    :example: response = client.delete_hsm_client_certificate(
        HsmClientCertificateIdentifier='string'
    )

    :type HsmClientCertificateIdentifier: string
    :param HsmClientCertificateIdentifier: [REQUIRED] The identifier of the
        HSM client certificate to be deleted.
    """
    pass
def delete_hsm_configuration(HsmConfigurationIdentifier=None):
    """Delete the specified Amazon Redshift HSM configuration.

    See also: AWS API Documentation

    :example: response = client.delete_hsm_configuration(
        HsmConfigurationIdentifier='string'
    )

    :type HsmConfigurationIdentifier: string
    :param HsmConfigurationIdentifier: [REQUIRED] The identifier of the
        Amazon Redshift HSM configuration to be deleted.
    """
    pass
def delete_scheduled_action(ScheduledActionName=None):
    """Delete a scheduled action.

    See also: AWS API Documentation

    :example: response = client.delete_scheduled_action(
        ScheduledActionName='string'
    )

    :type ScheduledActionName: string
    :param ScheduledActionName: [REQUIRED] The name of the scheduled action
        to delete.
    """
    pass
def delete_snapshot_copy_grant(SnapshotCopyGrantName=None):
    """Delete the specified snapshot copy grant.

    See also: AWS API Documentation

    :example: response = client.delete_snapshot_copy_grant(
        SnapshotCopyGrantName='string'
    )

    :type SnapshotCopyGrantName: string
    :param SnapshotCopyGrantName: [REQUIRED] The name of the snapshot copy
        grant to delete.
    """
    pass
def delete_snapshot_schedule(ScheduleIdentifier=None):
    """Delete a snapshot schedule.

    See also: AWS API Documentation

    :example: response = client.delete_snapshot_schedule(
        ScheduleIdentifier='string'
    )

    :type ScheduleIdentifier: string
    :param ScheduleIdentifier: [REQUIRED] A unique identifier of the snapshot
        schedule to delete.
    """
    pass
def delete_tags(ResourceName=None, TagKeys=None):
    """Delete tags from a resource.

    You must provide the ARN of the resource from which you want to delete
    the tag or tags.

    See also: AWS API Documentation

    :example: response = client.delete_tags(
        ResourceName='string',
        TagKeys=[
            'string',
        ]
    )

    :type ResourceName: string
    :param ResourceName: [REQUIRED] The Amazon Resource Name (ARN) from which
        you want to remove the tag or tags. For example,
        arn:aws:redshift:us-east-2:123456789:cluster:t1 .

    :type TagKeys: list
    :param TagKeys: [REQUIRED] The tag key that you want to delete. (string)

    Raises:
        Redshift.Client.exceptions.ResourceNotFoundFault
        Redshift.Client.exceptions.InvalidTagFault
    """
    pass
def delete_usage_limit(UsageLimitId=None):
    """Delete a usage limit from a cluster.

    See also: AWS API Documentation

    :example: response = client.delete_usage_limit(
        UsageLimitId='string'
    )

    :type UsageLimitId: string
    :param UsageLimitId: [REQUIRED] The identifier of the usage limit to
        delete.
    """
    pass
def describe_account_attributes(AttributeNames=None):
    """
    Returns a list of attributes attached to an account
    See also: AWS API Documentation
    :example: response = client.describe_account_attributes(
    AttributeNames=[
    'string',
    ]
    )
    :type AttributeNames: list
    :param AttributeNames: A list of attribute names.\n\n(string) --\n\n
    :rtype: dict
    ReturnsResponse Syntax{
    'AccountAttributes': [
    {
    'AttributeName': 'string',
    'AttributeValues': [
    {
    'AttributeValue': 'string'
    },
    ]
    },
    ]
    }
    Response Structure
    (dict) --
    AccountAttributes (list) --A list of attributes assigned to an account.
    (dict) --A name value pair that describes an aspect of an account.
    AttributeName (string) --The name of the attribute.
    AttributeValues (list) --A list of attribute values.
    (dict) --Describes an attribute value.
    AttributeValue (string) --The value of the attribute.
    :return: {
    'AccountAttributes': [
    {
    'AttributeName': 'string',
    'AttributeValues': [
    {
    'AttributeValue': 'string'
    },
    ]
    },
    ]
    }
    """
    # Documentation stub only — no implementation here; calling it returns None.
    pass
def describe_cluster_db_revisions(ClusterIdentifier=None, MaxRecords=None, Marker=None):
    """
    Returns an array of ClusterDbRevision objects.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.describe_cluster_db_revisions(
    ClusterIdentifier='string',
    MaxRecords=123,
    Marker='string'
    )
    :type ClusterIdentifier: string
    :param ClusterIdentifier: A unique identifier for a cluster whose ClusterDbRevisions you are requesting. This parameter is case sensitive. All clusters defined for an account are returned by default.
    :type MaxRecords: integer
    :param MaxRecords: The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in the marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the marker parameter and retrying the request.\nDefault: 100\nConstraints: minimum 20, maximum 100.\n
    :type Marker: string
    :param Marker: An optional parameter that specifies the starting point for returning a set of response records. When the results of a DescribeClusterDbRevisions request exceed the value specified in MaxRecords , Amazon Redshift returns a value in the marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the marker parameter and retrying the request.\nConstraints: You can specify either the ClusterIdentifier parameter, or the marker parameter, but not both.\n
    :rtype: dict
    ReturnsResponse Syntax
    {
    'Marker': 'string',
    'ClusterDbRevisions': [
    {
    'ClusterIdentifier': 'string',
    'CurrentDatabaseRevision': 'string',
    'DatabaseRevisionReleaseDate': datetime(2015, 1, 1),
    'RevisionTargets': [
    {
    'DatabaseRevision': 'string',
    'Description': 'string',
    'DatabaseRevisionReleaseDate': datetime(2015, 1, 1)
    },
    ]
    },
    ]
    }
    Response Structure
    (dict) --
    Marker (string) --
    A string representing the starting point for the next set of revisions. If a value is returned in a response, you can retrieve the next set of revisions by providing the value in the marker parameter and retrying the command. If the marker field is empty, all revisions have already been returned.
    ClusterDbRevisions (list) --
    A list of revisions.
    (dict) --
    Describes a ClusterDbRevision .
    ClusterIdentifier (string) --
    The unique identifier of the cluster.
    CurrentDatabaseRevision (string) --
    A string representing the current cluster version.
    DatabaseRevisionReleaseDate (datetime) --
    The date on which the database revision was released.
    RevisionTargets (list) --
    A list of RevisionTarget objects, where each object describes the database revision that a cluster can be updated to.
    (dict) --
    Describes a RevisionTarget .
    DatabaseRevision (string) --
    A unique string that identifies the version to update the cluster to. You can use this value in ModifyClusterDbRevision .
    Description (string) --
    A string that describes the changes and features that will be applied to the cluster when it is updated to the corresponding ClusterDbRevision .
    DatabaseRevisionReleaseDate (datetime) --
    The date on which the database revision was released.
    Exceptions
    Redshift.Client.exceptions.ClusterNotFoundFault
    Redshift.Client.exceptions.InvalidClusterStateFault
    :return: {
    'Marker': 'string',
    'ClusterDbRevisions': [
    {
    'ClusterIdentifier': 'string',
    'CurrentDatabaseRevision': 'string',
    'DatabaseRevisionReleaseDate': datetime(2015, 1, 1),
    'RevisionTargets': [
    {
    'DatabaseRevision': 'string',
    'Description': 'string',
    'DatabaseRevisionReleaseDate': datetime(2015, 1, 1)
    },
    ]
    },
    ]
    }
    :returns:
    Redshift.Client.exceptions.ClusterNotFoundFault
    Redshift.Client.exceptions.InvalidClusterStateFault
    """
    # Documentation stub only — no implementation here; calling it returns None.
    pass
def describe_cluster_parameter_groups(ParameterGroupName=None, MaxRecords=None, Marker=None, TagKeys=None, TagValues=None):
    """
    Returns a list of Amazon Redshift parameter groups, including parameter groups you created and the default parameter group. For each parameter group, the response includes the parameter group name, description, and parameter group family name. You can optionally specify a name to retrieve the description of a specific parameter group.
    For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide .
    If you specify both tag keys and tag values in the same request, Amazon Redshift returns all parameter groups that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all parameter groups that have any combination of those values are returned.
    If both tag keys and values are omitted from the request, parameter groups are returned regardless of whether they have tag keys or values associated with them.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.describe_cluster_parameter_groups(
    ParameterGroupName='string',
    MaxRecords=123,
    Marker='string',
    TagKeys=[
    'string',
    ],
    TagValues=[
    'string',
    ]
    )
    :type ParameterGroupName: string
    :param ParameterGroupName: The name of a specific parameter group for which to return details. By default, details about all parameter groups and the default parameter group are returned.
    :type MaxRecords: integer
    :param MaxRecords: The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.\nDefault: 100\nConstraints: minimum 20, maximum 100.\n
    :type Marker: string
    :param Marker: An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterParameterGroups request exceed the value specified in MaxRecords , AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.
    :type TagKeys: list
    :param TagKeys: A tag key or keys for which you want to return all matching cluster parameter groups that are associated with the specified key or keys. For example, suppose that you have parameter groups that are tagged with keys called owner and environment . If you specify both of these tag keys in the request, Amazon Redshift returns a response with the parameter groups that have either or both of these tag keys associated with them.\n\n(string) --\n\n
    :type TagValues: list
    :param TagValues: A tag value or values for which you want to return all matching cluster parameter groups that are associated with the specified tag value or values. For example, suppose that you have parameter groups that are tagged with values called admin and test . If you specify both of these tag values in the request, Amazon Redshift returns a response with the parameter groups that have either or both of these tag values associated with them.\n\n(string) --\n\n
    :rtype: dict
    ReturnsResponse Syntax
    {
    'Marker': 'string',
    'ParameterGroups': [
    {
    'ParameterGroupName': 'string',
    'ParameterGroupFamily': 'string',
    'Description': 'string',
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    },
    ]
    }
    Response Structure
    (dict) --
    Contains the output from the DescribeClusterParameterGroups action.
    Marker (string) --
    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.
    ParameterGroups (list) --
    A list of ClusterParameterGroup instances. Each instance describes one cluster parameter group.
    (dict) --
    Describes a parameter group.
    ParameterGroupName (string) --
    The name of the cluster parameter group.
    ParameterGroupFamily (string) --
    The name of the cluster parameter group family that this cluster parameter group is compatible with.
    Description (string) --
    The description of the parameter group.
    Tags (list) --
    The list of tags for the cluster parameter group.
    (dict) --
    A tag consisting of a name/value pair for a resource.
    Key (string) --
    The key, or name, for the resource tag.
    Value (string) --
    The value for the resource tag.
    Exceptions
    Redshift.Client.exceptions.ClusterParameterGroupNotFoundFault
    Redshift.Client.exceptions.InvalidTagFault
    :return: {
    'Marker': 'string',
    'ParameterGroups': [
    {
    'ParameterGroupName': 'string',
    'ParameterGroupFamily': 'string',
    'Description': 'string',
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    },
    ]
    }
    :returns:
    Redshift.Client.exceptions.ClusterParameterGroupNotFoundFault
    Redshift.Client.exceptions.InvalidTagFault
    """
    # Documentation stub only — no implementation here; calling it returns None.
    pass
def describe_cluster_parameters(ParameterGroupName=None, Source=None, MaxRecords=None, Marker=None):
    """
    Returns a detailed list of parameters contained within the specified Amazon Redshift parameter group. For each parameter the response includes information such as parameter name, description, data type, value, whether the parameter value is modifiable, and so on.
    You can specify source filter to retrieve parameters of only specific type. For example, to retrieve parameters that were modified by a user action such as from ModifyClusterParameterGroup , you can specify source equal to user .
    For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide .
    See also: AWS API Documentation
    Exceptions
    :example: response = client.describe_cluster_parameters(
    ParameterGroupName='string',
    Source='string',
    MaxRecords=123,
    Marker='string'
    )
    :type ParameterGroupName: string
    :param ParameterGroupName: [REQUIRED]\nThe name of a cluster parameter group for which to return details.\n
    :type Source: string
    :param Source: The parameter types to return. Specify user to show parameters that are different from the default. Similarly, specify engine-default to show parameters that are the same as the default parameter group.\nDefault: All parameter types returned.\nValid Values: user | engine-default\n
    :type MaxRecords: integer
    :param MaxRecords: The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.\nDefault: 100\nConstraints: minimum 20, maximum 100.\n
    :type Marker: string
    :param Marker: An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterParameters request exceed the value specified in MaxRecords , AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.
    :rtype: dict
    ReturnsResponse Syntax
    {
    'Parameters': [
    {
    'ParameterName': 'string',
    'ParameterValue': 'string',
    'Description': 'string',
    'Source': 'string',
    'DataType': 'string',
    'AllowedValues': 'string',
    'ApplyType': 'static'|'dynamic',
    'IsModifiable': True|False,
    'MinimumEngineVersion': 'string'
    },
    ],
    'Marker': 'string'
    }
    Response Structure
    (dict) --
    Contains the output from the DescribeClusterParameters action.
    Parameters (list) --
    A list of Parameter instances. Each instance lists the parameters of one cluster parameter group.
    (dict) --
    Describes a parameter in a cluster parameter group.
    ParameterName (string) --
    The name of the parameter.
    ParameterValue (string) --
    The value of the parameter.
    Description (string) --
    A description of the parameter.
    Source (string) --
    The source of the parameter value, such as "engine-default" or "user".
    DataType (string) --
    The data type of the parameter.
    AllowedValues (string) --
    The valid range of values for the parameter.
    ApplyType (string) --
    Specifies how to apply the WLM configuration parameter. Some properties can be applied dynamically, while other properties require that any associated clusters be rebooted for the configuration changes to be applied. For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide .
    IsModifiable (boolean) --
    If true , the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.
    MinimumEngineVersion (string) --
    The earliest engine version to which the parameter can apply.
    Marker (string) --
    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.
    Exceptions
    Redshift.Client.exceptions.ClusterParameterGroupNotFoundFault
    :return: {
    'Parameters': [
    {
    'ParameterName': 'string',
    'ParameterValue': 'string',
    'Description': 'string',
    'Source': 'string',
    'DataType': 'string',
    'AllowedValues': 'string',
    'ApplyType': 'static'|'dynamic',
    'IsModifiable': True|False,
    'MinimumEngineVersion': 'string'
    },
    ],
    'Marker': 'string'
    }
    :returns:
    Redshift.Client.exceptions.ClusterParameterGroupNotFoundFault
    """
    # Documentation stub only — no implementation here; calling it returns None.
    pass
def describe_cluster_security_groups(ClusterSecurityGroupName=None, MaxRecords=None, Marker=None, TagKeys=None, TagValues=None):
    """
    Returns information about Amazon Redshift security groups. If the name of a security group is specified, the response will contain information about only that security group.
    For information about managing security groups, go to Amazon Redshift Cluster Security Groups in the Amazon Redshift Cluster Management Guide .
    If you specify both tag keys and tag values in the same request, Amazon Redshift returns all security groups that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all security groups that have any combination of those values are returned.
    If both tag keys and values are omitted from the request, security groups are returned regardless of whether they have tag keys or values associated with them.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.describe_cluster_security_groups(
    ClusterSecurityGroupName='string',
    MaxRecords=123,
    Marker='string',
    TagKeys=[
    'string',
    ],
    TagValues=[
    'string',
    ]
    )
    :type ClusterSecurityGroupName: string
    :param ClusterSecurityGroupName: The name of a cluster security group for which you are requesting details. You can specify either the Marker parameter or a ClusterSecurityGroupName parameter, but not both.\nExample: securitygroup1\n
    :type MaxRecords: integer
    :param MaxRecords: The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.\nDefault: 100\nConstraints: minimum 20, maximum 100.\n
    :type Marker: string
    :param Marker: An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterSecurityGroups request exceed the value specified in MaxRecords , AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.\nConstraints: You can specify either the ClusterSecurityGroupName parameter or the Marker parameter, but not both.\n
    :type TagKeys: list
    :param TagKeys: A tag key or keys for which you want to return all matching cluster security groups that are associated with the specified key or keys. For example, suppose that you have security groups that are tagged with keys called owner and environment . If you specify both of these tag keys in the request, Amazon Redshift returns a response with the security groups that have either or both of these tag keys associated with them.\n\n(string) --\n\n
    :type TagValues: list
    :param TagValues: A tag value or values for which you want to return all matching cluster security groups that are associated with the specified tag value or values. For example, suppose that you have security groups that are tagged with values called admin and test . If you specify both of these tag values in the request, Amazon Redshift returns a response with the security groups that have either or both of these tag values associated with them.\n\n(string) --\n\n
    :rtype: dict
    ReturnsResponse Syntax
    {
    'Marker': 'string',
    'ClusterSecurityGroups': [
    {
    'ClusterSecurityGroupName': 'string',
    'Description': 'string',
    'EC2SecurityGroups': [
    {
    'Status': 'string',
    'EC2SecurityGroupName': 'string',
    'EC2SecurityGroupOwnerId': 'string',
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    },
    ],
    'IPRanges': [
    {
    'Status': 'string',
    'CIDRIP': 'string',
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    },
    ],
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    },
    ]
    }
    Response Structure
    (dict) --
    Marker (string) --
    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.
    ClusterSecurityGroups (list) --
    A list of ClusterSecurityGroup instances.
    (dict) --
    Describes a security group.
    ClusterSecurityGroupName (string) --
    The name of the cluster security group to which the operation was applied.
    Description (string) --
    A description of the security group.
    EC2SecurityGroups (list) --
    A list of EC2 security groups that are permitted to access clusters associated with this cluster security group.
    (dict) --
    Describes an Amazon EC2 security group.
    Status (string) --
    The status of the EC2 security group.
    EC2SecurityGroupName (string) --
    The name of the EC2 Security Group.
    EC2SecurityGroupOwnerId (string) --
    The AWS ID of the owner of the EC2 security group specified in the EC2SecurityGroupName field.
    Tags (list) --
    The list of tags for the EC2 security group.
    (dict) --
    A tag consisting of a name/value pair for a resource.
    Key (string) --
    The key, or name, for the resource tag.
    Value (string) --
    The value for the resource tag.
    IPRanges (list) --
    A list of IP ranges (CIDR blocks) that are permitted to access clusters associated with this cluster security group.
    (dict) --
    Describes an IP range used in a security group.
    Status (string) --
    The status of the IP range, for example, "authorized".
    CIDRIP (string) --
    The IP range in Classless Inter-Domain Routing (CIDR) notation.
    Tags (list) --
    The list of tags for the IP range.
    (dict) --
    A tag consisting of a name/value pair for a resource.
    Key (string) --
    The key, or name, for the resource tag.
    Value (string) --
    The value for the resource tag.
    Tags (list) --
    The list of tags for the cluster security group.
    (dict) --
    A tag consisting of a name/value pair for a resource.
    Key (string) --
    The key, or name, for the resource tag.
    Value (string) --
    The value for the resource tag.
    Exceptions
    Redshift.Client.exceptions.ClusterSecurityGroupNotFoundFault
    Redshift.Client.exceptions.InvalidTagFault
    :return: {
    'Marker': 'string',
    'ClusterSecurityGroups': [
    {
    'ClusterSecurityGroupName': 'string',
    'Description': 'string',
    'EC2SecurityGroups': [
    {
    'Status': 'string',
    'EC2SecurityGroupName': 'string',
    'EC2SecurityGroupOwnerId': 'string',
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    },
    ],
    'IPRanges': [
    {
    'Status': 'string',
    'CIDRIP': 'string',
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    },
    ],
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    },
    ]
    }
    :returns:
    Redshift.Client.exceptions.ClusterSecurityGroupNotFoundFault
    Redshift.Client.exceptions.InvalidTagFault
    """
    # Documentation stub only — no implementation here; calling it returns None.
    pass
def describe_cluster_snapshots(ClusterIdentifier=None, SnapshotIdentifier=None, SnapshotType=None, StartTime=None, EndTime=None, MaxRecords=None, Marker=None, OwnerAccount=None, TagKeys=None, TagValues=None, ClusterExists=None, SortingEntities=None):
    """
    Returns one or more snapshot objects, which contain metadata about your cluster snapshots. By default, this operation returns information about all snapshots of all clusters that are owned by your AWS customer account. No information is returned for snapshots owned by inactive AWS customer accounts.
    If you specify both tag keys and tag values in the same request, Amazon Redshift returns all snapshots that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all snapshots that have any combination of those values are returned. Only snapshots that you own are returned in the response; shared snapshots are not returned with the tag key and tag value request parameters.
    If both tag keys and values are omitted from the request, snapshots are returned regardless of whether they have tag keys or values associated with them.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.describe_cluster_snapshots(
    ClusterIdentifier='string',
    SnapshotIdentifier='string',
    SnapshotType='string',
    StartTime=datetime(2015, 1, 1),
    EndTime=datetime(2015, 1, 1),
    MaxRecords=123,
    Marker='string',
    OwnerAccount='string',
    TagKeys=[
    'string',
    ],
    TagValues=[
    'string',
    ],
    ClusterExists=True|False,
    SortingEntities=[
    {
    'Attribute': 'SOURCE_TYPE'|'TOTAL_SIZE'|'CREATE_TIME',
    'SortOrder': 'ASC'|'DESC'
    },
    ]
    )
    :type ClusterIdentifier: string
    :param ClusterIdentifier: The identifier of the cluster which generated the requested snapshots.
    :type SnapshotIdentifier: string
    :param SnapshotIdentifier: The snapshot identifier of the snapshot about which to return information.
    :type SnapshotType: string
    :param SnapshotType: The type of snapshots for which you are requesting information. By default, snapshots of all types are returned.\nValid Values: automated | manual\n
    :type StartTime: datetime
    :param StartTime: A value that requests only snapshots created at or after the specified time. The time value is specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.\nExample: 2012-07-16T18:00:00Z\n
    :type EndTime: datetime
    :param EndTime: A time value that requests only snapshots created at or before the specified time. The time value is specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.\nExample: 2012-07-16T18:00:00Z\n
    :type MaxRecords: integer
    :param MaxRecords: The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.\nDefault: 100\nConstraints: minimum 20, maximum 100.\n
    :type Marker: string
    :param Marker: An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterSnapshots request exceed the value specified in MaxRecords , AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.
    :type OwnerAccount: string
    :param OwnerAccount: The AWS customer account used to create or copy the snapshot. Use this field to filter the results to snapshots owned by a particular account. To describe snapshots you own, either specify your AWS customer account, or do not specify the parameter.
    :type TagKeys: list
    :param TagKeys: A tag key or keys for which you want to return all matching cluster snapshots that are associated with the specified key or keys. For example, suppose that you have snapshots that are tagged with keys called owner and environment . If you specify both of these tag keys in the request, Amazon Redshift returns a response with the snapshots that have either or both of these tag keys associated with them.\n\n(string) --\n\n
    :type TagValues: list
    :param TagValues: A tag value or values for which you want to return all matching cluster snapshots that are associated with the specified tag value or values. For example, suppose that you have snapshots that are tagged with values called admin and test . If you specify both of these tag values in the request, Amazon Redshift returns a response with the snapshots that have either or both of these tag values associated with them.\n\n(string) --\n\n
    :type ClusterExists: boolean
    :param ClusterExists: A value that indicates whether to return snapshots only for an existing cluster. You can perform table-level restore only by using a snapshot of an existing cluster, that is, a cluster that has not been deleted. Values for this parameter work as follows:\n\nIf ClusterExists is set to true , ClusterIdentifier is required.\nIf ClusterExists is set to false and ClusterIdentifier isn\'t specified, all snapshots associated with deleted clusters (orphaned snapshots) are returned.\nIf ClusterExists is set to false and ClusterIdentifier is specified for a deleted cluster, snapshots associated with that cluster are returned.\nIf ClusterExists is set to false and ClusterIdentifier is specified for an existing cluster, no snapshots are returned.\n\n
    :type SortingEntities: list
    :param SortingEntities: \n(dict) --Describes a sorting entity\n\nAttribute (string) -- [REQUIRED]The category for sorting the snapshots.\n\nSortOrder (string) --The order for listing the attributes.\n\n\n\n\n
    :rtype: dict
    ReturnsResponse Syntax
    {
    'Marker': 'string',
    'Snapshots': [
    {
    'SnapshotIdentifier': 'string',
    'ClusterIdentifier': 'string',
    'SnapshotCreateTime': datetime(2015, 1, 1),
    'Status': 'string',
    'Port': 123,
    'AvailabilityZone': 'string',
    'ClusterCreateTime': datetime(2015, 1, 1),
    'MasterUsername': 'string',
    'ClusterVersion': 'string',
    'SnapshotType': 'string',
    'NodeType': 'string',
    'NumberOfNodes': 123,
    'DBName': 'string',
    'VpcId': 'string',
    'Encrypted': True|False,
    'KmsKeyId': 'string',
    'EncryptedWithHSM': True|False,
    'AccountsWithRestoreAccess': [
    {
    'AccountId': 'string',
    'AccountAlias': 'string'
    },
    ],
    'OwnerAccount': 'string',
    'TotalBackupSizeInMegaBytes': 123.0,
    'ActualIncrementalBackupSizeInMegaBytes': 123.0,
    'BackupProgressInMegaBytes': 123.0,
    'CurrentBackupRateInMegaBytesPerSecond': 123.0,
    'EstimatedSecondsToCompletion': 123,
    'ElapsedTimeInSeconds': 123,
    'SourceRegion': 'string',
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ],
    'RestorableNodeTypes': [
    'string',
    ],
    'EnhancedVpcRouting': True|False,
    'MaintenanceTrackName': 'string',
    'ManualSnapshotRetentionPeriod': 123,
    'ManualSnapshotRemainingDays': 123,
    'SnapshotRetentionStartTime': datetime(2015, 1, 1)
    },
    ]
    }
    Response Structure
    (dict) --
    Contains the output from the DescribeClusterSnapshots action.
    Marker (string) --
    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.
    Snapshots (list) --
    A list of Snapshot instances.
    (dict) --
    Describes a snapshot.
    SnapshotIdentifier (string) --
    The snapshot identifier that is provided in the request.
    ClusterIdentifier (string) --
    The identifier of the cluster for which the snapshot was taken.
    SnapshotCreateTime (datetime) --
    The time (in UTC format) when Amazon Redshift began the snapshot. A snapshot contains a copy of the cluster data as of this exact time.
    Status (string) --
    The snapshot status. The value of the status depends on the API operation used:
    CreateClusterSnapshot and CopyClusterSnapshot returns status as "creating".
    DescribeClusterSnapshots returns status as "creating", "available", "final snapshot", or "failed".
    DeleteClusterSnapshot returns status as "deleted".
    Port (integer) --
    The port that the cluster is listening on.
    AvailabilityZone (string) --
    The Availability Zone in which the cluster was created.
    ClusterCreateTime (datetime) --
    The time (UTC) when the cluster was originally created.
    MasterUsername (string) --
    The master user name for the cluster.
    ClusterVersion (string) --
    The version ID of the Amazon Redshift engine that is running on the cluster.
    SnapshotType (string) --
    The snapshot type. Snapshots created using CreateClusterSnapshot and CopyClusterSnapshot are of type "manual".
    NodeType (string) --
    The node type of the nodes in the cluster.
    NumberOfNodes (integer) --
    The number of nodes in the cluster.
    DBName (string) --
    The name of the database that was created when the cluster was created.
    VpcId (string) --
    The VPC identifier of the cluster if the snapshot is from a cluster in a VPC. Otherwise, this field is not in the output.
    Encrypted (boolean) --
    If true , the data in the snapshot is encrypted at rest.
    KmsKeyId (string) --
    The AWS Key Management Service (KMS) key ID of the encryption key that was used to encrypt data in the cluster from which the snapshot was taken.
    EncryptedWithHSM (boolean) --
    A boolean that indicates whether the snapshot data is encrypted using the HSM keys of the source cluster. true indicates that the data is encrypted using HSM keys.
    AccountsWithRestoreAccess (list) --
    A list of the AWS customer accounts authorized to restore the snapshot. Returns null if no accounts are authorized. Visible only to the snapshot owner.
    (dict) --
    Describes an AWS customer account authorized to restore a snapshot.
    AccountId (string) --
    The identifier of an AWS customer account authorized to restore a snapshot.
    AccountAlias (string) --
    The identifier of an AWS support account authorized to restore a snapshot. For AWS support, the identifier is amazon-redshift-support .
    OwnerAccount (string) --
    For manual snapshots, the AWS customer account used to create or copy the snapshot. For automatic snapshots, the owner of the cluster. The owner can perform all snapshot actions, such as sharing a manual snapshot.
    TotalBackupSizeInMegaBytes (float) --
    The size of the complete set of backup data that would be used to restore the cluster.
    ActualIncrementalBackupSizeInMegaBytes (float) --
    The size of the incremental backup.
    BackupProgressInMegaBytes (float) --
    The number of megabytes that have been transferred to the snapshot backup.
    CurrentBackupRateInMegaBytesPerSecond (float) --
    The number of megabytes per second being transferred to the snapshot backup. Returns 0 for a completed backup.
    EstimatedSecondsToCompletion (integer) --
    The estimate of the time remaining before the snapshot backup will complete. Returns 0 for a completed backup.
    ElapsedTimeInSeconds (integer) --
    The amount of time an in-progress snapshot backup has been running, or the amount of time it took a completed backup to finish.
    SourceRegion (string) --
    The source region from which the snapshot was copied.
    Tags (list) --
    The list of tags for the cluster snapshot.
    (dict) --
    A tag consisting of a name/value pair for a resource.
    Key (string) --
    The key, or name, for the resource tag.
    Value (string) --
    The value for the resource tag.
    RestorableNodeTypes (list) --
    The list of node types that this cluster snapshot is able to restore into.
    (string) --
    EnhancedVpcRouting (boolean) --
    An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.
    If this option is true , enhanced VPC routing is enabled.
    Default: false
    MaintenanceTrackName (string) --
    The name of the maintenance track for the snapshot.
    ManualSnapshotRetentionPeriod (integer) --
    The number of days that a manual snapshot is retained. If the value is -1, the manual snapshot is retained indefinitely.
    The value must be either -1 or an integer between 1 and 3,653.
    ManualSnapshotRemainingDays (integer) --
    The number of days until a manual snapshot will pass its retention period.
    SnapshotRetentionStartTime (datetime) --
    A timestamp representing the start of the retention period for the snapshot.
    Exceptions
    Redshift.Client.exceptions.ClusterNotFoundFault
    Redshift.Client.exceptions.ClusterSnapshotNotFoundFault
    Redshift.Client.exceptions.InvalidTagFault
    :return: {
    'Marker': 'string',
    'Snapshots': [
    {
    'SnapshotIdentifier': 'string',
    'ClusterIdentifier': 'string',
    'SnapshotCreateTime': datetime(2015, 1, 1),
    'Status': 'string',
    'Port': 123,
    'AvailabilityZone': 'string',
    'ClusterCreateTime': datetime(2015, 1, 1),
    'MasterUsername': 'string',
    'ClusterVersion': 'string',
    'SnapshotType': 'string',
    'NodeType': 'string',
    'NumberOfNodes': 123,
    'DBName': 'string',
    'VpcId': 'string',
    'Encrypted': True|False,
    'KmsKeyId': 'string',
    'EncryptedWithHSM': True|False,
    'AccountsWithRestoreAccess': [
    {
    'AccountId': 'string',
    'AccountAlias': 'string'
    },
    ],
    'OwnerAccount': 'string',
    'TotalBackupSizeInMegaBytes': 123.0,
    'ActualIncrementalBackupSizeInMegaBytes': 123.0,
    'BackupProgressInMegaBytes': 123.0,
    'CurrentBackupRateInMegaBytesPerSecond': 123.0,
    'EstimatedSecondsToCompletion': 123,
    'ElapsedTimeInSeconds': 123,
    'SourceRegion': 'string',
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ],
    'RestorableNodeTypes': [
    'string',
    ],
    'EnhancedVpcRouting': True|False,
    'MaintenanceTrackName': 'string',
    'ManualSnapshotRetentionPeriod': 123,
    'ManualSnapshotRemainingDays': 123,
    'SnapshotRetentionStartTime': datetime(2015, 1, 1)
    },
    ]
    }
    :returns:
    CreateClusterSnapshot and CopyClusterSnapshot returns status as "creating".
    DescribeClusterSnapshots returns status as "creating", "available", "final snapshot", or "failed".
    DeleteClusterSnapshot returns status as "deleted".
    """
    # Documentation stub only — no implementation here; calling it returns None.
    pass
def describe_cluster_subnet_groups(ClusterSubnetGroupName=None, MaxRecords=None, Marker=None, TagKeys=None, TagValues=None):
"""
Returns one or more cluster subnet group objects, which contain metadata about your cluster subnet groups. By default, this operation returns information about all cluster subnet groups that are defined in your AWS account.
If you specify both tag keys and tag values in the same request, Amazon Redshift returns all subnet groups that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all subnet groups that have any combination of those values are returned.
If both tag keys and values are omitted from the request, subnet groups are returned regardless of whether they have tag keys or values associated with them.
See also: AWS API Documentation
Exceptions
:example: response = client.describe_cluster_subnet_groups(
ClusterSubnetGroupName='string',
MaxRecords=123,
Marker='string',
TagKeys=[
'string',
],
TagValues=[
'string',
]
)
:type ClusterSubnetGroupName: string
:param ClusterSubnetGroupName: The name of the cluster subnet group for which information is requested.
:type MaxRecords: integer
:param MaxRecords: The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.\nDefault: 100\nConstraints: minimum 20, maximum 100.\n
:type Marker: string
:param Marker: An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterSubnetGroups request exceed the value specified in MaxRecords , AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.
:type TagKeys: list
:param TagKeys: A tag key or keys for which you want to return all matching cluster subnet groups that are associated with the specified key or keys. For example, suppose that you have subnet groups that are tagged with keys called owner and environment . If you specify both of these tag keys in the request, Amazon Redshift returns a response with the subnet groups that have either or both of these tag keys associated with them.\n\n(string) --\n\n
:type TagValues: list
:param TagValues: A tag value or values for which you want to return all matching cluster subnet groups that are associated with the specified tag value or values. For example, suppose that you have subnet groups that are tagged with values called admin and test . If you specify both of these tag values in the request, Amazon Redshift returns a response with the subnet groups that have either or both of these tag values associated with them.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Marker': 'string',
'ClusterSubnetGroups': [
{
'ClusterSubnetGroupName': 'string',
'Description': 'string',
'VpcId': 'string',
'SubnetGroupStatus': 'string',
'Subnets': [
{
'SubnetIdentifier': 'string',
'SubnetAvailabilityZone': {
'Name': 'string',
'SupportedPlatforms': [
{
'Name': 'string'
},
]
},
'SubnetStatus': 'string'
},
],
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
]
},
]
}
Response Structure
(dict) --
Contains the output from the DescribeClusterSubnetGroups action.
Marker (string) --
A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.
ClusterSubnetGroups (list) --
A list of ClusterSubnetGroup instances.
(dict) --
Describes a subnet group.
ClusterSubnetGroupName (string) --
The name of the cluster subnet group.
Description (string) --
The description of the cluster subnet group.
VpcId (string) --
The VPC ID of the cluster subnet group.
SubnetGroupStatus (string) --
The status of the cluster subnet group. Possible values are Complete , Incomplete and Invalid .
Subnets (list) --
A list of the VPC Subnet elements.
(dict) --
Describes a subnet.
SubnetIdentifier (string) --
The identifier of the subnet.
SubnetAvailabilityZone (dict) --
Name (string) --
The name of the availability zone.
SupportedPlatforms (list) --
(dict) --
A list of supported platforms for orderable clusters.
Name (string) --
SubnetStatus (string) --
The status of the subnet.
Tags (list) --
The list of tags for the cluster subnet group.
(dict) --
A tag consisting of a name/value pair for a resource.
Key (string) --
The key, or name, for the resource tag.
Value (string) --
The value for the resource tag.
Exceptions
Redshift.Client.exceptions.ClusterSubnetGroupNotFoundFault
Redshift.Client.exceptions.InvalidTagFault
:return: {
'Marker': 'string',
'ClusterSubnetGroups': [
{
'ClusterSubnetGroupName': 'string',
'Description': 'string',
'VpcId': 'string',
'SubnetGroupStatus': 'string',
'Subnets': [
{
'SubnetIdentifier': 'string',
'SubnetAvailabilityZone': {
'Name': 'string',
'SupportedPlatforms': [
{
'Name': 'string'
},
]
},
'SubnetStatus': 'string'
},
],
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
]
},
]
}
:returns:
Name (string) --
"""
pass  # auto-generated documentation stub; no local implementation (presumably dispatched by botocore at runtime — TODO confirm)
def describe_cluster_tracks(MaintenanceTrackName=None, MaxRecords=None, Marker=None):
"""
Returns a list of all the available maintenance tracks.
See also: AWS API Documentation
Exceptions
:example: response = client.describe_cluster_tracks(
MaintenanceTrackName='string',
MaxRecords=123,
Marker='string'
)
:type MaintenanceTrackName: string
:param MaintenanceTrackName: The name of the maintenance track.
:type MaxRecords: integer
:param MaxRecords: An integer value for the maximum number of maintenance tracks to return.
:type Marker: string
:param Marker: An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterTracks request exceed the value specified in MaxRecords , Amazon Redshift returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.
:rtype: dict
ReturnsResponse Syntax
{
'MaintenanceTracks': [
{
'MaintenanceTrackName': 'string',
'DatabaseVersion': 'string',
'UpdateTargets': [
{
'MaintenanceTrackName': 'string',
'DatabaseVersion': 'string',
'SupportedOperations': [
{
'OperationName': 'string'
},
]
},
]
},
],
'Marker': 'string'
}
Response Structure
(dict) --
MaintenanceTracks (list) --
A list of maintenance tracks output by the DescribeClusterTracks operation.
(dict) --
Defines a maintenance track that determines which Amazon Redshift version to apply during a maintenance window. If the value for MaintenanceTrack is current , the cluster is updated to the most recently certified maintenance release. If the value is trailing , the cluster is updated to the previously certified maintenance release.
MaintenanceTrackName (string) --
The name of the maintenance track. Possible values are current and trailing .
DatabaseVersion (string) --
The version number for the cluster release.
UpdateTargets (list) --
An array of UpdateTarget objects to update with the maintenance track.
(dict) --
A maintenance track that you can switch the current track to.
MaintenanceTrackName (string) --
The name of the new maintenance track.
DatabaseVersion (string) --
The cluster version for the new maintenance track.
SupportedOperations (list) --
A list of operations supported by the maintenance track.
(dict) --
Describes the operations that are allowed on a maintenance track.
OperationName (string) --
A list of the supported operations.
Marker (string) --
The starting point to return a set of response tracklist records. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.
Exceptions
Redshift.Client.exceptions.InvalidClusterTrackFault
Redshift.Client.exceptions.UnauthorizedOperation
:return: {
'MaintenanceTracks': [
{
'MaintenanceTrackName': 'string',
'DatabaseVersion': 'string',
'UpdateTargets': [
{
'MaintenanceTrackName': 'string',
'DatabaseVersion': 'string',
'SupportedOperations': [
{
'OperationName': 'string'
},
]
},
]
},
],
'Marker': 'string'
}
:returns:
Redshift.Client.exceptions.InvalidClusterTrackFault
Redshift.Client.exceptions.UnauthorizedOperation
"""
pass  # auto-generated documentation stub; no local implementation (presumably dispatched by botocore at runtime — TODO confirm)
def describe_cluster_versions(ClusterVersion=None, ClusterParameterGroupFamily=None, MaxRecords=None, Marker=None):
"""
Returns descriptions of the available Amazon Redshift cluster versions. You can call this operation even before creating any clusters to learn more about the Amazon Redshift versions. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide .
See also: AWS API Documentation
:example: response = client.describe_cluster_versions(
ClusterVersion='string',
ClusterParameterGroupFamily='string',
MaxRecords=123,
Marker='string'
)
:type ClusterVersion: string
:param ClusterVersion: The specific cluster version to return.\nExample: 1.0\n
:type ClusterParameterGroupFamily: string
:param ClusterParameterGroupFamily: The name of a specific cluster parameter group family to return details for.\nConstraints:\n\nMust be 1 to 255 alphanumeric characters\nFirst character must be a letter\nCannot end with a hyphen or contain two consecutive hyphens\n\n
:type MaxRecords: integer
:param MaxRecords: The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.\nDefault: 100\nConstraints: minimum 20, maximum 100.\n
:type Marker: string
:param Marker: An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterVersions request exceed the value specified in MaxRecords , AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.
:rtype: dict
ReturnsResponse Syntax
{
'Marker': 'string',
'ClusterVersions': [
{
'ClusterVersion': 'string',
'ClusterParameterGroupFamily': 'string',
'Description': 'string'
},
]
}
Response Structure
(dict) --
Contains the output from the DescribeClusterVersions action.
Marker (string) --
A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.
ClusterVersions (list) --
A list of Version elements.
(dict) --
Describes a cluster version, including the parameter group family and description of the version.
ClusterVersion (string) --
The version number used by the cluster.
ClusterParameterGroupFamily (string) --
The name of the cluster parameter group family for the cluster.
Description (string) --
The description of the cluster version.
:return: {
'Marker': 'string',
'ClusterVersions': [
{
'ClusterVersion': 'string',
'ClusterParameterGroupFamily': 'string',
'Description': 'string'
},
]
}
"""
pass  # auto-generated documentation stub; no local implementation (presumably dispatched by botocore at runtime — TODO confirm)
def describe_clusters(ClusterIdentifier=None, MaxRecords=None, Marker=None, TagKeys=None, TagValues=None):
"""
Returns properties of provisioned clusters including general cluster properties, cluster database properties, maintenance and backup properties, and security and access properties. This operation supports pagination. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide .
If you specify both tag keys and tag values in the same request, Amazon Redshift returns all clusters that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all clusters that have any combination of those values are returned.
If both tag keys and values are omitted from the request, clusters are returned regardless of whether they have tag keys or values associated with them.
See also: AWS API Documentation
Exceptions
:example: response = client.describe_clusters(
ClusterIdentifier='string',
MaxRecords=123,
Marker='string',
TagKeys=[
'string',
],
TagValues=[
'string',
]
)
:type ClusterIdentifier: string
:param ClusterIdentifier: The unique identifier of a cluster whose properties you are requesting. This parameter is case sensitive.\nThe default is that all clusters defined for an account are returned.\n
:type MaxRecords: integer
:param MaxRecords: The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.\nDefault: 100\nConstraints: minimum 20, maximum 100.\n
:type Marker: string
:param Marker: An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusters request exceed the value specified in MaxRecords , AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.\nConstraints: You can specify either the ClusterIdentifier parameter or the Marker parameter, but not both.\n
:type TagKeys: list
:param TagKeys: A tag key or keys for which you want to return all matching clusters that are associated with the specified key or keys. For example, suppose that you have clusters that are tagged with keys called owner and environment . If you specify both of these tag keys in the request, Amazon Redshift returns a response with the clusters that have either or both of these tag keys associated with them.\n\n(string) --\n\n
:type TagValues: list
:param TagValues: A tag value or values for which you want to return all matching clusters that are associated with the specified tag value or values. For example, suppose that you have clusters that are tagged with values called admin and test . If you specify both of these tag values in the request, Amazon Redshift returns a response with the clusters that have either or both of these tag values associated with them.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Marker': 'string',
'Clusters': [
{
'ClusterIdentifier': 'string',
'NodeType': 'string',
'ClusterStatus': 'string',
'ClusterAvailabilityStatus': 'string',
'ModifyStatus': 'string',
'MasterUsername': 'string',
'DBName': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'ClusterCreateTime': datetime(2015, 1, 1),
'AutomatedSnapshotRetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'ClusterSecurityGroups': [
{
'ClusterSecurityGroupName': 'string',
'Status': 'string'
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'ClusterParameterGroups': [
{
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'ClusterParameterStatusList': [
{
'ParameterName': 'string',
'ParameterApplyStatus': 'string',
'ParameterApplyErrorDescription': 'string'
},
]
},
],
'ClusterSubnetGroupName': 'string',
'VpcId': 'string',
'AvailabilityZone': 'string',
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'MasterUserPassword': 'string',
'NodeType': 'string',
'NumberOfNodes': 123,
'ClusterType': 'string',
'ClusterVersion': 'string',
'AutomatedSnapshotRetentionPeriod': 123,
'ClusterIdentifier': 'string',
'PubliclyAccessible': True|False,
'EnhancedVpcRouting': True|False,
'MaintenanceTrackName': 'string',
'EncryptionType': 'string'
},
'ClusterVersion': 'string',
'AllowVersionUpgrade': True|False,
'NumberOfNodes': 123,
'PubliclyAccessible': True|False,
'Encrypted': True|False,
'RestoreStatus': {
'Status': 'string',
'CurrentRestoreRateInMegaBytesPerSecond': 123.0,
'SnapshotSizeInMegaBytes': 123,
'ProgressInMegaBytes': 123,
'ElapsedTimeInSeconds': 123,
'EstimatedTimeToCompletionInSeconds': 123
},
'DataTransferProgress': {
'Status': 'string',
'CurrentRateInMegaBytesPerSecond': 123.0,
'TotalDataInMegaBytes': 123,
'DataTransferredInMegaBytes': 123,
'EstimatedTimeToCompletionInSeconds': 123,
'ElapsedTimeInSeconds': 123
},
'HsmStatus': {
'HsmClientCertificateIdentifier': 'string',
'HsmConfigurationIdentifier': 'string',
'Status': 'string'
},
'ClusterSnapshotCopyStatus': {
'DestinationRegion': 'string',
'RetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'SnapshotCopyGrantName': 'string'
},
'ClusterPublicKey': 'string',
'ClusterNodes': [
{
'NodeRole': 'string',
'PrivateIPAddress': 'string',
'PublicIPAddress': 'string'
},
],
'ElasticIpStatus': {
'ElasticIp': 'string',
'Status': 'string'
},
'ClusterRevisionNumber': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'KmsKeyId': 'string',
'EnhancedVpcRouting': True|False,
'IamRoles': [
{
'IamRoleArn': 'string',
'ApplyStatus': 'string'
},
],
'PendingActions': [
'string',
],
'MaintenanceTrackName': 'string',
'ElasticResizeNumberOfNodeOptions': 'string',
'DeferredMaintenanceWindows': [
{
'DeferMaintenanceIdentifier': 'string',
'DeferMaintenanceStartTime': datetime(2015, 1, 1),
'DeferMaintenanceEndTime': datetime(2015, 1, 1)
},
],
'SnapshotScheduleIdentifier': 'string',
'SnapshotScheduleState': 'MODIFYING'|'ACTIVE'|'FAILED',
'ExpectedNextSnapshotScheduleTime': datetime(2015, 1, 1),
'ExpectedNextSnapshotScheduleTimeStatus': 'string',
'NextMaintenanceWindowStartTime': datetime(2015, 1, 1),
'ResizeInfo': {
'ResizeType': 'string',
'AllowCancelResize': True|False
}
},
]
}
Response Structure
(dict) --
Contains the output from the DescribeClusters action.
Marker (string) --
A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.
Clusters (list) --
A list of Cluster objects, where each object describes one cluster.
(dict) --
Describes a cluster.
ClusterIdentifier (string) --
The unique identifier of the cluster.
NodeType (string) --
The node type for the nodes in the cluster.
ClusterStatus (string) --
The current state of the cluster. Possible values are the following:
available
available, prep-for-resize
available, resize-cleanup
cancelling-resize
creating
deleting
final-snapshot
hardware-failure
incompatible-hsm
incompatible-network
incompatible-parameters
incompatible-restore
modifying
paused
rebooting
renaming
resizing
rotating-keys
storage-full
updating-hsm
ClusterAvailabilityStatus (string) --
The availability status of the cluster for queries. Possible values are the following:
Available - The cluster is available for queries.
Unavailable - The cluster is not available for queries.
Maintenance - The cluster is intermittently available for queries due to maintenance activities.
Modifying - The cluster is intermittently available for queries due to changes that modify the cluster.
Failed - The cluster failed and is not available for queries.
ModifyStatus (string) --
The status of a modify operation, if any, initiated for the cluster.
MasterUsername (string) --
The master user name for the cluster. This name is used to connect to the database that is specified in the DBName parameter.
DBName (string) --
The name of the initial database that was created when the cluster was created. This same name is returned for the life of the cluster. If an initial database was not specified, a database named dev dev was created by default.
Endpoint (dict) --
The connection endpoint.
Address (string) --
The DNS address of the Cluster.
Port (integer) --
The port that the database engine is listening on.
ClusterCreateTime (datetime) --
The date and time that the cluster was created.
AutomatedSnapshotRetentionPeriod (integer) --
The number of days that automatic cluster snapshots are retained.
ManualSnapshotRetentionPeriod (integer) --
The default number of days to retain a manual snapshot. If the value is -1, the snapshot is retained indefinitely. This setting doesn\'t change the retention period of existing snapshots.
The value must be either -1 or an integer between 1 and 3,653.
ClusterSecurityGroups (list) --
A list of cluster security group that are associated with the cluster. Each security group is represented by an element that contains ClusterSecurityGroup.Name and ClusterSecurityGroup.Status subelements.
Cluster security groups are used when the cluster is not created in an Amazon Virtual Private Cloud (VPC). Clusters that are created in a VPC use VPC security groups, which are listed by the VpcSecurityGroups parameter.
(dict) --
Describes a cluster security group.
ClusterSecurityGroupName (string) --
The name of the cluster security group.
Status (string) --
The status of the cluster security group.
VpcSecurityGroups (list) --
A list of Amazon Virtual Private Cloud (Amazon VPC) security groups that are associated with the cluster. This parameter is returned only if the cluster is in a VPC.
(dict) --
Describes the members of a VPC security group.
VpcSecurityGroupId (string) --
The identifier of the VPC security group.
Status (string) --
The status of the VPC security group.
ClusterParameterGroups (list) --
The list of cluster parameter groups that are associated with this cluster. Each parameter group in the list is returned with its status.
(dict) --
Describes the status of a parameter group.
ParameterGroupName (string) --
The name of the cluster parameter group.
ParameterApplyStatus (string) --
The status of parameter updates.
ClusterParameterStatusList (list) --
The list of parameter statuses.
For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide .
(dict) --
Describes the status of a parameter group.
ParameterName (string) --
The name of the parameter.
ParameterApplyStatus (string) --
The status of the parameter that indicates whether the parameter is in sync with the database, waiting for a cluster reboot, or encountered an error when being applied.
The following are possible statuses and descriptions.
in-sync : The parameter value is in sync with the database.
pending-reboot : The parameter value will be applied after the cluster reboots.
applying : The parameter value is being applied to the database.
invalid-parameter : Cannot apply the parameter value because it has an invalid value or syntax.
apply-deferred : The parameter contains static property changes. The changes are deferred until the cluster reboots.
apply-error : Cannot connect to the cluster. The parameter change will be applied after the cluster reboots.
unknown-error : Cannot apply the parameter change right now. The change will be applied after the cluster reboots.
ParameterApplyErrorDescription (string) --
The error that prevented the parameter from being applied to the database.
ClusterSubnetGroupName (string) --
The name of the subnet group that is associated with the cluster. This parameter is valid only when the cluster is in a VPC.
VpcId (string) --
The identifier of the VPC the cluster is in, if the cluster is in a VPC.
AvailabilityZone (string) --
The name of the Availability Zone in which the cluster is located.
PreferredMaintenanceWindow (string) --
The weekly time range, in Universal Coordinated Time (UTC), during which system maintenance can occur.
PendingModifiedValues (dict) --
A value that, if present, indicates that changes to the cluster are pending. Specific pending changes are identified by subelements.
MasterUserPassword (string) --
The pending or in-progress change of the master user password for the cluster.
NodeType (string) --
The pending or in-progress change of the cluster\'s node type.
NumberOfNodes (integer) --
The pending or in-progress change of the number of nodes in the cluster.
ClusterType (string) --
The pending or in-progress change of the cluster type.
ClusterVersion (string) --
The pending or in-progress change of the service version.
AutomatedSnapshotRetentionPeriod (integer) --
The pending or in-progress change of the automated snapshot retention period.
ClusterIdentifier (string) --
The pending or in-progress change of the new identifier for the cluster.
PubliclyAccessible (boolean) --
The pending or in-progress change of the ability to connect to the cluster from the public network.
EnhancedVpcRouting (boolean) --
An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.
If this option is true , enhanced VPC routing is enabled.
Default: false
MaintenanceTrackName (string) --
The name of the maintenance track that the cluster will change to during the next maintenance window.
EncryptionType (string) --
The encryption type for a cluster. Possible values are: KMS and None. For the China region the possible values are None, and Legacy.
ClusterVersion (string) --
The version ID of the Amazon Redshift engine that is running on the cluster.
AllowVersionUpgrade (boolean) --
A boolean value that, if true , indicates that major version upgrades will be applied automatically to the cluster during the maintenance window.
NumberOfNodes (integer) --
The number of compute nodes in the cluster.
PubliclyAccessible (boolean) --
A boolean value that, if true , indicates that the cluster can be accessed from a public network.
Encrypted (boolean) --
A boolean value that, if true , indicates that data in the cluster is encrypted at rest.
RestoreStatus (dict) --
A value that describes the status of a cluster restore action. This parameter returns null if the cluster was not created by restoring a snapshot.
Status (string) --
The status of the restore action. Returns starting, restoring, completed, or failed.
CurrentRestoreRateInMegaBytesPerSecond (float) --
The number of megabytes per second being transferred from the backup storage. Returns the average rate for a completed backup. This field is only updated when you restore to DC2 and DS2 node types.
SnapshotSizeInMegaBytes (integer) --
The size of the set of snapshot data used to restore the cluster. This field is only updated when you restore to DC2 and DS2 node types.
ProgressInMegaBytes (integer) --
The number of megabytes that have been transferred from snapshot storage. This field is only updated when you restore to DC2 and DS2 node types.
ElapsedTimeInSeconds (integer) --
The amount of time an in-progress restore has been running, or the amount of time it took a completed restore to finish. This field is only updated when you restore to DC2 and DS2 node types.
EstimatedTimeToCompletionInSeconds (integer) --
The estimate of the time remaining before the restore will complete. Returns 0 for a completed restore. This field is only updated when you restore to DC2 and DS2 node types.
DataTransferProgress (dict) --
Status (string) --
Describes the status of the cluster. While the transfer is in progress the status is transferringdata .
CurrentRateInMegaBytesPerSecond (float) --
Describes the data transfer rate in MB\'s per second.
TotalDataInMegaBytes (integer) --
Describes the total amount of data to be transfered in megabytes.
DataTransferredInMegaBytes (integer) --
Describes the total amount of data that has been transfered in MB\'s.
EstimatedTimeToCompletionInSeconds (integer) --
Describes the estimated number of seconds remaining to complete the transfer.
ElapsedTimeInSeconds (integer) --
Describes the number of seconds that have elapsed during the data transfer.
HsmStatus (dict) --
A value that reports whether the Amazon Redshift cluster has finished applying any hardware security module (HSM) settings changes specified in a modify cluster command.
Values: active, applying
HsmClientCertificateIdentifier (string) --
Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM.
HsmConfigurationIdentifier (string) --
Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM.
Status (string) --
Reports whether the Amazon Redshift cluster has finished applying any HSM settings changes specified in a modify cluster command.
Values: active, applying
ClusterSnapshotCopyStatus (dict) --
A value that returns the destination region and retention period that are configured for cross-region snapshot copy.
DestinationRegion (string) --
The destination region that snapshots are automatically copied to when cross-region snapshot copy is enabled.
RetentionPeriod (integer) --
The number of days that automated snapshots are retained in the destination region after they are copied from a source region.
ManualSnapshotRetentionPeriod (integer) --
The number of days that automated snapshots are retained in the destination region after they are copied from a source region. If the value is -1, the manual snapshot is retained indefinitely.
The value must be either -1 or an integer between 1 and 3,653.
SnapshotCopyGrantName (string) --
The name of the snapshot copy grant.
ClusterPublicKey (string) --
The public key for the cluster.
ClusterNodes (list) --
The nodes in the cluster.
(dict) --
The identifier of a node in a cluster.
NodeRole (string) --
Whether the node is a leader node or a compute node.
PrivateIPAddress (string) --
The private IP address of a node within a cluster.
PublicIPAddress (string) --
The public IP address of a node within a cluster.
ElasticIpStatus (dict) --
The status of the elastic IP (EIP) address.
ElasticIp (string) --
The elastic IP (EIP) address for the cluster.
Status (string) --
The status of the elastic IP (EIP) address.
ClusterRevisionNumber (string) --
The specific revision number of the database in the cluster.
Tags (list) --
The list of tags for the cluster.
(dict) --
A tag consisting of a name/value pair for a resource.
Key (string) --
The key, or name, for the resource tag.
Value (string) --
The value for the resource tag.
KmsKeyId (string) --
The AWS Key Management Service (AWS KMS) key ID of the encryption key used to encrypt data in the cluster.
EnhancedVpcRouting (boolean) --
An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.
If this option is true , enhanced VPC routing is enabled.
Default: false
IamRoles (list) --
A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services.
(dict) --
An AWS Identity and Access Management (IAM) role that can be used by the associated Amazon Redshift cluster to access other AWS services.
IamRoleArn (string) --
The Amazon Resource Name (ARN) of the IAM role, for example, arn:aws:iam::123456789012:role/RedshiftCopyUnload .
ApplyStatus (string) --
A value that describes the status of the IAM role\'s association with an Amazon Redshift cluster.
The following are possible statuses and descriptions.
in-sync : The role is available for use by the cluster.
adding : The role is in the process of being associated with the cluster.
removing : The role is in the process of being disassociated with the cluster.
PendingActions (list) --
Cluster operations that are waiting to be started.
(string) --
MaintenanceTrackName (string) --
The name of the maintenance track for the cluster.
ElasticResizeNumberOfNodeOptions (string) --
The number of nodes that you can resize the cluster to with the elastic resize method.
DeferredMaintenanceWindows (list) --
Describes a group of DeferredMaintenanceWindow objects.
(dict) --
Describes a deferred maintenance window
DeferMaintenanceIdentifier (string) --
A unique identifier for the maintenance window.
DeferMaintenanceStartTime (datetime) --
A timestamp for the beginning of the time period when we defer maintenance.
DeferMaintenanceEndTime (datetime) --
A timestamp for the end of the time period when we defer maintenance.
SnapshotScheduleIdentifier (string) --
A unique identifier for the cluster snapshot schedule.
SnapshotScheduleState (string) --
The current state of the cluster snapshot schedule.
ExpectedNextSnapshotScheduleTime (datetime) --
The date and time when the next snapshot is expected to be taken for clusters with a valid snapshot schedule and backups enabled.
ExpectedNextSnapshotScheduleTimeStatus (string) --
The status of next expected snapshot for clusters having a valid snapshot schedule and backups enabled. Possible values are the following:
OnTrack - The next snapshot is expected to be taken on time.
Pending - The next snapshot is pending to be taken.
NextMaintenanceWindowStartTime (datetime) --
The date and time in UTC when system maintenance can begin.
ResizeInfo (dict) --
Returns the following:
AllowCancelResize: a boolean value indicating if the resize operation can be cancelled.
ResizeType: Returns ClassicResize
ResizeType (string) --
Returns the value ClassicResize .
AllowCancelResize (boolean) --
A boolean value indicating if the resize operation can be cancelled.
Exceptions
Redshift.Client.exceptions.ClusterNotFoundFault
Redshift.Client.exceptions.InvalidTagFault
:return: {
'Marker': 'string',
'Clusters': [
{
'ClusterIdentifier': 'string',
'NodeType': 'string',
'ClusterStatus': 'string',
'ClusterAvailabilityStatus': 'string',
'ModifyStatus': 'string',
'MasterUsername': 'string',
'DBName': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'ClusterCreateTime': datetime(2015, 1, 1),
'AutomatedSnapshotRetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'ClusterSecurityGroups': [
{
'ClusterSecurityGroupName': 'string',
'Status': 'string'
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'ClusterParameterGroups': [
{
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'ClusterParameterStatusList': [
{
'ParameterName': 'string',
'ParameterApplyStatus': 'string',
'ParameterApplyErrorDescription': 'string'
},
]
},
],
'ClusterSubnetGroupName': 'string',
'VpcId': 'string',
'AvailabilityZone': 'string',
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'MasterUserPassword': 'string',
'NodeType': 'string',
'NumberOfNodes': 123,
'ClusterType': 'string',
'ClusterVersion': 'string',
'AutomatedSnapshotRetentionPeriod': 123,
'ClusterIdentifier': 'string',
'PubliclyAccessible': True|False,
'EnhancedVpcRouting': True|False,
'MaintenanceTrackName': 'string',
'EncryptionType': 'string'
},
'ClusterVersion': 'string',
'AllowVersionUpgrade': True|False,
'NumberOfNodes': 123,
'PubliclyAccessible': True|False,
'Encrypted': True|False,
'RestoreStatus': {
'Status': 'string',
'CurrentRestoreRateInMegaBytesPerSecond': 123.0,
'SnapshotSizeInMegaBytes': 123,
'ProgressInMegaBytes': 123,
'ElapsedTimeInSeconds': 123,
'EstimatedTimeToCompletionInSeconds': 123
},
'DataTransferProgress': {
'Status': 'string',
'CurrentRateInMegaBytesPerSecond': 123.0,
'TotalDataInMegaBytes': 123,
'DataTransferredInMegaBytes': 123,
'EstimatedTimeToCompletionInSeconds': 123,
'ElapsedTimeInSeconds': 123
},
'HsmStatus': {
'HsmClientCertificateIdentifier': 'string',
'HsmConfigurationIdentifier': 'string',
'Status': 'string'
},
'ClusterSnapshotCopyStatus': {
'DestinationRegion': 'string',
'RetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'SnapshotCopyGrantName': 'string'
},
'ClusterPublicKey': 'string',
'ClusterNodes': [
{
'NodeRole': 'string',
'PrivateIPAddress': 'string',
'PublicIPAddress': 'string'
},
],
'ElasticIpStatus': {
'ElasticIp': 'string',
'Status': 'string'
},
'ClusterRevisionNumber': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'KmsKeyId': 'string',
'EnhancedVpcRouting': True|False,
'IamRoles': [
{
'IamRoleArn': 'string',
'ApplyStatus': 'string'
},
],
'PendingActions': [
'string',
],
'MaintenanceTrackName': 'string',
'ElasticResizeNumberOfNodeOptions': 'string',
'DeferredMaintenanceWindows': [
{
'DeferMaintenanceIdentifier': 'string',
'DeferMaintenanceStartTime': datetime(2015, 1, 1),
'DeferMaintenanceEndTime': datetime(2015, 1, 1)
},
],
'SnapshotScheduleIdentifier': 'string',
'SnapshotScheduleState': 'MODIFYING'|'ACTIVE'|'FAILED',
'ExpectedNextSnapshotScheduleTime': datetime(2015, 1, 1),
'ExpectedNextSnapshotScheduleTimeStatus': 'string',
'NextMaintenanceWindowStartTime': datetime(2015, 1, 1),
'ResizeInfo': {
'ResizeType': 'string',
'AllowCancelResize': True|False
}
},
]
}
:returns:
available
available, prep-for-resize
available, resize-cleanup
cancelling-resize
creating
deleting
final-snapshot
hardware-failure
incompatible-hsm
incompatible-network
incompatible-parameters
incompatible-restore
modifying
paused
rebooting
renaming
resizing
rotating-keys
storage-full
updating-hsm
"""
pass
def describe_default_cluster_parameters(ParameterGroupFamily: 'str' = None, MaxRecords: 'int' = None, Marker: 'str' = None) -> 'dict':
    # Stub: documents the Redshift DescribeDefaultClusterParameters API; body intentionally empty.
    """
    Returns a list of parameter settings for the specified parameter group family.
    For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide .
    See also: AWS API Documentation
    :example: response = client.describe_default_cluster_parameters(
    ParameterGroupFamily='string',
    MaxRecords=123,
    Marker='string'
    )
    :type ParameterGroupFamily: string
    :param ParameterGroupFamily: [REQUIRED]\nThe name of the cluster parameter group family.\n
    :type MaxRecords: integer
    :param MaxRecords: The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.\nDefault: 100\nConstraints: minimum 20, maximum 100.\n
    :type Marker: string
    :param Marker: An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeDefaultClusterParameters request exceed the value specified in MaxRecords , AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.
    :rtype: dict
    ReturnsResponse Syntax
    {
    'DefaultClusterParameters': {
    'ParameterGroupFamily': 'string',
    'Marker': 'string',
    'Parameters': [
    {
    'ParameterName': 'string',
    'ParameterValue': 'string',
    'Description': 'string',
    'Source': 'string',
    'DataType': 'string',
    'AllowedValues': 'string',
    'ApplyType': 'static'|'dynamic',
    'IsModifiable': True|False,
    'MinimumEngineVersion': 'string'
    },
    ]
    }
    }
    Response Structure
    (dict) --
    DefaultClusterParameters (dict) --
    Describes the default cluster parameters for a parameter group family.
    ParameterGroupFamily (string) --
    The name of the cluster parameter group family to which the engine default parameters apply.
    Marker (string) --
    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.
    Parameters (list) --
    The list of cluster default parameters.
    (dict) --
    Describes a parameter in a cluster parameter group.
    ParameterName (string) --
    The name of the parameter.
    ParameterValue (string) --
    The value of the parameter.
    Description (string) --
    A description of the parameter.
    Source (string) --
    The source of the parameter value, such as "engine-default" or "user".
    DataType (string) --
    The data type of the parameter.
    AllowedValues (string) --
    The valid range of values for the parameter.
    ApplyType (string) --
    Specifies how to apply the WLM configuration parameter. Some properties can be applied dynamically, while other properties require that any associated clusters be rebooted for the configuration changes to be applied. For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide .
    IsModifiable (boolean) --
    If true , the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.
    MinimumEngineVersion (string) --
    The earliest engine version to which the parameter can apply.
    :return: {
    'DefaultClusterParameters': {
    'ParameterGroupFamily': 'string',
    'Marker': 'string',
    'Parameters': [
    {
    'ParameterName': 'string',
    'ParameterValue': 'string',
    'Description': 'string',
    'Source': 'string',
    'DataType': 'string',
    'AllowedValues': 'string',
    'ApplyType': 'static'|'dynamic',
    'IsModifiable': True|False,
    'MinimumEngineVersion': 'string'
    },
    ]
    }
    }
    """
    pass
def describe_event_categories(SourceType: 'str' = None) -> 'dict':
    # Stub: documents the Redshift DescribeEventCategories API; body intentionally empty.
    """
    Displays a list of event categories for all event source types, or for a specified source type. For a list of the event categories and source types, go to Amazon Redshift Event Notifications .
    See also: AWS API Documentation
    :example: response = client.describe_event_categories(
    SourceType='string'
    )
    :type SourceType: string
    :param SourceType: The source type, such as cluster or parameter group, to which the described event categories apply.\nValid values: cluster, cluster-snapshot, cluster-parameter-group, cluster-security-group, and scheduled-action.\n
    :rtype: dict
    ReturnsResponse Syntax{
    'EventCategoriesMapList': [
    {
    'SourceType': 'string',
    'Events': [
    {
    'EventId': 'string',
    'EventCategories': [
    'string',
    ],
    'EventDescription': 'string',
    'Severity': 'string'
    },
    ]
    },
    ]
    }
    Response Structure
    (dict) --
    EventCategoriesMapList (list) --A list of event categories descriptions.
    (dict) --Describes event categories.
    SourceType (string) --The source type, such as cluster or cluster-snapshot, that the returned categories belong to.
    Events (list) --The events in the event category.
    (dict) --Describes event information.
    EventId (string) --The identifier of an Amazon Redshift event.
    EventCategories (list) --The category of an Amazon Redshift event.
    (string) --
    EventDescription (string) --The description of an Amazon Redshift event.
    Severity (string) --The severity of the event.
    Values: ERROR, INFO
    :return: {
    'EventCategoriesMapList': [
    {
    'SourceType': 'string',
    'Events': [
    {
    'EventId': 'string',
    'EventCategories': [
    'string',
    ],
    'EventDescription': 'string',
    'Severity': 'string'
    },
    ]
    },
    ]
    }
    """
    pass
def describe_event_subscriptions(SubscriptionName: 'str' = None, MaxRecords: 'int' = None, Marker: 'str' = None, TagKeys: 'list' = None, TagValues: 'list' = None) -> 'dict':
    # Stub: documents the Redshift DescribeEventSubscriptions API; body intentionally empty.
    """
    Lists descriptions of all the Amazon Redshift event notification subscriptions for a customer account. If you specify a subscription name, lists the description for that subscription.
    If you specify both tag keys and tag values in the same request, Amazon Redshift returns all event notification subscriptions that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all subscriptions that have any combination of those values are returned.
    If both tag keys and values are omitted from the request, subscriptions are returned regardless of whether they have tag keys or values associated with them.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.describe_event_subscriptions(
    SubscriptionName='string',
    MaxRecords=123,
    Marker='string',
    TagKeys=[
    'string',
    ],
    TagValues=[
    'string',
    ]
    )
    :type SubscriptionName: string
    :param SubscriptionName: The name of the Amazon Redshift event notification subscription to be described.
    :type MaxRecords: integer
    :param MaxRecords: The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.\nDefault: 100\nConstraints: minimum 20, maximum 100.\n
    :type Marker: string
    :param Marker: An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeEventSubscriptions request exceed the value specified in MaxRecords , AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.
    :type TagKeys: list
    :param TagKeys: A tag key or keys for which you want to return all matching event notification subscriptions that are associated with the specified key or keys. For example, suppose that you have subscriptions that are tagged with keys called owner and environment . If you specify both of these tag keys in the request, Amazon Redshift returns a response with the subscriptions that have either or both of these tag keys associated with them.\n\n(string) --\n\n
    :type TagValues: list
    :param TagValues: A tag value or values for which you want to return all matching event notification subscriptions that are associated with the specified tag value or values. For example, suppose that you have subscriptions that are tagged with values called admin and test . If you specify both of these tag values in the request, Amazon Redshift returns a response with the subscriptions that have either or both of these tag values associated with them.\n\n(string) --\n\n
    :rtype: dict
    ReturnsResponse Syntax
    {
    'Marker': 'string',
    'EventSubscriptionsList': [
    {
    'CustomerAwsId': 'string',
    'CustSubscriptionId': 'string',
    'SnsTopicArn': 'string',
    'Status': 'string',
    'SubscriptionCreationTime': datetime(2015, 1, 1),
    'SourceType': 'string',
    'SourceIdsList': [
    'string',
    ],
    'EventCategoriesList': [
    'string',
    ],
    'Severity': 'string',
    'Enabled': True|False,
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    },
    ]
    }
    Response Structure
    (dict) --
    Marker (string) --
    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.
    EventSubscriptionsList (list) --
    A list of event subscriptions.
    (dict) --
    Describes event subscriptions.
    CustomerAwsId (string) --
    The AWS customer account associated with the Amazon Redshift event notification subscription.
    CustSubscriptionId (string) --
    The name of the Amazon Redshift event notification subscription.
    SnsTopicArn (string) --
    The Amazon Resource Name (ARN) of the Amazon SNS topic used by the event notification subscription.
    Status (string) --
    The status of the Amazon Redshift event notification subscription.
    Constraints:
    Can be one of the following: active | no-permission | topic-not-exist
    The status "no-permission" indicates that Amazon Redshift no longer has permission to post to the Amazon SNS topic. The status "topic-not-exist" indicates that the topic was deleted after the subscription was created.
    SubscriptionCreationTime (datetime) --
    The date and time the Amazon Redshift event notification subscription was created.
    SourceType (string) --
    The source type of the events returned by the Amazon Redshift event notification, such as cluster, cluster-snapshot, cluster-parameter-group, cluster-security-group, or scheduled-action.
    SourceIdsList (list) --
    A list of the sources that publish events to the Amazon Redshift event notification subscription.
    (string) --
    EventCategoriesList (list) --
    The list of Amazon Redshift event categories specified in the event notification subscription.
    Values: Configuration, Management, Monitoring, Security
    (string) --
    Severity (string) --
    The event severity specified in the Amazon Redshift event notification subscription.
    Values: ERROR, INFO
    Enabled (boolean) --
    A boolean value indicating whether the subscription is enabled; true indicates that the subscription is enabled.
    Tags (list) --
    The list of tags for the event subscription.
    (dict) --
    A tag consisting of a name/value pair for a resource.
    Key (string) --
    The key, or name, for the resource tag.
    Value (string) --
    The value for the resource tag.
    Exceptions
    Redshift.Client.exceptions.SubscriptionNotFoundFault
    Redshift.Client.exceptions.InvalidTagFault
    :return: {
    'Marker': 'string',
    'EventSubscriptionsList': [
    {
    'CustomerAwsId': 'string',
    'CustSubscriptionId': 'string',
    'SnsTopicArn': 'string',
    'Status': 'string',
    'SubscriptionCreationTime': datetime(2015, 1, 1),
    'SourceType': 'string',
    'SourceIdsList': [
    'string',
    ],
    'EventCategoriesList': [
    'string',
    ],
    'Severity': 'string',
    'Enabled': True|False,
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    },
    ]
    }
    :returns:
    Can be one of the following: active | no-permission | topic-not-exist
    The status "no-permission" indicates that Amazon Redshift no longer has permission to post to the Amazon SNS topic. The status "topic-not-exist" indicates that the topic was deleted after the subscription was created.
    """
    pass
def describe_events(SourceIdentifier: 'str' = None, SourceType: 'str' = None, StartTime: 'datetime' = None, EndTime: 'datetime' = None, Duration: 'int' = None, MaxRecords: 'int' = None, Marker: 'str' = None) -> 'dict':
    # Stub: documents the Redshift DescribeEvents API; body intentionally empty.
    """
    Returns events related to clusters, security groups, snapshots, and parameter groups for the past 14 days. Events specific to a particular cluster, security group, snapshot or parameter group can be obtained by providing the name as a parameter. By default, the past hour of events are returned.
    See also: AWS API Documentation
    :example: response = client.describe_events(
    SourceIdentifier='string',
    SourceType='cluster'|'cluster-parameter-group'|'cluster-security-group'|'cluster-snapshot'|'scheduled-action',
    StartTime=datetime(2015, 1, 1),
    EndTime=datetime(2015, 1, 1),
    Duration=123,
    MaxRecords=123,
    Marker='string'
    )
    :type SourceIdentifier: string
    :param SourceIdentifier: The identifier of the event source for which events will be returned. If this parameter is not specified, then all sources are included in the response.\nConstraints:\nIf SourceIdentifier is supplied, SourceType must also be provided.\n\nSpecify a cluster identifier when SourceType is cluster .\nSpecify a cluster security group name when SourceType is cluster-security-group .\nSpecify a cluster parameter group name when SourceType is cluster-parameter-group .\nSpecify a cluster snapshot identifier when SourceType is cluster-snapshot .\n\n
    :type SourceType: string
    :param SourceType: The event source to retrieve events for. If no value is specified, all events are returned.\nConstraints:\nIf SourceType is supplied, SourceIdentifier must also be provided.\n\nSpecify cluster when SourceIdentifier is a cluster identifier.\nSpecify cluster-security-group when SourceIdentifier is a cluster security group name.\nSpecify cluster-parameter-group when SourceIdentifier is a cluster parameter group name.\nSpecify cluster-snapshot when SourceIdentifier is a cluster snapshot identifier.\n\n
    :type StartTime: datetime
    :param StartTime: The beginning of the time interval to retrieve events for, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.\nExample: 2009-07-08T18:00Z\n
    :type EndTime: datetime
    :param EndTime: The end of the time interval for which to retrieve events, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.\nExample: 2009-07-08T18:00Z\n
    :type Duration: integer
    :param Duration: The number of minutes prior to the time of the request for which to retrieve events. For example, if the request is sent at 18:00 and you specify a duration of 60, then only events which have occurred after 17:00 will be returned.\nDefault: 60\n
    :type MaxRecords: integer
    :param MaxRecords: The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.\nDefault: 100\nConstraints: minimum 20, maximum 100.\n
    :type Marker: string
    :param Marker: An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeEvents request exceed the value specified in MaxRecords , AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.
    :rtype: dict
    ReturnsResponse Syntax
    {
    'Marker': 'string',
    'Events': [
    {
    'SourceIdentifier': 'string',
    'SourceType': 'cluster'|'cluster-parameter-group'|'cluster-security-group'|'cluster-snapshot'|'scheduled-action',
    'Message': 'string',
    'EventCategories': [
    'string',
    ],
    'Severity': 'string',
    'Date': datetime(2015, 1, 1),
    'EventId': 'string'
    },
    ]
    }
    Response Structure
    (dict) --
    Marker (string) --
    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.
    Events (list) --
    A list of Event instances.
    (dict) --
    Describes an event.
    SourceIdentifier (string) --
    The identifier for the source of the event.
    SourceType (string) --
    The source type for this event.
    Message (string) --
    The text of this event.
    EventCategories (list) --
    A list of the event categories.
    Values: Configuration, Management, Monitoring, Security
    (string) --
    Severity (string) --
    The severity of the event.
    Values: ERROR, INFO
    Date (datetime) --
    The date and time of the event.
    EventId (string) --
    The identifier of the event.
    :return: {
    'Marker': 'string',
    'Events': [
    {
    'SourceIdentifier': 'string',
    'SourceType': 'cluster'|'cluster-parameter-group'|'cluster-security-group'|'cluster-snapshot'|'scheduled-action',
    'Message': 'string',
    'EventCategories': [
    'string',
    ],
    'Severity': 'string',
    'Date': datetime(2015, 1, 1),
    'EventId': 'string'
    },
    ]
    }
    :returns:
    (string) --
    """
    pass
def describe_hsm_client_certificates(HsmClientCertificateIdentifier: 'str' = None, MaxRecords: 'int' = None, Marker: 'str' = None, TagKeys: 'list' = None, TagValues: 'list' = None) -> 'dict':
    # Stub: documents the Redshift DescribeHsmClientCertificates API; body intentionally empty.
    """
    Returns information about the specified HSM client certificate. If no certificate ID is specified, returns information about all the HSM certificates owned by your AWS customer account.
    If you specify both tag keys and tag values in the same request, Amazon Redshift returns all HSM client certificates that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all HSM client certificates that have any combination of those values are returned.
    If both tag keys and values are omitted from the request, HSM client certificates are returned regardless of whether they have tag keys or values associated with them.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.describe_hsm_client_certificates(
    HsmClientCertificateIdentifier='string',
    MaxRecords=123,
    Marker='string',
    TagKeys=[
    'string',
    ],
    TagValues=[
    'string',
    ]
    )
    :type HsmClientCertificateIdentifier: string
    :param HsmClientCertificateIdentifier: The identifier of a specific HSM client certificate for which you want information. If no identifier is specified, information is returned for all HSM client certificates owned by your AWS customer account.
    :type MaxRecords: integer
    :param MaxRecords: The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.\nDefault: 100\nConstraints: minimum 20, maximum 100.\n
    :type Marker: string
    :param Marker: An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeHsmClientCertificates request exceed the value specified in MaxRecords , AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.
    :type TagKeys: list
    :param TagKeys: A tag key or keys for which you want to return all matching HSM client certificates that are associated with the specified key or keys. For example, suppose that you have HSM client certificates that are tagged with keys called owner and environment . If you specify both of these tag keys in the request, Amazon Redshift returns a response with the HSM client certificates that have either or both of these tag keys associated with them.\n\n(string) --\n\n
    :type TagValues: list
    :param TagValues: A tag value or values for which you want to return all matching HSM client certificates that are associated with the specified tag value or values. For example, suppose that you have HSM client certificates that are tagged with values called admin and test . If you specify both of these tag values in the request, Amazon Redshift returns a response with the HSM client certificates that have either or both of these tag values associated with them.\n\n(string) --\n\n
    :rtype: dict
    ReturnsResponse Syntax
    {
    'Marker': 'string',
    'HsmClientCertificates': [
    {
    'HsmClientCertificateIdentifier': 'string',
    'HsmClientCertificatePublicKey': 'string',
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    },
    ]
    }
    Response Structure
    (dict) --
    Marker (string) --
    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.
    HsmClientCertificates (list) --
    A list of the identifiers for one or more HSM client certificates used by Amazon Redshift clusters to store and retrieve database encryption keys in an HSM.
    (dict) --
    Returns information about an HSM client certificate. The certificate is stored in a secure Hardware Storage Module (HSM), and used by the Amazon Redshift cluster to encrypt data files.
    HsmClientCertificateIdentifier (string) --
    The identifier of the HSM client certificate.
    HsmClientCertificatePublicKey (string) --
    The public key that the Amazon Redshift cluster will use to connect to the HSM. You must register the public key in the HSM.
    Tags (list) --
    The list of tags for the HSM client certificate.
    (dict) --
    A tag consisting of a name/value pair for a resource.
    Key (string) --
    The key, or name, for the resource tag.
    Value (string) --
    The value for the resource tag.
    Exceptions
    Redshift.Client.exceptions.HsmClientCertificateNotFoundFault
    Redshift.Client.exceptions.InvalidTagFault
    :return: {
    'Marker': 'string',
    'HsmClientCertificates': [
    {
    'HsmClientCertificateIdentifier': 'string',
    'HsmClientCertificatePublicKey': 'string',
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    },
    ]
    }
    :returns:
    Redshift.Client.exceptions.HsmClientCertificateNotFoundFault
    Redshift.Client.exceptions.InvalidTagFault
    """
    pass
def describe_hsm_configurations(HsmConfigurationIdentifier: 'str' = None, MaxRecords: 'int' = None, Marker: 'str' = None, TagKeys: 'list' = None, TagValues: 'list' = None) -> 'dict':
    # Stub: documents the Redshift DescribeHsmConfigurations API; body intentionally empty.
    """
    Returns information about the specified Amazon Redshift HSM configuration. If no configuration ID is specified, returns information about all the HSM configurations owned by your AWS customer account.
    If you specify both tag keys and tag values in the same request, Amazon Redshift returns all HSM connections that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all HSM connections that have any combination of those values are returned.
    If both tag keys and values are omitted from the request, HSM connections are returned regardless of whether they have tag keys or values associated with them.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.describe_hsm_configurations(
    HsmConfigurationIdentifier='string',
    MaxRecords=123,
    Marker='string',
    TagKeys=[
    'string',
    ],
    TagValues=[
    'string',
    ]
    )
    :type HsmConfigurationIdentifier: string
    :param HsmConfigurationIdentifier: The identifier of a specific Amazon Redshift HSM configuration to be described. If no identifier is specified, information is returned for all HSM configurations owned by your AWS customer account.
    :type MaxRecords: integer
    :param MaxRecords: The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.\nDefault: 100\nConstraints: minimum 20, maximum 100.\n
    :type Marker: string
    :param Marker: An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeHsmConfigurations request exceed the value specified in MaxRecords , AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.
    :type TagKeys: list
    :param TagKeys: A tag key or keys for which you want to return all matching HSM configurations that are associated with the specified key or keys. For example, suppose that you have HSM configurations that are tagged with keys called owner and environment . If you specify both of these tag keys in the request, Amazon Redshift returns a response with the HSM configurations that have either or both of these tag keys associated with them.\n\n(string) --\n\n
    :type TagValues: list
    :param TagValues: A tag value or values for which you want to return all matching HSM configurations that are associated with the specified tag value or values. For example, suppose that you have HSM configurations that are tagged with values called admin and test . If you specify both of these tag values in the request, Amazon Redshift returns a response with the HSM configurations that have either or both of these tag values associated with them.\n\n(string) --\n\n
    :rtype: dict
    ReturnsResponse Syntax
    {
    'Marker': 'string',
    'HsmConfigurations': [
    {
    'HsmConfigurationIdentifier': 'string',
    'Description': 'string',
    'HsmIpAddress': 'string',
    'HsmPartitionName': 'string',
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    },
    ]
    }
    Response Structure
    (dict) --
    Marker (string) --
    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.
    HsmConfigurations (list) --
    A list of HsmConfiguration objects.
    (dict) --
    Returns information about an HSM configuration, which is an object that describes to Amazon Redshift clusters the information they require to connect to an HSM where they can store database encryption keys.
    HsmConfigurationIdentifier (string) --
    The name of the Amazon Redshift HSM configuration.
    Description (string) --
    A text description of the HSM configuration.
    HsmIpAddress (string) --
    The IP address that the Amazon Redshift cluster must use to access the HSM.
    HsmPartitionName (string) --
    The name of the partition in the HSM where the Amazon Redshift clusters will store their database encryption keys.
    Tags (list) --
    The list of tags for the HSM configuration.
    (dict) --
    A tag consisting of a name/value pair for a resource.
    Key (string) --
    The key, or name, for the resource tag.
    Value (string) --
    The value for the resource tag.
    Exceptions
    Redshift.Client.exceptions.HsmConfigurationNotFoundFault
    Redshift.Client.exceptions.InvalidTagFault
    :return: {
    'Marker': 'string',
    'HsmConfigurations': [
    {
    'HsmConfigurationIdentifier': 'string',
    'Description': 'string',
    'HsmIpAddress': 'string',
    'HsmPartitionName': 'string',
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    },
    ]
    }
    :returns:
    Redshift.Client.exceptions.HsmConfigurationNotFoundFault
    Redshift.Client.exceptions.InvalidTagFault
    """
    pass
def describe_logging_status(ClusterIdentifier: str = None) -> dict:
    """
    Describes whether information, such as queries and connection attempts, is being logged for the specified Amazon Redshift cluster.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.describe_logging_status(
    ClusterIdentifier='string'
    )
    :type ClusterIdentifier: string
    :param ClusterIdentifier: [REQUIRED]\nThe identifier of the cluster from which to get the logging status.\nExample: examplecluster\n
    :rtype: dict
    ReturnsResponse Syntax{
    'LoggingEnabled': True|False,
    'BucketName': 'string',
    'S3KeyPrefix': 'string',
    'LastSuccessfulDeliveryTime': datetime(2015, 1, 1),
    'LastFailureTime': datetime(2015, 1, 1),
    'LastFailureMessage': 'string'
    }
    Response Structure
    (dict) --Describes the status of logging for a cluster.
    LoggingEnabled (boolean) --
    true if logging is on, false if logging is off.
    BucketName (string) --The name of the S3 bucket where the log files are stored.
    S3KeyPrefix (string) --The prefix applied to the log file names.
    LastSuccessfulDeliveryTime (datetime) --The last time that logs were delivered.
    LastFailureTime (datetime) --The last time when logs failed to be delivered.
    LastFailureMessage (string) --The message indicating that logs failed to be delivered.
    Exceptions
    Redshift.Client.exceptions.ClusterNotFoundFault
    :return: {
    'LoggingEnabled': True|False,
    'BucketName': 'string',
    'S3KeyPrefix': 'string',
    'LastSuccessfulDeliveryTime': datetime(2015, 1, 1),
    'LastFailureTime': datetime(2015, 1, 1),
    'LastFailureMessage': 'string'
    }
    """
    # Documentation stub: the body is intentionally empty (`pass`); no request is made here.
    pass
def describe_node_configuration_options(ActionType: str = None, ClusterIdentifier: str = None, SnapshotIdentifier: str = None, OwnerAccount: str = None, Filters: list = None, Marker: str = None, MaxRecords: int = None) -> dict:
    """
    Returns properties of possible node configurations such as node type, number of nodes, and disk usage for the specified action type.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.describe_node_configuration_options(
    ActionType='restore-cluster'|'recommend-node-config'|'resize-cluster',
    ClusterIdentifier='string',
    SnapshotIdentifier='string',
    OwnerAccount='string',
    Filters=[
    {
    'Name': 'NodeType'|'NumberOfNodes'|'EstimatedDiskUtilizationPercent'|'Mode',
    'Operator': 'eq'|'lt'|'gt'|'le'|'ge'|'in'|'between',
    'Values': [
    'string',
    ]
    },
    ],
    Marker='string',
    MaxRecords=123
    )
    :type ActionType: string
    :param ActionType: [REQUIRED]\nThe action type to evaluate for possible node configurations. Specify 'restore-cluster' to get configuration combinations based on an existing snapshot. Specify 'recommend-node-config' to get configuration recommendations based on an existing cluster or snapshot. Specify 'resize-cluster' to get configuration combinations for elastic resize based on an existing cluster.\n
    :type ClusterIdentifier: string
    :param ClusterIdentifier: The identifier of the cluster to evaluate for possible node configurations.
    :type SnapshotIdentifier: string
    :param SnapshotIdentifier: The identifier of the snapshot to evaluate for possible node configurations.
    :type OwnerAccount: string
    :param OwnerAccount: The AWS customer account used to create or copy the snapshot. Required if you are restoring a snapshot you do not own, optional if you own the snapshot.
    :type Filters: list
    :param Filters: A set of name, operator, and value items to filter the results.\n\n(dict) --A set of elements to filter the returned node configurations.\n\nName (string) --The name of the element to filter.\n\nOperator (string) --The filter operator. If filter Name is NodeType only the \'in\' operator is supported. Provide one value to evaluate for \'eq\', \'lt\', \'le\', \'gt\', and \'ge\'. Provide two values to evaluate for \'between\'. Provide a list of values for \'in\'.\n\nValues (list) --List of values. Compare Name using Operator to Values. If filter Name is NumberOfNodes, then values can range from 0 to 200. If filter Name is EstimatedDiskUtilizationPercent, then values can range from 0 to 100. For example, filter NumberOfNodes (name) GT (operator) 3 (values).\n\n(string) --\n\n\n\n\n\n
    :type Marker: string
    :param Marker: An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeNodeConfigurationOptions request exceed the value specified in MaxRecords , AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.
    :type MaxRecords: integer
    :param MaxRecords: The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.\nDefault: 500\nConstraints: minimum 100, maximum 500.\n
    :rtype: dict
    ReturnsResponse Syntax
    {
    'NodeConfigurationOptionList': [
    {
    'NodeType': 'string',
    'NumberOfNodes': 123,
    'EstimatedDiskUtilizationPercent': 123.0,
    'Mode': 'standard'|'high-performance'
    },
    ],
    'Marker': 'string'
    }
    Response Structure
    (dict) --
    NodeConfigurationOptionList (list) --
    A list of valid node configurations.
    (dict) --
    A list of node configurations.
    NodeType (string) --
    The node type, such as, "ds2.8xlarge".
    NumberOfNodes (integer) --
    The number of nodes.
    EstimatedDiskUtilizationPercent (float) --
    The estimated disk utilization percentage.
    Mode (string) --
    The category of the node configuration recommendation.
    Marker (string) --
    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.
    Exceptions
    Redshift.Client.exceptions.ClusterSnapshotNotFoundFault
    Redshift.Client.exceptions.InvalidClusterSnapshotStateFault
    Redshift.Client.exceptions.ClusterNotFoundFault
    Redshift.Client.exceptions.AccessToSnapshotDeniedFault
    :return: {
    'NodeConfigurationOptionList': [
    {
    'NodeType': 'string',
    'NumberOfNodes': 123,
    'EstimatedDiskUtilizationPercent': 123.0,
    'Mode': 'standard'|'high-performance'
    },
    ],
    'Marker': 'string'
    }
    :returns:
    Redshift.Client.exceptions.ClusterSnapshotNotFoundFault
    Redshift.Client.exceptions.InvalidClusterSnapshotStateFault
    Redshift.Client.exceptions.ClusterNotFoundFault
    Redshift.Client.exceptions.AccessToSnapshotDeniedFault
    """
    # Documentation stub: the body is intentionally empty (`pass`); no request is made here.
    pass
def describe_orderable_cluster_options(ClusterVersion: str = None, NodeType: str = None, MaxRecords: int = None, Marker: str = None) -> dict:
    """
    Returns a list of orderable cluster options. Before you create a new cluster you can use this operation to find what options are available, such as the EC2 Availability Zones (AZ) in the specific AWS Region that you can specify, and the node types you can request. The node types differ by available storage, memory, CPU and price. With the cost involved you might want to obtain a list of cluster options in the specific region and specify values when creating a cluster. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide .
    See also: AWS API Documentation
    :example: response = client.describe_orderable_cluster_options(
    ClusterVersion='string',
    NodeType='string',
    MaxRecords=123,
    Marker='string'
    )
    :type ClusterVersion: string
    :param ClusterVersion: The version filter value. Specify this parameter to show only the available offerings matching the specified version.\nDefault: All versions.\nConstraints: Must be one of the version returned from DescribeClusterVersions .\n
    :type NodeType: string
    :param NodeType: The node type filter value. Specify this parameter to show only the available offerings matching the specified node type.
    :type MaxRecords: integer
    :param MaxRecords: The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.\nDefault: 100\nConstraints: minimum 20, maximum 100.\n
    :type Marker: string
    :param Marker: An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeOrderableClusterOptions request exceed the value specified in MaxRecords , AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.
    :rtype: dict
    ReturnsResponse Syntax
    {
    'OrderableClusterOptions': [
    {
    'ClusterVersion': 'string',
    'ClusterType': 'string',
    'NodeType': 'string',
    'AvailabilityZones': [
    {
    'Name': 'string',
    'SupportedPlatforms': [
    {
    'Name': 'string'
    },
    ]
    },
    ]
    },
    ],
    'Marker': 'string'
    }
    Response Structure
    (dict) --
    Contains the output from the DescribeOrderableClusterOptions action.
    OrderableClusterOptions (list) --
    An OrderableClusterOption structure containing information about orderable options for the cluster.
    (dict) --
    Describes an orderable cluster option.
    ClusterVersion (string) --
    The version of the orderable cluster.
    ClusterType (string) --
    The cluster type, for example multi-node .
    NodeType (string) --
    The node type for the orderable cluster.
    AvailabilityZones (list) --
    A list of availability zones for the orderable cluster.
    (dict) --
    Describes an availability zone.
    Name (string) --
    The name of the availability zone.
    SupportedPlatforms (list) --
    (dict) --
    A list of supported platforms for orderable clusters.
    Name (string) --
    Marker (string) --
    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.
    :return: {
    'OrderableClusterOptions': [
    {
    'ClusterVersion': 'string',
    'ClusterType': 'string',
    'NodeType': 'string',
    'AvailabilityZones': [
    {
    'Name': 'string',
    'SupportedPlatforms': [
    {
    'Name': 'string'
    },
    ]
    },
    ]
    },
    ],
    'Marker': 'string'
    }
    :returns:
    Name (string) --
    """
    # Documentation stub: the body is intentionally empty (`pass`); no request is made here.
    pass
def describe_reserved_node_offerings(ReservedNodeOfferingId: str = None, MaxRecords: int = None, Marker: str = None) -> dict:
    """
    Returns a list of the available reserved node offerings by Amazon Redshift with their descriptions including the node type, the fixed and recurring costs of reserving the node and duration the node will be reserved for you. These descriptions help you determine which reserve node offering you want to purchase. You then use the unique offering ID in you call to PurchaseReservedNodeOffering to reserve one or more nodes for your Amazon Redshift cluster.
    For more information about reserved node offerings, go to Purchasing Reserved Nodes in the Amazon Redshift Cluster Management Guide .
    See also: AWS API Documentation
    Exceptions
    :example: response = client.describe_reserved_node_offerings(
    ReservedNodeOfferingId='string',
    MaxRecords=123,
    Marker='string'
    )
    :type ReservedNodeOfferingId: string
    :param ReservedNodeOfferingId: The unique identifier for the offering.
    :type MaxRecords: integer
    :param MaxRecords: The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.\nDefault: 100\nConstraints: minimum 20, maximum 100.\n
    :type Marker: string
    :param Marker: An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeReservedNodeOfferings request exceed the value specified in MaxRecords , AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.
    :rtype: dict
    ReturnsResponse Syntax
    {
    'Marker': 'string',
    'ReservedNodeOfferings': [
    {
    'ReservedNodeOfferingId': 'string',
    'NodeType': 'string',
    'Duration': 123,
    'FixedPrice': 123.0,
    'UsagePrice': 123.0,
    'CurrencyCode': 'string',
    'OfferingType': 'string',
    'RecurringCharges': [
    {
    'RecurringChargeAmount': 123.0,
    'RecurringChargeFrequency': 'string'
    },
    ],
    'ReservedNodeOfferingType': 'Regular'|'Upgradable'
    },
    ]
    }
    Response Structure
    (dict) --
    Marker (string) --
    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.
    ReservedNodeOfferings (list) --
    A list of ReservedNodeOffering objects.
    (dict) --
    Describes a reserved node offering.
    ReservedNodeOfferingId (string) --
    The offering identifier.
    NodeType (string) --
    The node type offered by the reserved node offering.
    Duration (integer) --
    The duration, in seconds, for which the offering will reserve the node.
    FixedPrice (float) --
    The upfront fixed charge you will pay to purchase the specific reserved node offering.
    UsagePrice (float) --
    The rate you are charged for each hour the cluster that is using the offering is running.
    CurrencyCode (string) --
    The currency code for the compute nodes offering.
    OfferingType (string) --
    The anticipated utilization of the reserved node, as defined in the reserved node offering.
    RecurringCharges (list) --
    The charge to your account regardless of whether you are creating any clusters using the node offering. Recurring charges are only in effect for heavy-utilization reserved nodes.
    (dict) --
    Describes a recurring charge.
    RecurringChargeAmount (float) --
    The amount charged per the period of time specified by the recurring charge frequency.
    RecurringChargeFrequency (string) --
    The frequency at which the recurring charge amount is applied.
    ReservedNodeOfferingType (string) --
    Exceptions
    Redshift.Client.exceptions.ReservedNodeOfferingNotFoundFault
    Redshift.Client.exceptions.UnsupportedOperationFault
    Redshift.Client.exceptions.DependentServiceUnavailableFault
    :return: {
    'Marker': 'string',
    'ReservedNodeOfferings': [
    {
    'ReservedNodeOfferingId': 'string',
    'NodeType': 'string',
    'Duration': 123,
    'FixedPrice': 123.0,
    'UsagePrice': 123.0,
    'CurrencyCode': 'string',
    'OfferingType': 'string',
    'RecurringCharges': [
    {
    'RecurringChargeAmount': 123.0,
    'RecurringChargeFrequency': 'string'
    },
    ],
    'ReservedNodeOfferingType': 'Regular'|'Upgradable'
    },
    ]
    }
    :returns:
    Redshift.Client.exceptions.ReservedNodeOfferingNotFoundFault
    Redshift.Client.exceptions.UnsupportedOperationFault
    Redshift.Client.exceptions.DependentServiceUnavailableFault
    """
    # Documentation stub: the body is intentionally empty (`pass`); no request is made here.
    pass
def describe_reserved_nodes(ReservedNodeId: str = None, MaxRecords: int = None, Marker: str = None) -> dict:
    """
    Returns the descriptions of the reserved nodes.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.describe_reserved_nodes(
    ReservedNodeId='string',
    MaxRecords=123,
    Marker='string'
    )
    :type ReservedNodeId: string
    :param ReservedNodeId: Identifier for the node reservation.
    :type MaxRecords: integer
    :param MaxRecords: The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.\nDefault: 100\nConstraints: minimum 20, maximum 100.\n
    :type Marker: string
    :param Marker: An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeReservedNodes request exceed the value specified in MaxRecords , AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.
    :rtype: dict
    ReturnsResponse Syntax
    {
    'Marker': 'string',
    'ReservedNodes': [
    {
    'ReservedNodeId': 'string',
    'ReservedNodeOfferingId': 'string',
    'NodeType': 'string',
    'StartTime': datetime(2015, 1, 1),
    'Duration': 123,
    'FixedPrice': 123.0,
    'UsagePrice': 123.0,
    'CurrencyCode': 'string',
    'NodeCount': 123,
    'State': 'string',
    'OfferingType': 'string',
    'RecurringCharges': [
    {
    'RecurringChargeAmount': 123.0,
    'RecurringChargeFrequency': 'string'
    },
    ],
    'ReservedNodeOfferingType': 'Regular'|'Upgradable'
    },
    ]
    }
    Response Structure
    (dict) --
    Marker (string) --
    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.
    ReservedNodes (list) --
    The list of ReservedNode objects.
    (dict) --
    Describes a reserved node. You can call the DescribeReservedNodeOfferings API to obtain the available reserved node offerings.
    ReservedNodeId (string) --
    The unique identifier for the reservation.
    ReservedNodeOfferingId (string) --
    The identifier for the reserved node offering.
    NodeType (string) --
    The node type of the reserved node.
    StartTime (datetime) --
    The time the reservation started. You purchase a reserved node offering for a duration. This is the start time of that duration.
    Duration (integer) --
    The duration of the node reservation in seconds.
    FixedPrice (float) --
    The fixed cost Amazon Redshift charges you for this reserved node.
    UsagePrice (float) --
    The hourly rate Amazon Redshift charges you for this reserved node.
    CurrencyCode (string) --
    The currency code for the reserved cluster.
    NodeCount (integer) --
    The number of reserved compute nodes.
    State (string) --
    The state of the reserved compute node.
    Possible Values:
    pending-payment-This reserved node has recently been purchased, and the sale has been approved, but payment has not yet been confirmed.
    active-This reserved node is owned by the caller and is available for use.
    payment-failed-Payment failed for the purchase attempt.
    retired-The reserved node is no longer available.
    exchanging-The owner is exchanging the reserved node for another reserved node.
    OfferingType (string) --
    The anticipated utilization of the reserved node, as defined in the reserved node offering.
    RecurringCharges (list) --
    The recurring charges for the reserved node.
    (dict) --
    Describes a recurring charge.
    RecurringChargeAmount (float) --
    The amount charged per the period of time specified by the recurring charge frequency.
    RecurringChargeFrequency (string) --
    The frequency at which the recurring charge amount is applied.
    ReservedNodeOfferingType (string) --
    Exceptions
    Redshift.Client.exceptions.ReservedNodeNotFoundFault
    Redshift.Client.exceptions.DependentServiceUnavailableFault
    :return: {
    'Marker': 'string',
    'ReservedNodes': [
    {
    'ReservedNodeId': 'string',
    'ReservedNodeOfferingId': 'string',
    'NodeType': 'string',
    'StartTime': datetime(2015, 1, 1),
    'Duration': 123,
    'FixedPrice': 123.0,
    'UsagePrice': 123.0,
    'CurrencyCode': 'string',
    'NodeCount': 123,
    'State': 'string',
    'OfferingType': 'string',
    'RecurringCharges': [
    {
    'RecurringChargeAmount': 123.0,
    'RecurringChargeFrequency': 'string'
    },
    ],
    'ReservedNodeOfferingType': 'Regular'|'Upgradable'
    },
    ]
    }
    :returns:
    pending-payment-This reserved node has recently been purchased, and the sale has been approved, but payment has not yet been confirmed.
    active-This reserved node is owned by the caller and is available for use.
    payment-failed-Payment failed for the purchase attempt.
    retired-The reserved node is no longer available.
    exchanging-The owner is exchanging the reserved node for another reserved node.
    """
    # Documentation stub: the body is intentionally empty (`pass`); no request is made here.
    pass
def describe_resize(ClusterIdentifier: str = None) -> dict:
    """
    Returns information about the last resize operation for the specified cluster. If no resize operation has ever been initiated for the specified cluster, an HTTP 404 error is returned. If a resize operation was initiated and completed, the status of the resize remains as SUCCEEDED until the next resize.
    A resize operation can be requested using ModifyCluster and specifying a different number or type of nodes for the cluster.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.describe_resize(
    ClusterIdentifier='string'
    )
    :type ClusterIdentifier: string
    :param ClusterIdentifier: [REQUIRED]\nThe unique identifier of a cluster whose resize progress you are requesting. This parameter is case-sensitive.\nBy default, resize operations for all clusters defined for an AWS account are returned.\n
    :rtype: dict
    ReturnsResponse Syntax{
    'TargetNodeType': 'string',
    'TargetNumberOfNodes': 123,
    'TargetClusterType': 'string',
    'Status': 'string',
    'ImportTablesCompleted': [
    'string',
    ],
    'ImportTablesInProgress': [
    'string',
    ],
    'ImportTablesNotStarted': [
    'string',
    ],
    'AvgResizeRateInMegaBytesPerSecond': 123.0,
    'TotalResizeDataInMegaBytes': 123,
    'ProgressInMegaBytes': 123,
    'ElapsedTimeInSeconds': 123,
    'EstimatedTimeToCompletionInSeconds': 123,
    'ResizeType': 'string',
    'Message': 'string',
    'TargetEncryptionType': 'string',
    'DataTransferProgressPercent': 123.0
    }
    Response Structure
    (dict) --Describes the result of a cluster resize operation.
    TargetNodeType (string) --The node type that the cluster will have after the resize operation is complete.
    TargetNumberOfNodes (integer) --The number of nodes that the cluster will have after the resize operation is complete.
    TargetClusterType (string) --The cluster type after the resize operation is complete.
    Valid Values: multi-node | single-node
    Status (string) --The status of the resize operation.
    Valid Values: NONE | IN_PROGRESS | FAILED | SUCCEEDED | CANCELLING
    ImportTablesCompleted (list) --The names of tables that have been completely imported .
    Valid Values: List of table names.
    (string) --
    ImportTablesInProgress (list) --The names of tables that are being currently imported.
    Valid Values: List of table names.
    (string) --
    ImportTablesNotStarted (list) --The names of tables that have not been yet imported.
    Valid Values: List of table names
    (string) --
    AvgResizeRateInMegaBytesPerSecond (float) --The average rate of the resize operation over the last few minutes, measured in megabytes per second. After the resize operation completes, this value shows the average rate of the entire resize operation.
    TotalResizeDataInMegaBytes (integer) --The estimated total amount of data, in megabytes, on the cluster before the resize operation began.
    ProgressInMegaBytes (integer) --While the resize operation is in progress, this value shows the current amount of data, in megabytes, that has been processed so far. When the resize operation is complete, this value shows the total amount of data, in megabytes, on the cluster, which may be more or less than TotalResizeDataInMegaBytes (the estimated total amount of data before resize).
    ElapsedTimeInSeconds (integer) --The amount of seconds that have elapsed since the resize operation began. After the resize operation completes, this value shows the total actual time, in seconds, for the resize operation.
    EstimatedTimeToCompletionInSeconds (integer) --The estimated time remaining, in seconds, until the resize operation is complete. This value is calculated based on the average resize rate and the estimated amount of data remaining to be processed. Once the resize operation is complete, this value will be 0.
    ResizeType (string) --An enum with possible values of ClassicResize and ElasticResize . These values describe the type of resize operation being performed.
    Message (string) --An optional string to provide additional details about the resize action.
    TargetEncryptionType (string) --The type of encryption for the cluster after the resize is complete.
    Possible values are KMS and None . In the China region possible values are: Legacy and None .
    DataTransferProgressPercent (float) --The percent of data transferred from source cluster to target cluster.
    Exceptions
    Redshift.Client.exceptions.ClusterNotFoundFault
    Redshift.Client.exceptions.ResizeNotFoundFault
    :return: {
    'TargetNodeType': 'string',
    'TargetNumberOfNodes': 123,
    'TargetClusterType': 'string',
    'Status': 'string',
    'ImportTablesCompleted': [
    'string',
    ],
    'ImportTablesInProgress': [
    'string',
    ],
    'ImportTablesNotStarted': [
    'string',
    ],
    'AvgResizeRateInMegaBytesPerSecond': 123.0,
    'TotalResizeDataInMegaBytes': 123,
    'ProgressInMegaBytes': 123,
    'ElapsedTimeInSeconds': 123,
    'EstimatedTimeToCompletionInSeconds': 123,
    'ResizeType': 'string',
    'Message': 'string',
    'TargetEncryptionType': 'string',
    'DataTransferProgressPercent': 123.0
    }
    :returns:
    (string) --
    """
    # Documentation stub: the body is intentionally empty (`pass`); no request is made here.
    pass
def describe_scheduled_actions(ScheduledActionName: str = None, TargetActionType: str = None, StartTime=None, EndTime=None, Active: bool = None, Filters: list = None, Marker: str = None, MaxRecords: int = None) -> dict:
    """
    Describes properties of scheduled actions.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.describe_scheduled_actions(
    ScheduledActionName='string',
    TargetActionType='ResizeCluster'|'PauseCluster'|'ResumeCluster',
    StartTime=datetime(2015, 1, 1),
    EndTime=datetime(2015, 1, 1),
    Active=True|False,
    Filters=[
    {
    'Name': 'cluster-identifier'|'iam-role',
    'Values': [
    'string',
    ]
    },
    ],
    Marker='string',
    MaxRecords=123
    )
    :type ScheduledActionName: string
    :param ScheduledActionName: The name of the scheduled action to retrieve.
    :type TargetActionType: string
    :param TargetActionType: The type of the scheduled actions to retrieve.
    :type StartTime: datetime
    :param StartTime: The start time in UTC of the scheduled actions to retrieve. Only active scheduled actions that have invocations after this time are retrieved.
    :type EndTime: datetime
    :param EndTime: The end time in UTC of the scheduled action to retrieve. Only active scheduled actions that have invocations before this time are retrieved.
    :type Active: boolean
    :param Active: If true, retrieve only active scheduled actions. If false, retrieve only disabled scheduled actions.
    :type Filters: list
    :param Filters: List of scheduled action filters.\n\n(dict) --A set of elements to filter the returned scheduled actions.\n\nName (string) -- [REQUIRED]The type of element to filter.\n\nValues (list) -- [REQUIRED]List of values. Compare if the value (of type defined by Name ) equals an item in the list of scheduled actions.\n\n(string) --\n\n\n\n\n\n
    :type Marker: string
    :param Marker: An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeScheduledActions request exceed the value specified in MaxRecords , AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.
    :type MaxRecords: integer
    :param MaxRecords: The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.\nDefault: 100\nConstraints: minimum 20, maximum 100.\n
    :rtype: dict
    ReturnsResponse Syntax
    {
    'Marker': 'string',
    'ScheduledActions': [
    {
    'ScheduledActionName': 'string',
    'TargetAction': {
    'ResizeCluster': {
    'ClusterIdentifier': 'string',
    'ClusterType': 'string',
    'NodeType': 'string',
    'NumberOfNodes': 123,
    'Classic': True|False
    },
    'PauseCluster': {
    'ClusterIdentifier': 'string'
    },
    'ResumeCluster': {
    'ClusterIdentifier': 'string'
    }
    },
    'Schedule': 'string',
    'IamRole': 'string',
    'ScheduledActionDescription': 'string',
    'State': 'ACTIVE'|'DISABLED',
    'NextInvocations': [
    datetime(2015, 1, 1),
    ],
    'StartTime': datetime(2015, 1, 1),
    'EndTime': datetime(2015, 1, 1)
    },
    ]
    }
    Response Structure
    (dict) --
    Marker (string) --
    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeScheduledActions request exceed the value specified in MaxRecords , AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.
    ScheduledActions (list) --
    List of retrieved scheduled actions.
    (dict) --
    Describes a scheduled action. You can use a scheduled action to trigger some Amazon Redshift API operations on a schedule. For information about which API operations can be scheduled, see ScheduledActionType .
    ScheduledActionName (string) --
    The name of the scheduled action.
    TargetAction (dict) --
    A JSON format string of the Amazon Redshift API operation with input parameters.
    "{\\"ResizeCluster\\":{\\"NodeType\\":\\"ds2.8xlarge\\",\\"ClusterIdentifier\\":\\"my-test-cluster\\",\\"NumberOfNodes\\":3}} ".
    ResizeCluster (dict) --
    An action that runs a ResizeCluster API operation.
    ClusterIdentifier (string) --
    The unique identifier for the cluster to resize.
    ClusterType (string) --
    The new cluster type for the specified cluster.
    NodeType (string) --
    The new node type for the nodes you are adding. If not specified, the cluster\'s current node type is used.
    NumberOfNodes (integer) --
    The new number of nodes for the cluster.
    Classic (boolean) --
    A boolean value indicating whether the resize operation is using the classic resize process. If you don\'t provide this parameter or set the value to false , the resize type is elastic.
    PauseCluster (dict) --
    An action that runs a PauseCluster API operation.
    ClusterIdentifier (string) --
    The identifier of the cluster to be paused.
    ResumeCluster (dict) --
    An action that runs a ResumeCluster API operation.
    ClusterIdentifier (string) --
    The identifier of the cluster to be resumed.
    Schedule (string) --
    The schedule for a one-time (at format) or recurring (cron format) scheduled action. Schedule invocations must be separated by at least one hour.
    Format of at expressions is "at(yyyy-mm-ddThh:mm:ss) ". For example, "at(2016-03-04T17:27:00) ".
    Format of cron expressions is "cron(Minutes Hours Day-of-month Month Day-of-week Year) ". For example, "cron(0 10 ? * MON *) ". For more information, see Cron Expressions in the Amazon CloudWatch Events User Guide .
    IamRole (string) --
    The IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift API operation in the scheduled action. This IAM role must allow the Amazon Redshift scheduler (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. For more information about the IAM role to use with the Amazon Redshift scheduler, see Using Identity-Based Policies for Amazon Redshift in the Amazon Redshift Cluster Management Guide .
    ScheduledActionDescription (string) --
    The description of the scheduled action.
    State (string) --
    The state of the scheduled action. For example, DISABLED .
    NextInvocations (list) --
    List of times when the scheduled action will run.
    (datetime) --
    StartTime (datetime) --
    The start time in UTC when the schedule is active. Before this time, the scheduled action does not trigger.
    EndTime (datetime) --
    The end time in UTC when the schedule is no longer active. After this time, the scheduled action does not trigger.
    Exceptions
    Redshift.Client.exceptions.ScheduledActionNotFoundFault
    Redshift.Client.exceptions.UnauthorizedOperation
    :return: {
    'Marker': 'string',
    'ScheduledActions': [
    {
    'ScheduledActionName': 'string',
    'TargetAction': {
    'ResizeCluster': {
    'ClusterIdentifier': 'string',
    'ClusterType': 'string',
    'NodeType': 'string',
    'NumberOfNodes': 123,
    'Classic': True|False
    },
    'PauseCluster': {
    'ClusterIdentifier': 'string'
    },
    'ResumeCluster': {
    'ClusterIdentifier': 'string'
    }
    },
    'Schedule': 'string',
    'IamRole': 'string',
    'ScheduledActionDescription': 'string',
    'State': 'ACTIVE'|'DISABLED',
    'NextInvocations': [
    datetime(2015, 1, 1),
    ],
    'StartTime': datetime(2015, 1, 1),
    'EndTime': datetime(2015, 1, 1)
    },
    ]
    }
    :returns:
    (datetime) --
    """
    # Documentation stub: the body is intentionally empty (`pass`); no request is made here.
    pass
def describe_snapshot_copy_grants(SnapshotCopyGrantName=None, MaxRecords=None, Marker=None, TagKeys=None, TagValues=None):
    """Return the snapshot copy grants owned by the AWS account in the destination region.

    A snapshot copy grant allows Amazon Redshift to encrypt copied snapshots
    with a customer master key (CMK) from AWS KMS in the destination region.
    See "Amazon Redshift Database Encryption" in the Amazon Redshift Cluster
    Management Guide for details. See also: AWS API Documentation.

    :type SnapshotCopyGrantName: string
    :param SnapshotCopyGrantName: The name of the snapshot copy grant.
    :type MaxRecords: integer
    :param MaxRecords: Maximum number of response records per call (default 100,
        minimum 20, maximum 100). When more records remain, a marker is
        returned so the next set can be retrieved.
    :type Marker: string
    :param Marker: Optional pagination token from a previous
        DescribeSnapshotCopyGrants response. You can specify either
        SnapshotCopyGrantName or Marker, but not both.
    :type TagKeys: list
    :param TagKeys: Tag key or keys; resources matching any of the keys are
        returned.\n\n(string) --\n\n
    :type TagValues: list
    :param TagValues: Tag value or values; resources matching any of the
        values are returned.\n\n(string) --\n\n
    :rtype: dict

    Response syntax::

        {
            'Marker': 'string',
            'SnapshotCopyGrants': [
                {
                    'SnapshotCopyGrantName': 'string',
                    'KmsKeyId': 'string',
                    'Tags': [
                        {
                            'Key': 'string',
                            'Value': 'string'
                        },
                    ]
                },
            ]
        }

    Response fields:
        Marker -- pagination token for the next request, if any.
        SnapshotCopyGrants -- list of SnapshotCopyGrant objects; each has the
            grant name, the KMS key id Amazon Redshift is granted permission
            to use, and a list of Key/Value tag pairs.

    Raises:
        Redshift.Client.exceptions.SnapshotCopyGrantNotFoundFault
        Redshift.Client.exceptions.InvalidTagFault
    """
    pass
def describe_snapshot_schedules(ClusterIdentifier=None, ScheduleIdentifier=None, TagKeys=None, TagValues=None, Marker=None, MaxRecords=None):
    """Return a list of snapshot schedules.

    See also: AWS API Documentation.

    :type ClusterIdentifier: string
    :param ClusterIdentifier: The unique identifier for the cluster whose
        snapshot schedules you want to view.
    :type ScheduleIdentifier: string
    :param ScheduleIdentifier: A unique identifier for a snapshot schedule.
    :type TagKeys: list
    :param TagKeys: The key value for a snapshot schedule tag.\n\n(string) --\n\n
    :type TagValues: list
    :param TagValues: The value corresponding to the key of the snapshot
        schedule tag.\n\n(string) --\n\n
    :type Marker: string
    :param Marker: Pagination token from a previous response; pass it back to
        retrieve the next set of records. Empty when all records have been
        retrieved.
    :type MaxRecords: integer
    :param MaxRecords: Maximum number of response records per call. When more
        records remain, a marker is returned so the next set can be retrieved.
    :rtype: dict

    Response syntax::

        {
            'SnapshotSchedules': [
                {
                    'ScheduleDefinitions': [
                        'string',
                    ],
                    'ScheduleIdentifier': 'string',
                    'ScheduleDescription': 'string',
                    'Tags': [
                        {
                            'Key': 'string',
                            'Value': 'string'
                        },
                    ],
                    'NextInvocations': [
                        datetime(2015, 1, 1),
                    ],
                    'AssociatedClusterCount': 123,
                    'AssociatedClusters': [
                        {
                            'ClusterIdentifier': 'string',
                            'ScheduleAssociationState': 'MODIFYING'|'ACTIVE'|'FAILED'
                        },
                    ]
                },
            ],
            'Marker': 'string'
        }

    Response fields:
        SnapshotSchedules -- list of snapshot schedules. A schedule sets a
            regular interval (or specific dates) for creating snapshots of a
            cluster. Each entry carries its definitions, identifier,
            description, tags, the next invocation times, and the clusters
            associated with it (a maximum of 100 associated clusters is
            returned).
        Marker -- pagination token for the next request; empty when all
            records have been retrieved.
    """
    pass
def describe_storage():
    """Return account-level backup storage size and provisioned storage.

    See also: AWS API Documentation.

    :rtype: dict

    Response syntax::

        {
            'TotalBackupSizeInMegaBytes': 123.0,
            'TotalProvisionedStorageInMegaBytes': 123.0
        }

    Response fields:
        TotalBackupSizeInMegaBytes (float) -- total storage currently used
            for snapshots.
        TotalProvisionedStorageInMegaBytes (float) -- total storage currently
            provisioned.
    """
    pass
def describe_table_restore_status(ClusterIdentifier=None, TableRestoreRequestId=None, MaxRecords=None, Marker=None):
    """List the status of table restore requests made with RestoreTableFromClusterSnapshot.

    When TableRestoreRequestId is omitted, the status of all table restore
    requests is returned, ordered by request date and time ascending.
    Otherwise only the status of the table identified by
    TableRestoreRequestId is returned. See also: AWS API Documentation.

    :type ClusterIdentifier: string
    :param ClusterIdentifier: The Amazon Redshift cluster that the table is
        being restored to.
    :type TableRestoreRequestId: string
    :param TableRestoreRequestId: The identifier of the table restore request
        to return status for. If you don\'t specify a TableRestoreRequestId
        value, then DescribeTableRestoreStatus returns the status of all
        in-progress table restore requests.
    :type MaxRecords: integer
    :param MaxRecords: Maximum number of records in the response; when more
        exist, a pagination marker is included.
    :type Marker: string
    :param Marker: Optional pagination token from a previous
        DescribeTableRestoreStatus request.
    :rtype: dict

    Response syntax::

        {
            'TableRestoreStatusDetails': [
                {
                    'TableRestoreRequestId': 'string',
                    'Status': 'PENDING'|'IN_PROGRESS'|'SUCCEEDED'|'FAILED'|'CANCELED',
                    'Message': 'string',
                    'RequestTime': datetime(2015, 1, 1),
                    'ProgressInMegaBytes': 123,
                    'TotalDataInMegaBytes': 123,
                    'ClusterIdentifier': 'string',
                    'SnapshotIdentifier': 'string',
                    'SourceDatabaseName': 'string',
                    'SourceSchemaName': 'string',
                    'SourceTableName': 'string',
                    'TargetDatabaseName': 'string',
                    'TargetSchemaName': 'string',
                    'NewTableName': 'string'
                },
            ],
            'Marker': 'string'
        }

    Response fields:
        TableRestoreStatusDetails -- one status record per table restore
            request: request id, current state (SUCCEEDED, FAILED, CANCELED,
            PENDING, IN_PROGRESS), a status message, the request time (UTC),
            progress and total data in megabytes, the cluster and snapshot
            identifiers, the source database/schema/table, the target
            database/schema, and the new table name.
        Marker -- pagination token for a subsequent request.

    Raises:
        Redshift.Client.exceptions.TableRestoreNotFoundFault
        Redshift.Client.exceptions.ClusterNotFoundFault
    """
    pass
def describe_tags(ResourceName=None, ResourceType=None, MaxRecords=None, Marker=None, TagKeys=None, TagValues=None):
    """Return a list of tags, filtered by resource ARN or by resource type.

    You can return tags from a specific resource by specifying an ARN, or all
    tags for a given type of resource (clusters, snapshots, and so on).

    Limitations for DescribeTags:

    * If both tag keys and tag values are supplied, all resources matching
      any combination of the specified keys and values are returned.
    * If both tag keys and values are omitted, resources are returned
      regardless of their tags.

    See also: AWS API Documentation.

    :type ResourceName: string
    :param ResourceName: The Amazon Resource Name (ARN) for which you want to
        describe the tag or tags. For example,
        arn:aws:redshift:us-east-2:123456789:cluster:t1 .
    :type ResourceType: string
    :param ResourceType: The type of resource with which you want to view
        tags. Valid resource types are:\n\nCluster\nCIDR/IP\nEC2 security
        group\nSnapshot\nCluster security group\nSubnet group\nHSM
        connection\nHSM certificate\nParameter group\nSnapshot copy grant\n\n
        For more information, see Specifying Policy Elements: Actions,
        Effects, Resources, and Principals in the Amazon Redshift Cluster
        Management Guide.\n
    :type MaxRecords: integer
    :param MaxRecords: Maximum number of response records per call. When more
        records remain, a marker is returned so the next set can be retrieved.
    :type Marker: string
    :param Marker: Pagination token from a previous response; pass it back to
        retrieve the next set of records.
    :type TagKeys: list
    :param TagKeys: Tag key or keys; resources matching any of the keys are
        returned.\n\n(string) --\n\n
    :type TagValues: list
    :param TagValues: Tag value or values; resources matching any of the
        values are returned.\n\n(string) --\n\n
    :rtype: dict

    Response syntax::

        {
            'TaggedResources': [
                {
                    'Tag': {
                        'Key': 'string',
                        'Value': 'string'
                    },
                    'ResourceName': 'string',
                    'ResourceType': 'string'
                },
            ],
            'Marker': 'string'
        }

    Response fields:
        TaggedResources -- tags with their associated resources; each entry
            has the Tag (Key/Value), the resource ARN, and the resource type
            (Cluster, CIDR/IP, EC2 security group, Snapshot, Cluster security
            group, Subnet group, HSM connection, HSM certificate, Parameter
            group).
        Marker -- pagination token for the next request; empty when all
            records have been retrieved.

    Raises:
        Redshift.Client.exceptions.ResourceNotFoundFault
        Redshift.Client.exceptions.InvalidTagFault
    """
    pass
def describe_usage_limits(UsageLimitId=None, ClusterIdentifier=None, FeatureType=None, MaxRecords=None, Marker=None, TagKeys=None, TagValues=None):
    """Show usage limits on a cluster.

    Results are filtered based on the combination of the input usage limit
    identifier, cluster identifier, and feature type parameters.
    See also: AWS API Documentation.

    :type UsageLimitId: string
    :param UsageLimitId: The identifier of the usage limit to describe.
    :type ClusterIdentifier: string
    :param ClusterIdentifier: The identifier of the cluster for which you
        want to describe usage limits.
    :type FeatureType: string
    :param FeatureType: The feature type for which you want to describe usage
        limits.
    :type MaxRecords: integer
    :param MaxRecords: Maximum number of response records per call (default
        100, minimum 20, maximum 100). When more records remain, a marker is
        returned so the next set can be retrieved.
    :type Marker: string
    :param Marker: Optional pagination token from a previous
        DescribeUsageLimits response.
    :type TagKeys: list
    :param TagKeys: Tag key or keys; usage limit objects matching any of the
        keys are returned.\n\n(string) --\n\n
    :type TagValues: list
    :param TagValues: Tag value or values; usage limit objects matching any
        of the values are returned.\n\n(string) --\n\n
    :rtype: dict

    Response syntax::

        {
            'UsageLimits': [
                {
                    'UsageLimitId': 'string',
                    'ClusterIdentifier': 'string',
                    'FeatureType': 'spectrum'|'concurrency-scaling',
                    'LimitType': 'time'|'data-scanned',
                    'Amount': 123,
                    'Period': 'daily'|'weekly'|'monthly',
                    'BreachAction': 'log'|'emit-metric'|'disable',
                    'Tags': [
                        {
                            'Key': 'string',
                            'Value': 'string'
                        },
                    ]
                },
            ],
            'Marker': 'string'
        }

    Response fields:
        UsageLimits -- the usage limit objects. Each has its identifier, the
            cluster it applies to, the feature and limit type, the limit
            amount (minutes if time-based, terabytes if data-based), the
            period (a weekly period begins on Sunday; the default is
            monthly), the breach action (log, emit-metric, or disable), and
            a list of Key/Value tags.
        Marker -- pagination token for the next request; empty when all
            records have been retrieved.

    Raises:
        Redshift.Client.exceptions.ClusterNotFoundFault
        Redshift.Client.exceptions.UnsupportedOperationFault
    """
    pass
def disable_logging(ClusterIdentifier=None):
    """Stop logging information (queries, connection attempts) for the specified cluster.

    See also: AWS API Documentation.

    :type ClusterIdentifier: string
    :param ClusterIdentifier: [REQUIRED]\nThe identifier of the cluster on which logging is to be stopped.\nExample: examplecluster\n
    :rtype: dict

    Response syntax::

        {
            'LoggingEnabled': True|False,
            'BucketName': 'string',
            'S3KeyPrefix': 'string',
            'LastSuccessfulDeliveryTime': datetime(2015, 1, 1),
            'LastFailureTime': datetime(2015, 1, 1),
            'LastFailureMessage': 'string'
        }

    Response fields (describes the status of logging for a cluster):
        LoggingEnabled (boolean) -- true if logging is on, false if off.
        BucketName (string) -- S3 bucket where the log files are stored.
        S3KeyPrefix (string) -- prefix applied to the log file names.
        LastSuccessfulDeliveryTime (datetime) -- last time logs were delivered.
        LastFailureTime (datetime) -- last time log delivery failed.
        LastFailureMessage (string) -- message for the delivery failure.

    Raises:
        Redshift.Client.exceptions.ClusterNotFoundFault
    """
    pass
def disable_snapshot_copy(ClusterIdentifier=None):
    """
    Disable the automatic copying of snapshots from one region to another
    region for a specified cluster.

    If your cluster and its snapshots are encrypted using a customer master
    key (CMK) from AWS KMS, use DeleteSnapshotCopyGrant to delete the grant
    that grants Amazon Redshift permission to the CMK in the destination
    region.

    See also: AWS API Documentation

    :example: response = client.disable_snapshot_copy(
        ClusterIdentifier='string'
    )

    :type ClusterIdentifier: string
    :param ClusterIdentifier: [REQUIRED]
        The unique identifier of the source cluster that you want to disable
        copying of snapshots to a destination region.
        Constraints: Must be the valid name of an existing cluster that has
        cross-region snapshot copy enabled.

    :rtype: dict
    :return: A dict with a single key, ``'Cluster'``, whose value describes
        the cluster. The cluster dict contains (among others) the keys:

        ClusterIdentifier (string) -- The unique identifier of the cluster.
        NodeType (string) -- The node type for the nodes in the cluster.
        ClusterStatus (string) -- The current state of the cluster, e.g.
            available, creating, deleting, modifying, paused, rebooting,
            renaming, resizing, rotating-keys, storage-full, updating-hsm,
            final-snapshot, hardware-failure, incompatible-hsm,
            incompatible-network, incompatible-parameters,
            incompatible-restore, cancelling-resize,
            "available, prep-for-resize", "available, resize-cleanup".
        ClusterAvailabilityStatus (string) -- Availability for queries:
            Available, Unavailable, Maintenance, Modifying, or Failed.
        ModifyStatus (string) -- The status of a modify operation, if any.
        MasterUsername (string) -- The master user name for the cluster.
        DBName (string) -- The name of the initial database created with
            the cluster (default: dev).
        Endpoint (dict) -- The connection endpoint: Address (string) and
            Port (integer).
        ClusterCreateTime (datetime) -- When the cluster was created.
        AutomatedSnapshotRetentionPeriod (integer) -- Days automatic
            snapshots are retained.
        ManualSnapshotRetentionPeriod (integer) -- Default days to retain a
            manual snapshot; -1 means retained indefinitely. Must be -1 or
            between 1 and 3,653.
        ClusterSecurityGroups (list of dict) -- Each with
            ClusterSecurityGroupName (string) and Status (string); used when
            the cluster is not in a VPC.
        VpcSecurityGroups (list of dict) -- Each with VpcSecurityGroupId
            (string) and Status (string); returned only for VPC clusters.
        ClusterParameterGroups (list of dict) -- Each with
            ParameterGroupName (string), ParameterApplyStatus (string), and
            ClusterParameterStatusList (list of dict with ParameterName,
            ParameterApplyStatus, ParameterApplyErrorDescription).
            Parameter apply statuses include: in-sync, pending-reboot,
            applying, invalid-parameter, apply-deferred, apply-error,
            unknown-error.
        ClusterSubnetGroupName (string) -- Subnet group name (VPC only).
        VpcId (string) -- The VPC identifier, if the cluster is in a VPC.
        AvailabilityZone (string) -- The cluster's Availability Zone.
        PreferredMaintenanceWindow (string) -- Weekly maintenance window
            (UTC).
        PendingModifiedValues (dict) -- Pending changes, with keys
            MasterUserPassword, NodeType, NumberOfNodes, ClusterType,
            ClusterVersion, AutomatedSnapshotRetentionPeriod,
            ClusterIdentifier, PubliclyAccessible, EnhancedVpcRouting,
            MaintenanceTrackName, EncryptionType.
        ClusterVersion (string) -- The Redshift engine version.
        AllowVersionUpgrade (boolean) -- Whether major version upgrades are
            applied automatically during the maintenance window.
        NumberOfNodes (integer) -- The number of compute nodes.
        PubliclyAccessible (boolean) -- Whether the cluster can be accessed
            from a public network.
        Encrypted (boolean) -- Whether data in the cluster is encrypted at
            rest.
        RestoreStatus (dict) -- Restore progress: Status,
            CurrentRestoreRateInMegaBytesPerSecond, SnapshotSizeInMegaBytes,
            ProgressInMegaBytes, ElapsedTimeInSeconds,
            EstimatedTimeToCompletionInSeconds.
        DataTransferProgress (dict) -- Transfer progress: Status,
            CurrentRateInMegaBytesPerSecond, TotalDataInMegaBytes,
            DataTransferredInMegaBytes, EstimatedTimeToCompletionInSeconds,
            ElapsedTimeInSeconds.
        HsmStatus (dict) -- HSM settings status:
            HsmClientCertificateIdentifier, HsmConfigurationIdentifier,
            Status (values: active, applying).
        ClusterSnapshotCopyStatus (dict) -- Cross-region snapshot copy
            configuration: DestinationRegion, RetentionPeriod,
            ManualSnapshotRetentionPeriod, SnapshotCopyGrantName.
        ClusterPublicKey (string) -- The public key for the cluster.
        ClusterNodes (list of dict) -- Each with NodeRole,
            PrivateIPAddress, PublicIPAddress.
        ElasticIpStatus (dict) -- ElasticIp (string) and Status (string).
        ClusterRevisionNumber (string) -- The database revision number.
        Tags (list of dict) -- Each with Key (string) and Value (string).
        KmsKeyId (string) -- The AWS KMS key ID used to encrypt data.
        EnhancedVpcRouting (boolean) -- Whether enhanced VPC routing is
            enabled (default: false).
        IamRoles (list of dict) -- Each with IamRoleArn (string) and
            ApplyStatus (string: in-sync, adding, or removing).
        PendingActions (list of string) -- Cluster operations waiting to be
            started.
        MaintenanceTrackName (string) -- The cluster's maintenance track.
        ElasticResizeNumberOfNodeOptions (string) -- Node counts available
            for elastic resize.
        DeferredMaintenanceWindows (list of dict) -- Each with
            DeferMaintenanceIdentifier, DeferMaintenanceStartTime,
            DeferMaintenanceEndTime.
        SnapshotScheduleIdentifier (string) -- Snapshot schedule identifier.
        SnapshotScheduleState (string) -- 'MODIFYING'|'ACTIVE'|'FAILED'.
        ExpectedNextSnapshotScheduleTime (datetime) -- When the next
            snapshot is expected.
        ExpectedNextSnapshotScheduleTimeStatus (string) -- OnTrack or
            Pending.
        NextMaintenanceWindowStartTime (datetime) -- When system
            maintenance can begin (UTC).
        ResizeInfo (dict) -- ResizeType (string, returns ClassicResize) and
            AllowCancelResize (boolean).

    Exceptions:
        Redshift.Client.exceptions.ClusterNotFoundFault
        Redshift.Client.exceptions.SnapshotCopyAlreadyDisabledFault
        Redshift.Client.exceptions.InvalidClusterStateFault
        Redshift.Client.exceptions.UnauthorizedOperation
    """
    # Documentation-only stub: the real implementation lives in botocore.
    pass
def enable_logging(ClusterIdentifier=None, BucketName=None, S3KeyPrefix=None):
    """
    Start logging information, such as queries and connection attempts,
    for the specified Amazon Redshift cluster.

    See also: AWS API Documentation

    :example: response = client.enable_logging(
        ClusterIdentifier='string',
        BucketName='string',
        S3KeyPrefix='string'
    )

    :type ClusterIdentifier: string
    :param ClusterIdentifier: [REQUIRED]
        The identifier of the cluster on which logging is to be started.
        Example: examplecluster

    :type BucketName: string
    :param BucketName: [REQUIRED]
        The name of an existing S3 bucket where the log files are to be
        stored.
        Constraints:
            Must be in the same region as the cluster.
            The cluster must have read bucket and put object permissions.

    :type S3KeyPrefix: string
    :param S3KeyPrefix: The prefix applied to the log file names.
        Constraints:
            Cannot exceed 512 characters.
            Cannot contain spaces( ), double quotes ('), single quotes ('),
            a backslash (), or control characters. The hexadecimal codes
            for invalid characters are: x00 to x20, x22, x27, x5c, x7f or
            larger.

    :rtype: dict
    :return: A dict describing the status of logging for the cluster::

        {
            'LoggingEnabled': True|False,
            'BucketName': 'string',
            'S3KeyPrefix': 'string',
            'LastSuccessfulDeliveryTime': datetime(2015, 1, 1),
            'LastFailureTime': datetime(2015, 1, 1),
            'LastFailureMessage': 'string'
        }

        LoggingEnabled (boolean) -- true if logging is on, false if logging is off.
        BucketName (string) -- The name of the S3 bucket where the log files are stored.
        S3KeyPrefix (string) -- The prefix applied to the log file names.
        LastSuccessfulDeliveryTime (datetime) -- The last time that logs were delivered.
        LastFailureTime (datetime) -- The last time when logs failed to be delivered.
        LastFailureMessage (string) -- The message indicating that logs failed to be delivered.

    Exceptions:
        Redshift.Client.exceptions.ClusterNotFoundFault
        Redshift.Client.exceptions.BucketNotFoundFault
        Redshift.Client.exceptions.InsufficientS3BucketPolicyFault
        Redshift.Client.exceptions.InvalidS3KeyPrefixFault
        Redshift.Client.exceptions.InvalidS3BucketNameFault
        Redshift.Client.exceptions.InvalidClusterStateFault
    """
    # Documentation-only stub: the real implementation lives in botocore.
    pass
def enable_snapshot_copy(ClusterIdentifier=None, DestinationRegion=None, RetentionPeriod=None, SnapshotCopyGrantName=None, ManualSnapshotRetentionPeriod=None):
"""
Enables the automatic copy of snapshots from one region to another region for a specified cluster.
See also: AWS API Documentation
Exceptions
:example: response = client.enable_snapshot_copy(
ClusterIdentifier='string',
DestinationRegion='string',
RetentionPeriod=123,
SnapshotCopyGrantName='string',
ManualSnapshotRetentionPeriod=123
)
:type ClusterIdentifier: string
:param ClusterIdentifier: [REQUIRED]\nThe unique identifier of the source cluster to copy snapshots from.\nConstraints: Must be the valid name of an existing cluster that does not already have cross-region snapshot copy enabled.\n
:type DestinationRegion: string
:param DestinationRegion: [REQUIRED]\nThe destination AWS Region that you want to copy snapshots to.\nConstraints: Must be the name of a valid AWS Region. For more information, see Regions and Endpoints in the Amazon Web Services General Reference.\n
:type RetentionPeriod: integer
:param RetentionPeriod: The number of days to retain automated snapshots in the destination region after they are copied from the source region.\nDefault: 7.\nConstraints: Must be at least 1 and no more than 35.\n
:type SnapshotCopyGrantName: string
:param SnapshotCopyGrantName: The name of the snapshot copy grant to use when snapshots of an AWS KMS-encrypted cluster are copied to the destination region.
:type ManualSnapshotRetentionPeriod: integer
:param ManualSnapshotRetentionPeriod: The number of days to retain newly copied snapshots in the destination AWS Region after they are copied from the source AWS Region. If the value is -1, the manual snapshot is retained indefinitely.\nThe value must be either -1 or an integer between 1 and 3,653.\n
:rtype: dict
ReturnsResponse Syntax
{
'Cluster': {
'ClusterIdentifier': 'string',
'NodeType': 'string',
'ClusterStatus': 'string',
'ClusterAvailabilityStatus': 'string',
'ModifyStatus': 'string',
'MasterUsername': 'string',
'DBName': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'ClusterCreateTime': datetime(2015, 1, 1),
'AutomatedSnapshotRetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'ClusterSecurityGroups': [
{
'ClusterSecurityGroupName': 'string',
'Status': 'string'
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'ClusterParameterGroups': [
{
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'ClusterParameterStatusList': [
{
'ParameterName': 'string',
'ParameterApplyStatus': 'string',
'ParameterApplyErrorDescription': 'string'
},
]
},
],
'ClusterSubnetGroupName': 'string',
'VpcId': 'string',
'AvailabilityZone': 'string',
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'MasterUserPassword': 'string',
'NodeType': 'string',
'NumberOfNodes': 123,
'ClusterType': 'string',
'ClusterVersion': 'string',
'AutomatedSnapshotRetentionPeriod': 123,
'ClusterIdentifier': 'string',
'PubliclyAccessible': True|False,
'EnhancedVpcRouting': True|False,
'MaintenanceTrackName': 'string',
'EncryptionType': 'string'
},
'ClusterVersion': 'string',
'AllowVersionUpgrade': True|False,
'NumberOfNodes': 123,
'PubliclyAccessible': True|False,
'Encrypted': True|False,
'RestoreStatus': {
'Status': 'string',
'CurrentRestoreRateInMegaBytesPerSecond': 123.0,
'SnapshotSizeInMegaBytes': 123,
'ProgressInMegaBytes': 123,
'ElapsedTimeInSeconds': 123,
'EstimatedTimeToCompletionInSeconds': 123
},
'DataTransferProgress': {
'Status': 'string',
'CurrentRateInMegaBytesPerSecond': 123.0,
'TotalDataInMegaBytes': 123,
'DataTransferredInMegaBytes': 123,
'EstimatedTimeToCompletionInSeconds': 123,
'ElapsedTimeInSeconds': 123
},
'HsmStatus': {
'HsmClientCertificateIdentifier': 'string',
'HsmConfigurationIdentifier': 'string',
'Status': 'string'
},
'ClusterSnapshotCopyStatus': {
'DestinationRegion': 'string',
'RetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'SnapshotCopyGrantName': 'string'
},
'ClusterPublicKey': 'string',
'ClusterNodes': [
{
'NodeRole': 'string',
'PrivateIPAddress': 'string',
'PublicIPAddress': 'string'
},
],
'ElasticIpStatus': {
'ElasticIp': 'string',
'Status': 'string'
},
'ClusterRevisionNumber': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'KmsKeyId': 'string',
'EnhancedVpcRouting': True|False,
'IamRoles': [
{
'IamRoleArn': 'string',
'ApplyStatus': 'string'
},
],
'PendingActions': [
'string',
],
'MaintenanceTrackName': 'string',
'ElasticResizeNumberOfNodeOptions': 'string',
'DeferredMaintenanceWindows': [
{
'DeferMaintenanceIdentifier': 'string',
'DeferMaintenanceStartTime': datetime(2015, 1, 1),
'DeferMaintenanceEndTime': datetime(2015, 1, 1)
},
],
'SnapshotScheduleIdentifier': 'string',
'SnapshotScheduleState': 'MODIFYING'|'ACTIVE'|'FAILED',
'ExpectedNextSnapshotScheduleTime': datetime(2015, 1, 1),
'ExpectedNextSnapshotScheduleTimeStatus': 'string',
'NextMaintenanceWindowStartTime': datetime(2015, 1, 1),
'ResizeInfo': {
'ResizeType': 'string',
'AllowCancelResize': True|False
}
}
}
Response Structure
(dict) --
Cluster (dict) --
Describes a cluster.
ClusterIdentifier (string) --
The unique identifier of the cluster.
NodeType (string) --
The node type for the nodes in the cluster.
ClusterStatus (string) --
The current state of the cluster. Possible values are the following:
available
available, prep-for-resize
available, resize-cleanup
cancelling-resize
creating
deleting
final-snapshot
hardware-failure
incompatible-hsm
incompatible-network
incompatible-parameters
incompatible-restore
modifying
paused
rebooting
renaming
resizing
rotating-keys
storage-full
updating-hsm
ClusterAvailabilityStatus (string) --
The availability status of the cluster for queries. Possible values are the following:
Available - The cluster is available for queries.
Unavailable - The cluster is not available for queries.
Maintenance - The cluster is intermittently available for queries due to maintenance activities.
Modifying - The cluster is intermittently available for queries due to changes that modify the cluster.
Failed - The cluster failed and is not available for queries.
ModifyStatus (string) --
The status of a modify operation, if any, initiated for the cluster.
MasterUsername (string) --
The master user name for the cluster. This name is used to connect to the database that is specified in the DBName parameter.
DBName (string) --
The name of the initial database that was created when the cluster was created. This same name is returned for the life of the cluster. If an initial database was not specified, a database named dev dev was created by default.
Endpoint (dict) --
The connection endpoint.
Address (string) --
The DNS address of the Cluster.
Port (integer) --
The port that the database engine is listening on.
ClusterCreateTime (datetime) --
The date and time that the cluster was created.
AutomatedSnapshotRetentionPeriod (integer) --
The number of days that automatic cluster snapshots are retained.
ManualSnapshotRetentionPeriod (integer) --
The default number of days to retain a manual snapshot. If the value is -1, the snapshot is retained indefinitely. This setting doesn\'t change the retention period of existing snapshots.
The value must be either -1 or an integer between 1 and 3,653.
ClusterSecurityGroups (list) --
A list of cluster security group that are associated with the cluster. Each security group is represented by an element that contains ClusterSecurityGroup.Name and ClusterSecurityGroup.Status subelements.
Cluster security groups are used when the cluster is not created in an Amazon Virtual Private Cloud (VPC). Clusters that are created in a VPC use VPC security groups, which are listed by the VpcSecurityGroups parameter.
(dict) --
Describes a cluster security group.
ClusterSecurityGroupName (string) --
The name of the cluster security group.
Status (string) --
The status of the cluster security group.
VpcSecurityGroups (list) --
A list of Amazon Virtual Private Cloud (Amazon VPC) security groups that are associated with the cluster. This parameter is returned only if the cluster is in a VPC.
(dict) --
Describes the members of a VPC security group.
VpcSecurityGroupId (string) --
The identifier of the VPC security group.
Status (string) --
The status of the VPC security group.
ClusterParameterGroups (list) --
The list of cluster parameter groups that are associated with this cluster. Each parameter group in the list is returned with its status.
(dict) --
Describes the status of a parameter group.
ParameterGroupName (string) --
The name of the cluster parameter group.
ParameterApplyStatus (string) --
The status of parameter updates.
ClusterParameterStatusList (list) --
The list of parameter statuses.
For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide .
(dict) --
Describes the status of a parameter group.
ParameterName (string) --
The name of the parameter.
ParameterApplyStatus (string) --
The status of the parameter that indicates whether the parameter is in sync with the database, waiting for a cluster reboot, or encountered an error when being applied.
The following are possible statuses and descriptions.
in-sync : The parameter value is in sync with the database.
pending-reboot : The parameter value will be applied after the cluster reboots.
applying : The parameter value is being applied to the database.
invalid-parameter : Cannot apply the parameter value because it has an invalid value or syntax.
apply-deferred : The parameter contains static property changes. The changes are deferred until the cluster reboots.
apply-error : Cannot connect to the cluster. The parameter change will be applied after the cluster reboots.
unknown-error : Cannot apply the parameter change right now. The change will be applied after the cluster reboots.
ParameterApplyErrorDescription (string) --
The error that prevented the parameter from being applied to the database.
ClusterSubnetGroupName (string) --
The name of the subnet group that is associated with the cluster. This parameter is valid only when the cluster is in a VPC.
VpcId (string) --
The identifier of the VPC the cluster is in, if the cluster is in a VPC.
AvailabilityZone (string) --
The name of the Availability Zone in which the cluster is located.
PreferredMaintenanceWindow (string) --
The weekly time range, in Universal Coordinated Time (UTC), during which system maintenance can occur.
PendingModifiedValues (dict) --
A value that, if present, indicates that changes to the cluster are pending. Specific pending changes are identified by subelements.
MasterUserPassword (string) --
The pending or in-progress change of the master user password for the cluster.
NodeType (string) --
The pending or in-progress change of the cluster\'s node type.
NumberOfNodes (integer) --
The pending or in-progress change of the number of nodes in the cluster.
ClusterType (string) --
The pending or in-progress change of the cluster type.
ClusterVersion (string) --
The pending or in-progress change of the service version.
AutomatedSnapshotRetentionPeriod (integer) --
The pending or in-progress change of the automated snapshot retention period.
ClusterIdentifier (string) --
The pending or in-progress change of the new identifier for the cluster.
PubliclyAccessible (boolean) --
The pending or in-progress change of the ability to connect to the cluster from the public network.
EnhancedVpcRouting (boolean) --
An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.
If this option is true , enhanced VPC routing is enabled.
Default: false
MaintenanceTrackName (string) --
The name of the maintenance track that the cluster will change to during the next maintenance window.
EncryptionType (string) --
The encryption type for a cluster. Possible values are: KMS and None. For the China region the possible values are None, and Legacy.
ClusterVersion (string) --
The version ID of the Amazon Redshift engine that is running on the cluster.
AllowVersionUpgrade (boolean) --
A boolean value that, if true , indicates that major version upgrades will be applied automatically to the cluster during the maintenance window.
NumberOfNodes (integer) --
The number of compute nodes in the cluster.
PubliclyAccessible (boolean) --
A boolean value that, if true , indicates that the cluster can be accessed from a public network.
Encrypted (boolean) --
A boolean value that, if true , indicates that data in the cluster is encrypted at rest.
RestoreStatus (dict) --
A value that describes the status of a cluster restore action. This parameter returns null if the cluster was not created by restoring a snapshot.
Status (string) --
The status of the restore action. Returns starting, restoring, completed, or failed.
CurrentRestoreRateInMegaBytesPerSecond (float) --
The number of megabytes per second being transferred from the backup storage. Returns the average rate for a completed backup. This field is only updated when you restore to DC2 and DS2 node types.
SnapshotSizeInMegaBytes (integer) --
The size of the set of snapshot data used to restore the cluster. This field is only updated when you restore to DC2 and DS2 node types.
ProgressInMegaBytes (integer) --
The number of megabytes that have been transferred from snapshot storage. This field is only updated when you restore to DC2 and DS2 node types.
ElapsedTimeInSeconds (integer) --
The amount of time an in-progress restore has been running, or the amount of time it took a completed restore to finish. This field is only updated when you restore to DC2 and DS2 node types.
EstimatedTimeToCompletionInSeconds (integer) --
The estimate of the time remaining before the restore will complete. Returns 0 for a completed restore. This field is only updated when you restore to DC2 and DS2 node types.
DataTransferProgress (dict) --
Status (string) --
Describes the status of the cluster. While the transfer is in progress the status is transferringdata .
CurrentRateInMegaBytesPerSecond (float) --
Describes the data transfer rate in MB\'s per second.
TotalDataInMegaBytes (integer) --
Describes the total amount of data to be transfered in megabytes.
DataTransferredInMegaBytes (integer) --
Describes the total amount of data that has been transfered in MB\'s.
EstimatedTimeToCompletionInSeconds (integer) --
Describes the estimated number of seconds remaining to complete the transfer.
ElapsedTimeInSeconds (integer) --
Describes the number of seconds that have elapsed during the data transfer.
HsmStatus (dict) --
A value that reports whether the Amazon Redshift cluster has finished applying any hardware security module (HSM) settings changes specified in a modify cluster command.
Values: active, applying
HsmClientCertificateIdentifier (string) --
Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM.
HsmConfigurationIdentifier (string) --
Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM.
Status (string) --
Reports whether the Amazon Redshift cluster has finished applying any HSM settings changes specified in a modify cluster command.
Values: active, applying
ClusterSnapshotCopyStatus (dict) --
A value that returns the destination region and retention period that are configured for cross-region snapshot copy.
DestinationRegion (string) --
The destination region that snapshots are automatically copied to when cross-region snapshot copy is enabled.
RetentionPeriod (integer) --
The number of days that automated snapshots are retained in the destination region after they are copied from a source region.
ManualSnapshotRetentionPeriod (integer) --
The number of days that automated snapshots are retained in the destination region after they are copied from a source region. If the value is -1, the manual snapshot is retained indefinitely.
The value must be either -1 or an integer between 1 and 3,653.
SnapshotCopyGrantName (string) --
The name of the snapshot copy grant.
ClusterPublicKey (string) --
The public key for the cluster.
ClusterNodes (list) --
The nodes in the cluster.
(dict) --
The identifier of a node in a cluster.
NodeRole (string) --
Whether the node is a leader node or a compute node.
PrivateIPAddress (string) --
The private IP address of a node within a cluster.
PublicIPAddress (string) --
The public IP address of a node within a cluster.
ElasticIpStatus (dict) --
The status of the elastic IP (EIP) address.
ElasticIp (string) --
The elastic IP (EIP) address for the cluster.
Status (string) --
The status of the elastic IP (EIP) address.
ClusterRevisionNumber (string) --
The specific revision number of the database in the cluster.
Tags (list) --
The list of tags for the cluster.
(dict) --
A tag consisting of a name/value pair for a resource.
Key (string) --
The key, or name, for the resource tag.
Value (string) --
The value for the resource tag.
KmsKeyId (string) --
The AWS Key Management Service (AWS KMS) key ID of the encryption key used to encrypt data in the cluster.
EnhancedVpcRouting (boolean) --
An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.
If this option is true , enhanced VPC routing is enabled.
Default: false
IamRoles (list) --
A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services.
(dict) --
An AWS Identity and Access Management (IAM) role that can be used by the associated Amazon Redshift cluster to access other AWS services.
IamRoleArn (string) --
The Amazon Resource Name (ARN) of the IAM role, for example, arn:aws:iam::123456789012:role/RedshiftCopyUnload .
ApplyStatus (string) --
A value that describes the status of the IAM role\'s association with an Amazon Redshift cluster.
The following are possible statuses and descriptions.
in-sync : The role is available for use by the cluster.
adding : The role is in the process of being associated with the cluster.
removing : The role is in the process of being disassociated with the cluster.
PendingActions (list) --
Cluster operations that are waiting to be started.
(string) --
MaintenanceTrackName (string) --
The name of the maintenance track for the cluster.
ElasticResizeNumberOfNodeOptions (string) --
The number of nodes that you can resize the cluster to with the elastic resize method.
DeferredMaintenanceWindows (list) --
Describes a group of DeferredMaintenanceWindow objects.
(dict) --
Describes a deferred maintenance window
DeferMaintenanceIdentifier (string) --
A unique identifier for the maintenance window.
DeferMaintenanceStartTime (datetime) --
A timestamp for the beginning of the time period when we defer maintenance.
DeferMaintenanceEndTime (datetime) --
A timestamp for the end of the time period when we defer maintenance.
SnapshotScheduleIdentifier (string) --
A unique identifier for the cluster snapshot schedule.
SnapshotScheduleState (string) --
The current state of the cluster snapshot schedule.
ExpectedNextSnapshotScheduleTime (datetime) --
The date and time when the next snapshot is expected to be taken for clusters with a valid snapshot schedule and backups enabled.
ExpectedNextSnapshotScheduleTimeStatus (string) --
The status of next expected snapshot for clusters having a valid snapshot schedule and backups enabled. Possible values are the following:
OnTrack - The next snapshot is expected to be taken on time.
Pending - The next snapshot is pending to be taken.
NextMaintenanceWindowStartTime (datetime) --
The date and time in UTC when system maintenance can begin.
ResizeInfo (dict) --
Returns the following:
AllowCancelResize: a boolean value indicating if the resize operation can be cancelled.
ResizeType: Returns ClassicResize
ResizeType (string) --
Returns the value ClassicResize .
AllowCancelResize (boolean) --
A boolean value indicating if the resize operation can be cancelled.
Exceptions
Redshift.Client.exceptions.IncompatibleOrderableOptions
Redshift.Client.exceptions.InvalidClusterStateFault
Redshift.Client.exceptions.ClusterNotFoundFault
Redshift.Client.exceptions.CopyToRegionDisabledFault
Redshift.Client.exceptions.SnapshotCopyAlreadyEnabledFault
Redshift.Client.exceptions.UnknownSnapshotCopyRegionFault
Redshift.Client.exceptions.UnauthorizedOperation
Redshift.Client.exceptions.SnapshotCopyGrantNotFoundFault
Redshift.Client.exceptions.LimitExceededFault
Redshift.Client.exceptions.DependentServiceRequestThrottlingFault
Redshift.Client.exceptions.InvalidRetentionPeriodFault
:return: {
'Cluster': {
'ClusterIdentifier': 'string',
'NodeType': 'string',
'ClusterStatus': 'string',
'ClusterAvailabilityStatus': 'string',
'ModifyStatus': 'string',
'MasterUsername': 'string',
'DBName': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'ClusterCreateTime': datetime(2015, 1, 1),
'AutomatedSnapshotRetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'ClusterSecurityGroups': [
{
'ClusterSecurityGroupName': 'string',
'Status': 'string'
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'ClusterParameterGroups': [
{
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'ClusterParameterStatusList': [
{
'ParameterName': 'string',
'ParameterApplyStatus': 'string',
'ParameterApplyErrorDescription': 'string'
},
]
},
],
'ClusterSubnetGroupName': 'string',
'VpcId': 'string',
'AvailabilityZone': 'string',
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'MasterUserPassword': 'string',
'NodeType': 'string',
'NumberOfNodes': 123,
'ClusterType': 'string',
'ClusterVersion': 'string',
'AutomatedSnapshotRetentionPeriod': 123,
'ClusterIdentifier': 'string',
'PubliclyAccessible': True|False,
'EnhancedVpcRouting': True|False,
'MaintenanceTrackName': 'string',
'EncryptionType': 'string'
},
'ClusterVersion': 'string',
'AllowVersionUpgrade': True|False,
'NumberOfNodes': 123,
'PubliclyAccessible': True|False,
'Encrypted': True|False,
'RestoreStatus': {
'Status': 'string',
'CurrentRestoreRateInMegaBytesPerSecond': 123.0,
'SnapshotSizeInMegaBytes': 123,
'ProgressInMegaBytes': 123,
'ElapsedTimeInSeconds': 123,
'EstimatedTimeToCompletionInSeconds': 123
},
'DataTransferProgress': {
'Status': 'string',
'CurrentRateInMegaBytesPerSecond': 123.0,
'TotalDataInMegaBytes': 123,
'DataTransferredInMegaBytes': 123,
'EstimatedTimeToCompletionInSeconds': 123,
'ElapsedTimeInSeconds': 123
},
'HsmStatus': {
'HsmClientCertificateIdentifier': 'string',
'HsmConfigurationIdentifier': 'string',
'Status': 'string'
},
'ClusterSnapshotCopyStatus': {
'DestinationRegion': 'string',
'RetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'SnapshotCopyGrantName': 'string'
},
'ClusterPublicKey': 'string',
'ClusterNodes': [
{
'NodeRole': 'string',
'PrivateIPAddress': 'string',
'PublicIPAddress': 'string'
},
],
'ElasticIpStatus': {
'ElasticIp': 'string',
'Status': 'string'
},
'ClusterRevisionNumber': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'KmsKeyId': 'string',
'EnhancedVpcRouting': True|False,
'IamRoles': [
{
'IamRoleArn': 'string',
'ApplyStatus': 'string'
},
],
'PendingActions': [
'string',
],
'MaintenanceTrackName': 'string',
'ElasticResizeNumberOfNodeOptions': 'string',
'DeferredMaintenanceWindows': [
{
'DeferMaintenanceIdentifier': 'string',
'DeferMaintenanceStartTime': datetime(2015, 1, 1),
'DeferMaintenanceEndTime': datetime(2015, 1, 1)
},
],
'SnapshotScheduleIdentifier': 'string',
'SnapshotScheduleState': 'MODIFYING'|'ACTIVE'|'FAILED',
'ExpectedNextSnapshotScheduleTime': datetime(2015, 1, 1),
'ExpectedNextSnapshotScheduleTimeStatus': 'string',
'NextMaintenanceWindowStartTime': datetime(2015, 1, 1),
'ResizeInfo': {
'ResizeType': 'string',
'AllowCancelResize': True|False
}
}
}
:returns:
available
available, prep-for-resize
available, resize-cleanup
cancelling-resize
creating
deleting
final-snapshot
hardware-failure
incompatible-hsm
incompatible-network
incompatible-parameters
incompatible-restore
modifying
paused
rebooting
renaming
resizing
rotating-keys
storage-full
updating-hsm
"""
pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
    """Generate a presigned URL for a client method and its arguments.

    :type ClientMethod: string
    :param ClientMethod: The client method to presign for.
    :type Params: dict
    :param Params: The parameters normally passed to ClientMethod.
    :type ExpiresIn: int
    :param ExpiresIn: The number of seconds the presigned URL remains
        valid. Defaults to one hour (3600 seconds).
    :type HttpMethod: string
    :param HttpMethod: The HTTP method to use on the generated URL. If
        not given, the method from the operation's model is used.
    """
    # Documentation stub: the real implementation is supplied by botocore
    # at runtime; this function body intentionally does nothing.
    pass
def get_cluster_credentials(DbUser=None, DbName=None, ClusterIdentifier=None, DurationSeconds=None, AutoCreate=None, DbGroups=None):
    """
    Returns a database user name and temporary password with temporary
    authorization to log on to an Amazon Redshift database. The action
    returns the database user name prefixed with ``IAM:`` if AutoCreate is
    False or ``IAMA:`` if AutoCreate is True. You can optionally specify
    one or more database user groups that the user will join at log on.
    By default, the temporary credentials expire in 900 seconds. You can
    optionally specify a duration between 900 seconds (15 minutes) and
    3600 seconds (60 minutes). For more information, see Using IAM
    Authentication to Generate Database User Credentials in the Amazon
    Redshift Cluster Management Guide.

    The AWS Identity and Access Management (IAM) user or role that
    executes GetClusterCredentials must have an IAM policy attached that
    allows access to all necessary actions and resources. For more
    information about permissions, see Resource Policies for
    GetClusterCredentials in the Amazon Redshift Cluster Management Guide.

    - If the DbGroups parameter is specified, the IAM policy must allow
      the redshift:JoinGroup action with access to the listed dbgroups.
    - If the AutoCreate parameter is set to True, then the policy must
      include the redshift:CreateClusterUser privilege.
    - If the DbName parameter is specified, the IAM policy must allow
      access to the resource dbname for the specified database name.

    See also: AWS API Documentation

    :example: response = client.get_cluster_credentials(
        DbUser='string',
        DbName='string',
        ClusterIdentifier='string',
        DurationSeconds=123,
        AutoCreate=True|False,
        DbGroups=[
            'string',
        ]
    )

    :type DbUser: string
    :param DbUser: [REQUIRED]
        The name of a database user. If a user name matching DbUser exists
        in the database, the temporary user credentials have the same
        permissions as the existing user. If DbUser doesn't exist in the
        database and AutoCreate is True, a new user is created using the
        value for DbUser with PUBLIC permissions. If a database user
        matching the value for DbUser doesn't exist and AutoCreate is
        False, then the command succeeds but the connection attempt will
        fail because the user doesn't exist in the database.
        For more information, see CREATE USER in the Amazon Redshift
        Database Developer Guide.
        Constraints:

        - Must be 1 to 64 alphanumeric characters or hyphens. The user
          name can't be PUBLIC.
        - Must contain only lowercase letters, numbers, underscore, plus
          sign, period (dot), at symbol (@), or hyphen.
        - First character must be a letter.
        - Must not contain a colon ( : ) or slash ( / ).
        - Cannot be a reserved word. A list of reserved words can be found
          in Reserved Words in the Amazon Redshift Database Developer
          Guide.

    :type DbName: string
    :param DbName: The name of a database that DbUser is authorized to log
        on to. If DbName is not specified, DbUser can log on to any
        existing database.
        Constraints:

        - Must be 1 to 64 alphanumeric characters or hyphens.
        - Must contain only lowercase letters, numbers, underscore, plus
          sign, period (dot), at symbol (@), or hyphen.
        - First character must be a letter.
        - Must not contain a colon ( : ) or slash ( / ).
        - Cannot be a reserved word. A list of reserved words can be found
          in Reserved Words in the Amazon Redshift Database Developer
          Guide.

    :type ClusterIdentifier: string
    :param ClusterIdentifier: [REQUIRED]
        The unique identifier of the cluster that contains the database
        for which you are requesting credentials. This parameter is case
        sensitive.

    :type DurationSeconds: integer
    :param DurationSeconds: The number of seconds until the returned
        temporary password expires.
        Constraint: minimum 900, maximum 3600.
        Default: 900

    :type AutoCreate: boolean
    :param AutoCreate: Create a database user with the name specified for
        the user named in DbUser if one does not exist.

    :type DbGroups: list
    :param DbGroups: A list of the names of existing database groups that
        the user named in DbUser will join for the current session, in
        addition to any group memberships for an existing user. If not
        specified, a new user is added only to PUBLIC.
        Database group name constraints:

        - Must be 1 to 64 alphanumeric characters or hyphens.
        - Must contain only lowercase letters, numbers, underscore, plus
          sign, period (dot), at symbol (@), or hyphen.
        - First character must be a letter.
        - Must not contain a colon ( : ) or slash ( / ).
        - Cannot be a reserved word. A list of reserved words can be found
          in Reserved Words in the Amazon Redshift Database Developer
          Guide.

        (string) --

    :rtype: dict

    ReturnsResponse Syntax
    {
        'DbUser': 'string',
        'DbPassword': 'string',
        'Expiration': datetime(2015, 1, 1)
    }

    Response Structure

    (dict) --
    Temporary credentials with authorization to log on to an Amazon
    Redshift database.

    DbUser (string) --
    A database user name that is authorized to log on to the database
    DbName using the password DbPassword. If the specified DbUser exists
    in the database, the new user name has the same database privileges
    as the user named in DbUser. By default, the user is added to PUBLIC.
    If the DbGroups parameter is specified, DbUser is added to the listed
    groups for any sessions created using these credentials.

    DbPassword (string) --
    A temporary password that authorizes the user name returned by DbUser
    to log on to the database DbName.

    Expiration (datetime) --
    The date and time the password in DbPassword expires.

    Exceptions

    Redshift.Client.exceptions.ClusterNotFoundFault
    Redshift.Client.exceptions.UnsupportedOperationFault

    :return: {
        'DbUser': 'string',
        'DbPassword': 'string',
        'Expiration': datetime(2015, 1, 1)
    }

    :returns:
    Redshift.Client.exceptions.ClusterNotFoundFault
    Redshift.Client.exceptions.UnsupportedOperationFault
    """
    # Documentation stub: the real implementation is supplied by botocore
    # at runtime; this function body intentionally does nothing.
    pass
def get_paginator(operation_name=None):
    """Create a paginator for an operation.

    :type operation_name: string
    :param operation_name: The operation name. This is the same name as
        the method name on the client. For example, if the method name is
        create_foo, and you'd normally invoke the operation as
        client.create_foo(**kwargs), then — provided the create_foo
        operation can be paginated — you can use the call
        client.get_paginator('create_foo').
    :rtype: L{botocore.paginate.Paginator}
    :return: A paginator object.
    """
    # Documentation stub: the real implementation is supplied by botocore
    # at runtime; this function body intentionally does nothing.
    pass
def get_reserved_node_exchange_offerings(ReservedNodeId=None, MaxRecords=None, Marker=None):
    """
    Returns an array of DC2 ReservedNodeOfferings that matches the payment
    type, term, and usage price of the given DC1 reserved node.

    See also: AWS API Documentation

    Exceptions

    :example: response = client.get_reserved_node_exchange_offerings(
        ReservedNodeId='string',
        MaxRecords=123,
        Marker='string'
    )

    :type ReservedNodeId: string
    :param ReservedNodeId: [REQUIRED]
        A string representing the node identifier for the DC1 Reserved
        Node to be exchanged.

    :type MaxRecords: integer
    :param MaxRecords: An integer setting the maximum number of
        ReservedNodeOfferings to retrieve.

    :type Marker: string
    :param Marker: A value that indicates the starting point for the next
        set of ReservedNodeOfferings.

    :rtype: dict

    ReturnsResponse Syntax
    {
        'Marker': 'string',
        'ReservedNodeOfferings': [
            {
                'ReservedNodeOfferingId': 'string',
                'NodeType': 'string',
                'Duration': 123,
                'FixedPrice': 123.0,
                'UsagePrice': 123.0,
                'CurrencyCode': 'string',
                'OfferingType': 'string',
                'RecurringCharges': [
                    {
                        'RecurringChargeAmount': 123.0,
                        'RecurringChargeFrequency': 'string'
                    },
                ],
                'ReservedNodeOfferingType': 'Regular'|'Upgradable'
            },
        ]
    }

    Response Structure

    (dict) --

    Marker (string) --
    An optional parameter that specifies the starting point for returning
    a set of response records. When the results of a
    GetReservedNodeExchangeOfferings request exceed the value specified in
    MaxRecords, Amazon Redshift returns a value in the marker field of the
    response. You can retrieve the next set of response records by
    providing the returned marker value in the marker parameter and
    retrying the request.

    ReservedNodeOfferings (list) --
    Returns an array of ReservedNodeOffering objects.

    (dict) --
    Describes a reserved node offering.

    ReservedNodeOfferingId (string) --
    The offering identifier.

    NodeType (string) --
    The node type offered by the reserved node offering.

    Duration (integer) --
    The duration, in seconds, for which the offering will reserve the
    node.

    FixedPrice (float) --
    The upfront fixed charge you will pay to purchase the specific
    reserved node offering.

    UsagePrice (float) --
    The rate you are charged for each hour the cluster that is using the
    offering is running.

    CurrencyCode (string) --
    The currency code for the compute nodes offering.

    OfferingType (string) --
    The anticipated utilization of the reserved node, as defined in the
    reserved node offering.

    RecurringCharges (list) --
    The charge to your account regardless of whether you are creating any
    clusters using the node offering. Recurring charges are only in
    effect for heavy-utilization reserved nodes.

    (dict) --
    Describes a recurring charge.

    RecurringChargeAmount (float) --
    The amount charged per the period of time specified by the recurring
    charge frequency.

    RecurringChargeFrequency (string) --
    The frequency at which the recurring charge amount is applied.

    ReservedNodeOfferingType (string) --

    Exceptions

    Redshift.Client.exceptions.ReservedNodeNotFoundFault
    Redshift.Client.exceptions.InvalidReservedNodeStateFault
    Redshift.Client.exceptions.ReservedNodeAlreadyMigratedFault
    Redshift.Client.exceptions.ReservedNodeOfferingNotFoundFault
    Redshift.Client.exceptions.UnsupportedOperationFault
    Redshift.Client.exceptions.DependentServiceUnavailableFault

    :return: {
        'Marker': 'string',
        'ReservedNodeOfferings': [
            {
                'ReservedNodeOfferingId': 'string',
                'NodeType': 'string',
                'Duration': 123,
                'FixedPrice': 123.0,
                'UsagePrice': 123.0,
                'CurrencyCode': 'string',
                'OfferingType': 'string',
                'RecurringCharges': [
                    {
                        'RecurringChargeAmount': 123.0,
                        'RecurringChargeFrequency': 'string'
                    },
                ],
                'ReservedNodeOfferingType': 'Regular'|'Upgradable'
            },
        ]
    }

    :returns:
    Redshift.Client.exceptions.ReservedNodeNotFoundFault
    Redshift.Client.exceptions.InvalidReservedNodeStateFault
    Redshift.Client.exceptions.ReservedNodeAlreadyMigratedFault
    Redshift.Client.exceptions.ReservedNodeOfferingNotFoundFault
    Redshift.Client.exceptions.UnsupportedOperationFault
    Redshift.Client.exceptions.DependentServiceUnavailableFault
    """
    # Documentation stub: the real implementation is supplied by botocore
    # at runtime; this function body intentionally does nothing.
    pass
def get_waiter(waiter_name=None):
    """Return an object that can wait for some condition.

    :type waiter_name: str
    :param waiter_name: The name of the waiter to get. See the waiters
        section of the service docs for a list of available waiters.
    :rtype: botocore.waiter.Waiter
    """
    # Documentation stub: the real implementation is supplied by botocore
    # at runtime; this function body intentionally does nothing.
    pass
def modify_cluster(ClusterIdentifier=None, ClusterType=None, NodeType=None, NumberOfNodes=None, ClusterSecurityGroups=None, VpcSecurityGroupIds=None, MasterUserPassword=None, ClusterParameterGroupName=None, AutomatedSnapshotRetentionPeriod=None, ManualSnapshotRetentionPeriod=None, PreferredMaintenanceWindow=None, ClusterVersion=None, AllowVersionUpgrade=None, HsmClientCertificateIdentifier=None, HsmConfigurationIdentifier=None, NewClusterIdentifier=None, PubliclyAccessible=None, ElasticIp=None, EnhancedVpcRouting=None, MaintenanceTrackName=None, Encrypted=None, KmsKeyId=None):
    """
    Modifies the settings for a cluster.

    You can change the node type and the number of nodes to scale the cluster
    up or down. When resizing a cluster, you must specify both the number of
    nodes and the node type even if one of the parameters does not change.

    You can add another security or parameter group, or change the master user
    password. Resetting a cluster password or modifying the security groups
    associated with a cluster does not need a reboot. However, modifying a
    parameter group requires a reboot for parameters to take effect. For more
    information about managing clusters, go to Amazon Redshift Clusters in the
    Amazon Redshift Cluster Management Guide.

    See also: AWS API Documentation (Redshift ModifyCluster).

    :type ClusterIdentifier: string
    :param ClusterIdentifier: [REQUIRED] The unique identifier of the cluster
        to be modified. Example: ``examplecluster``
    :type ClusterType: string
    :param ClusterType: The new cluster type. While the resize runs, the
        existing cluster is read-only; use DescribeResize to track progress.
        Valid Values: ``multi-node`` | ``single-node``
    :type NodeType: string
    :param NodeType: The new node type of the cluster. If you specify a new
        node type, you must also specify the number of nodes parameter.
        Valid Values: ds2.xlarge | ds2.8xlarge | dc1.large | dc1.8xlarge |
        dc2.large | dc2.8xlarge | ra3.4xlarge | ra3.16xlarge
    :type NumberOfNodes: integer
    :param NumberOfNodes: The new number of nodes of the cluster. If you
        specify a new number of nodes, you must also specify the node type
        parameter. Valid Values: integer greater than 0.
    :type ClusterSecurityGroups: list
    :param ClusterSecurityGroups: A list of cluster security groups to be
        authorized on this cluster, applied asynchronously. Groups currently
        associated but not in this list are revoked. Each name must be 1-255
        alphanumeric characters or hyphens, start with a letter, and must not
        end with a hyphen or contain two consecutive hyphens.
    :type VpcSecurityGroupIds: list
    :param VpcSecurityGroupIds: A list of virtual private cloud (VPC) security
        groups to be associated with the cluster, applied asynchronously.
    :type MasterUserPassword: string
    :param MasterUserPassword: The new password for the cluster master user,
        applied asynchronously; until complete it appears in
        ``PendingModifiedValues``. Must be 8-64 characters with at least one
        uppercase letter, one lowercase letter, and one number; any printable
        ASCII character (codes 33-126) except single quote, double quote,
        backslash, slash, at sign, or space. Default: uses existing setting.
    :type ClusterParameterGroupName: string
    :param ClusterParameterGroupName: The cluster parameter group to apply to
        this cluster. Applied only after the cluster is rebooted
        (RebootCluster). Must be in the same parameter group family as the
        cluster version. Default: uses existing setting.
    :type AutomatedSnapshotRetentionPeriod: integer
    :param AutomatedSnapshotRetentionPeriod: The number of days that automated
        snapshots are retained; 0 disables automated snapshots. Decreasing the
        period immediately deletes snapshots outside the new window.
        Constraints: 0 to 35. Default: uses existing setting.
    :type ManualSnapshotRetentionPeriod: integer
    :param ManualSnapshotRetentionPeriod: The default retention, in days, for
        newly created manual snapshots; -1 retains indefinitely. Does not
        retroactively change existing snapshots. Must be -1 or an integer
        between 1 and 3,653. Default: -1.
    :type PreferredMaintenanceWindow: string
    :param PreferredMaintenanceWindow: The weekly time range (in UTC) during
        which system maintenance can occur. Format:
        ``ddd:hh24:mi-ddd:hh24:mi``, e.g. ``wed:07:30-wed:08:00``. Must be at
        least 30 minutes; change is made immediately.
    :type ClusterVersion: string
    :param ClusterVersion: The new version number of the Amazon Redshift
        engine to upgrade to. For major version upgrades with a non-default
        parameter group, a group in the new version's family must be
        specified. Example: ``1.0``
    :type AllowVersionUpgrade: boolean
    :param AllowVersionUpgrade: If true, major version upgrades are applied
        automatically during the maintenance window. Default: false.
    :type HsmClientCertificateIdentifier: string
    :param HsmClientCertificateIdentifier: The name of the HSM client
        certificate the cluster uses to retrieve data encryption keys stored
        in an HSM.
    :type HsmConfigurationIdentifier: string
    :param HsmConfigurationIdentifier: The name of the HSM configuration that
        contains the information the cluster can use to retrieve and store
        keys in an HSM.
    :type NewClusterIdentifier: string
    :param NewClusterIdentifier: The new identifier for the cluster. Must be
        1-63 lowercase alphanumeric characters or hyphens, start with a
        letter, not end with a hyphen or contain two consecutive hyphens, and
        be unique for all clusters within an AWS account.
    :type PubliclyAccessible: boolean
    :param PubliclyAccessible: If true, the cluster can be accessed from a
        public network. Only clusters in VPCs can be made publicly available.
    :type ElasticIp: string
    :param ElasticIp: The Elastic IP (EIP) address for the cluster. The
        cluster must be provisioned in EC2-VPC and publicly accessible through
        an Internet gateway.
    :type EnhancedVpcRouting: boolean
    :param EnhancedVpcRouting: Whether to enable enhanced VPC routing; the
        cluster must be in a VPC. Default: false.
    :type MaintenanceTrackName: string
    :param MaintenanceTrackName: The maintenance track to assign to the
        cluster. The change is asynchronous: the new name stays in
        ``PendingModifiedValues`` until the next maintenance window, when the
        cluster switches to the track's latest release.
    :type Encrypted: boolean
    :param Encrypted: Whether the cluster is encrypted. If true and KmsKeyId
        is provided, the cluster is encrypted with that key; otherwise the
        default key is used (legacy encryption in the China region). If false,
        the cluster is decrypted.
    :type KmsKeyId: string
    :param KmsKeyId: The AWS Key Management Service (KMS) key ID of the
        encryption key to use to encrypt data in the cluster.

    :rtype: dict
    :return: A dict with a single key ``'Cluster'`` describing the modified
        cluster, including (among others) ``ClusterIdentifier``, ``NodeType``,
        ``ClusterStatus`` (e.g. available, creating, deleting, modifying,
        paused, rebooting, resizing), ``ClusterAvailabilityStatus``
        (Available | Unavailable | Maintenance | Modifying | Failed),
        ``Endpoint`` (``Address``/``Port``), ``ClusterSecurityGroups``,
        ``VpcSecurityGroups``, ``ClusterParameterGroups``,
        ``PendingModifiedValues``, ``RestoreStatus``, ``DataTransferProgress``
        (status and rate of an in-progress data transfer), ``HsmStatus``,
        ``ClusterSnapshotCopyStatus``, ``ClusterNodes``, ``Tags``,
        ``IamRoles``, ``DeferredMaintenanceWindows``, snapshot-schedule
        fields, and ``ResizeInfo``. If no initial database name was specified
        at creation, ``DBName`` is a database named ``dev`` created by
        default.

    Raises (service exceptions):
        Redshift.Client.exceptions.InvalidClusterStateFault
        Redshift.Client.exceptions.InvalidClusterSecurityGroupStateFault
        Redshift.Client.exceptions.ClusterNotFoundFault
        Redshift.Client.exceptions.NumberOfNodesQuotaExceededFault
        Redshift.Client.exceptions.NumberOfNodesPerClusterLimitExceededFault
        Redshift.Client.exceptions.ClusterSecurityGroupNotFoundFault
        Redshift.Client.exceptions.ClusterParameterGroupNotFoundFault
        Redshift.Client.exceptions.InsufficientClusterCapacityFault
        Redshift.Client.exceptions.UnsupportedOptionFault
        Redshift.Client.exceptions.UnauthorizedOperation
        Redshift.Client.exceptions.HsmClientCertificateNotFoundFault
        Redshift.Client.exceptions.HsmConfigurationNotFoundFault
        Redshift.Client.exceptions.ClusterAlreadyExistsFault
        Redshift.Client.exceptions.LimitExceededFault
        Redshift.Client.exceptions.DependentServiceRequestThrottlingFault
        Redshift.Client.exceptions.InvalidElasticIpFault
        Redshift.Client.exceptions.TableLimitExceededFault
        Redshift.Client.exceptions.InvalidClusterTrackFault
        Redshift.Client.exceptions.InvalidRetentionPeriodFault
    """
    # Stub module: the real implementation is provided by botocore at runtime.
    pass
def modify_cluster_db_revision(ClusterIdentifier=None, RevisionTarget=None):
    """Modify the database revision of a cluster.

    The database revision is a unique revision of the database running in
    a cluster.

    See also: AWS API Documentation for ``ModifyClusterDbRevision``.

    :example: response = client.modify_cluster_db_revision(
        ClusterIdentifier='string',
        RevisionTarget='string'
    )

    :type ClusterIdentifier: string
    :param ClusterIdentifier: [REQUIRED]
        The unique identifier of a cluster whose database revision you want
        to modify.
        Example: ``examplecluster``

    :type RevisionTarget: string
    :param RevisionTarget: [REQUIRED]
        The identifier of the database revision. You can retrieve this value
        from the response to the ``DescribeClusterDbRevisions`` request.

    :rtype: dict
    :return: A dict with a single top-level key, ``'Cluster'`` (dict),
        describing the modified cluster. Notable keys of the ``'Cluster'``
        dict:

        - ``'ClusterIdentifier'`` (string) -- unique identifier of the
          cluster.
        - ``'NodeType'`` (string) -- node type for the nodes in the cluster.
        - ``'ClusterStatus'`` (string) -- current state of the cluster.
          Possible values::

              available
              available, prep-for-resize
              available, resize-cleanup
              cancelling-resize
              creating
              deleting
              final-snapshot
              hardware-failure
              incompatible-hsm
              incompatible-network
              incompatible-parameters
              incompatible-restore
              modifying
              paused
              rebooting
              renaming
              resizing
              rotating-keys
              storage-full
              updating-hsm

        - ``'ClusterAvailabilityStatus'`` (string) -- availability status
          for queries: ``Available`` | ``Unavailable`` | ``Maintenance`` |
          ``Modifying`` | ``Failed``.
        - ``'ModifyStatus'`` (string) -- status of a modify operation, if
          any, initiated for the cluster.
        - ``'MasterUsername'`` (string) -- master user name used to connect
          to the database named in ``DBName``.
        - ``'DBName'`` (string) -- name of the initial database created with
          the cluster (default: ``dev`` if none was specified).
        - ``'Endpoint'`` (dict) -- connection endpoint:
          ``'Address'`` (string, DNS address) and ``'Port'`` (integer).
        - ``'ClusterCreateTime'`` (datetime) -- when the cluster was
          created.
        - ``'AutomatedSnapshotRetentionPeriod'`` (integer) -- days that
          automatic cluster snapshots are retained.
        - ``'ManualSnapshotRetentionPeriod'`` (integer) -- default days to
          retain a manual snapshot; ``-1`` means retained indefinitely.
          Must be ``-1`` or an integer between 1 and 3,653.
        - ``'ClusterSecurityGroups'`` (list of dict) -- each with
          ``'ClusterSecurityGroupName'`` and ``'Status'`` (used only for
          clusters outside a VPC).
        - ``'VpcSecurityGroups'`` (list of dict) -- each with
          ``'VpcSecurityGroupId'`` and ``'Status'`` (returned only for
          clusters in a VPC).
        - ``'ClusterParameterGroups'`` (list of dict) -- each with
          ``'ParameterGroupName'``, ``'ParameterApplyStatus'``, and a
          ``'ClusterParameterStatusList'`` whose entries carry
          ``'ParameterName'``, ``'ParameterApplyStatus'`` (one of
          ``in-sync``, ``pending-reboot``, ``applying``,
          ``invalid-parameter``, ``apply-deferred``, ``apply-error``,
          ``unknown-error``), and ``'ParameterApplyErrorDescription'``.
        - ``'ClusterSubnetGroupName'`` (string), ``'VpcId'`` (string),
          ``'AvailabilityZone'`` (string),
          ``'PreferredMaintenanceWindow'`` (string, weekly UTC range).
        - ``'PendingModifiedValues'`` (dict) -- pending or in-progress
          changes: ``'MasterUserPassword'``, ``'NodeType'``,
          ``'NumberOfNodes'``, ``'ClusterType'``, ``'ClusterVersion'``,
          ``'AutomatedSnapshotRetentionPeriod'``, ``'ClusterIdentifier'``,
          ``'PubliclyAccessible'``, ``'EnhancedVpcRouting'``,
          ``'MaintenanceTrackName'``, ``'EncryptionType'`` (``KMS`` or
          ``None``; for the China region: ``None`` or ``Legacy``).
        - ``'ClusterVersion'`` (string), ``'AllowVersionUpgrade'``
          (boolean), ``'NumberOfNodes'`` (integer),
          ``'PubliclyAccessible'`` (boolean), ``'Encrypted'`` (boolean).
        - ``'RestoreStatus'`` (dict) -- status of a restore from snapshot
          (``None`` if the cluster was not created from a snapshot):
          ``'Status'`` (``starting`` | ``restoring`` | ``completed`` |
          ``failed``), ``'CurrentRestoreRateInMegaBytesPerSecond'``
          (float), ``'SnapshotSizeInMegaBytes'``,
          ``'ProgressInMegaBytes'``, ``'ElapsedTimeInSeconds'``,
          ``'EstimatedTimeToCompletionInSeconds'`` (integers; updated only
          when restoring to DC2 and DS2 node types).
        - ``'DataTransferProgress'`` (dict) -- ``'Status'``
          (``transferringdata`` while in progress),
          ``'CurrentRateInMegaBytesPerSecond'`` (float),
          ``'TotalDataInMegaBytes'``, ``'DataTransferredInMegaBytes'``,
          ``'EstimatedTimeToCompletionInSeconds'``,
          ``'ElapsedTimeInSeconds'`` (integers).
        - ``'HsmStatus'`` (dict) -- whether HSM settings changes from a
          modify-cluster command have finished applying:
          ``'HsmClientCertificateIdentifier'``,
          ``'HsmConfigurationIdentifier'``, ``'Status'`` (``active`` or
          ``applying``).
        - ``'ClusterSnapshotCopyStatus'`` (dict) -- cross-region snapshot
          copy configuration: ``'DestinationRegion'``,
          ``'RetentionPeriod'``, ``'ManualSnapshotRetentionPeriod'``
          (``-1`` or 1..3,653), ``'SnapshotCopyGrantName'``.
        - ``'ClusterPublicKey'`` (string).
        - ``'ClusterNodes'`` (list of dict) -- each with ``'NodeRole'``
          (leader or compute), ``'PrivateIPAddress'``,
          ``'PublicIPAddress'``.
        - ``'ElasticIpStatus'`` (dict) -- ``'ElasticIp'`` and ``'Status'``
          of the elastic IP (EIP) address.
        - ``'ClusterRevisionNumber'`` (string) -- specific revision number
          of the database in the cluster.
        - ``'Tags'`` (list of dict) -- ``'Key'``/``'Value'`` pairs.
        - ``'KmsKeyId'`` (string) -- AWS KMS key ID used to encrypt data in
          the cluster.
        - ``'EnhancedVpcRouting'`` (boolean) -- whether enhanced VPC
          routing is enabled (default: ``false``).
        - ``'IamRoles'`` (list of dict) -- each with ``'IamRoleArn'`` and
          ``'ApplyStatus'`` (``in-sync`` | ``adding`` | ``removing``).
        - ``'PendingActions'`` (list of string) -- cluster operations
          waiting to be started.
        - ``'MaintenanceTrackName'`` (string),
          ``'ElasticResizeNumberOfNodeOptions'`` (string).
        - ``'DeferredMaintenanceWindows'`` (list of dict) -- each with
          ``'DeferMaintenanceIdentifier'``,
          ``'DeferMaintenanceStartTime'`` and
          ``'DeferMaintenanceEndTime'`` (datetimes).
        - ``'SnapshotScheduleIdentifier'`` (string),
          ``'SnapshotScheduleState'`` (``'MODIFYING'`` | ``'ACTIVE'`` |
          ``'FAILED'``),
          ``'ExpectedNextSnapshotScheduleTime'`` (datetime),
          ``'ExpectedNextSnapshotScheduleTimeStatus'`` (string;
          ``OnTrack`` or ``Pending``),
          ``'NextMaintenanceWindowStartTime'`` (datetime).
        - ``'ResizeInfo'`` (dict) -- ``'ResizeType'`` (returns
          ``ClassicResize``) and ``'AllowCancelResize'`` (boolean
          indicating if the resize operation can be cancelled).

    :raises: Service exceptions:

        - ``Redshift.Client.exceptions.ClusterNotFoundFault``
        - ``Redshift.Client.exceptions.ClusterOnLatestRevisionFault``
        - ``Redshift.Client.exceptions.InvalidClusterStateFault``

    NOTE(review): this is an autogenerated documentation stub; the function
    body is intentionally a no-op (``pass``).
    """
    pass
def modify_cluster_iam_roles(ClusterIdentifier=None, AddIamRoles=None, RemoveIamRoles=None):
"""
Modifies the list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services.
A cluster can have up to 10 IAM roles associated at any time.
See also: AWS API Documentation
Exceptions
:example: response = client.modify_cluster_iam_roles(
ClusterIdentifier='string',
AddIamRoles=[
'string',
],
RemoveIamRoles=[
'string',
]
)
:type ClusterIdentifier: string
:param ClusterIdentifier: [REQUIRED]\nThe unique identifier of the cluster for which you want to associate or disassociate IAM roles.\n
:type AddIamRoles: list
:param AddIamRoles: Zero or more IAM roles to associate with the cluster. The roles must be in their Amazon Resource Name (ARN) format. You can associate up to 10 IAM roles with a single cluster in a single request.\n\n(string) --\n\n
:type RemoveIamRoles: list
:param RemoveIamRoles: Zero or more IAM roles in ARN format to disassociate from the cluster. You can disassociate up to 10 IAM roles from a single cluster in a single request.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Cluster': {
'ClusterIdentifier': 'string',
'NodeType': 'string',
'ClusterStatus': 'string',
'ClusterAvailabilityStatus': 'string',
'ModifyStatus': 'string',
'MasterUsername': 'string',
'DBName': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'ClusterCreateTime': datetime(2015, 1, 1),
'AutomatedSnapshotRetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'ClusterSecurityGroups': [
{
'ClusterSecurityGroupName': 'string',
'Status': 'string'
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'ClusterParameterGroups': [
{
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'ClusterParameterStatusList': [
{
'ParameterName': 'string',
'ParameterApplyStatus': 'string',
'ParameterApplyErrorDescription': 'string'
},
]
},
],
'ClusterSubnetGroupName': 'string',
'VpcId': 'string',
'AvailabilityZone': 'string',
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'MasterUserPassword': 'string',
'NodeType': 'string',
'NumberOfNodes': 123,
'ClusterType': 'string',
'ClusterVersion': 'string',
'AutomatedSnapshotRetentionPeriod': 123,
'ClusterIdentifier': 'string',
'PubliclyAccessible': True|False,
'EnhancedVpcRouting': True|False,
'MaintenanceTrackName': 'string',
'EncryptionType': 'string'
},
'ClusterVersion': 'string',
'AllowVersionUpgrade': True|False,
'NumberOfNodes': 123,
'PubliclyAccessible': True|False,
'Encrypted': True|False,
'RestoreStatus': {
'Status': 'string',
'CurrentRestoreRateInMegaBytesPerSecond': 123.0,
'SnapshotSizeInMegaBytes': 123,
'ProgressInMegaBytes': 123,
'ElapsedTimeInSeconds': 123,
'EstimatedTimeToCompletionInSeconds': 123
},
'DataTransferProgress': {
'Status': 'string',
'CurrentRateInMegaBytesPerSecond': 123.0,
'TotalDataInMegaBytes': 123,
'DataTransferredInMegaBytes': 123,
'EstimatedTimeToCompletionInSeconds': 123,
'ElapsedTimeInSeconds': 123
},
'HsmStatus': {
'HsmClientCertificateIdentifier': 'string',
'HsmConfigurationIdentifier': 'string',
'Status': 'string'
},
'ClusterSnapshotCopyStatus': {
'DestinationRegion': 'string',
'RetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'SnapshotCopyGrantName': 'string'
},
'ClusterPublicKey': 'string',
'ClusterNodes': [
{
'NodeRole': 'string',
'PrivateIPAddress': 'string',
'PublicIPAddress': 'string'
},
],
'ElasticIpStatus': {
'ElasticIp': 'string',
'Status': 'string'
},
'ClusterRevisionNumber': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'KmsKeyId': 'string',
'EnhancedVpcRouting': True|False,
'IamRoles': [
{
'IamRoleArn': 'string',
'ApplyStatus': 'string'
},
],
'PendingActions': [
'string',
],
'MaintenanceTrackName': 'string',
'ElasticResizeNumberOfNodeOptions': 'string',
'DeferredMaintenanceWindows': [
{
'DeferMaintenanceIdentifier': 'string',
'DeferMaintenanceStartTime': datetime(2015, 1, 1),
'DeferMaintenanceEndTime': datetime(2015, 1, 1)
},
],
'SnapshotScheduleIdentifier': 'string',
'SnapshotScheduleState': 'MODIFYING'|'ACTIVE'|'FAILED',
'ExpectedNextSnapshotScheduleTime': datetime(2015, 1, 1),
'ExpectedNextSnapshotScheduleTimeStatus': 'string',
'NextMaintenanceWindowStartTime': datetime(2015, 1, 1),
'ResizeInfo': {
'ResizeType': 'string',
'AllowCancelResize': True|False
}
}
}
Response Structure
(dict) --
Cluster (dict) --
Describes a cluster.
ClusterIdentifier (string) --
The unique identifier of the cluster.
NodeType (string) --
The node type for the nodes in the cluster.
ClusterStatus (string) --
The current state of the cluster. Possible values are the following:
available
available, prep-for-resize
available, resize-cleanup
cancelling-resize
creating
deleting
final-snapshot
hardware-failure
incompatible-hsm
incompatible-network
incompatible-parameters
incompatible-restore
modifying
paused
rebooting
renaming
resizing
rotating-keys
storage-full
updating-hsm
ClusterAvailabilityStatus (string) --
The availability status of the cluster for queries. Possible values are the following:
Available - The cluster is available for queries.
Unavailable - The cluster is not available for queries.
Maintenance - The cluster is intermittently available for queries due to maintenance activities.
Modifying - The cluster is intermittently available for queries due to changes that modify the cluster.
Failed - The cluster failed and is not available for queries.
ModifyStatus (string) --
The status of a modify operation, if any, initiated for the cluster.
MasterUsername (string) --
The master user name for the cluster. This name is used to connect to the database that is specified in the DBName parameter.
DBName (string) --
The name of the initial database that was created when the cluster was created. This same name is returned for the life of the cluster. If an initial database was not specified, a database named dev was created by default.
Endpoint (dict) --
The connection endpoint.
Address (string) --
The DNS address of the Cluster.
Port (integer) --
The port that the database engine is listening on.
ClusterCreateTime (datetime) --
The date and time that the cluster was created.
AutomatedSnapshotRetentionPeriod (integer) --
The number of days that automatic cluster snapshots are retained.
ManualSnapshotRetentionPeriod (integer) --
The default number of days to retain a manual snapshot. If the value is -1, the snapshot is retained indefinitely. This setting doesn\'t change the retention period of existing snapshots.
The value must be either -1 or an integer between 1 and 3,653.
ClusterSecurityGroups (list) --
A list of cluster security group that are associated with the cluster. Each security group is represented by an element that contains ClusterSecurityGroup.Name and ClusterSecurityGroup.Status subelements.
Cluster security groups are used when the cluster is not created in an Amazon Virtual Private Cloud (VPC). Clusters that are created in a VPC use VPC security groups, which are listed by the VpcSecurityGroups parameter.
(dict) --
Describes a cluster security group.
ClusterSecurityGroupName (string) --
The name of the cluster security group.
Status (string) --
The status of the cluster security group.
VpcSecurityGroups (list) --
A list of Amazon Virtual Private Cloud (Amazon VPC) security groups that are associated with the cluster. This parameter is returned only if the cluster is in a VPC.
(dict) --
Describes the members of a VPC security group.
VpcSecurityGroupId (string) --
The identifier of the VPC security group.
Status (string) --
The status of the VPC security group.
ClusterParameterGroups (list) --
The list of cluster parameter groups that are associated with this cluster. Each parameter group in the list is returned with its status.
(dict) --
Describes the status of a parameter group.
ParameterGroupName (string) --
The name of the cluster parameter group.
ParameterApplyStatus (string) --
The status of parameter updates.
ClusterParameterStatusList (list) --
The list of parameter statuses.
For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide .
(dict) --
Describes the status of a parameter group.
ParameterName (string) --
The name of the parameter.
ParameterApplyStatus (string) --
The status of the parameter that indicates whether the parameter is in sync with the database, waiting for a cluster reboot, or encountered an error when being applied.
The following are possible statuses and descriptions.
in-sync : The parameter value is in sync with the database.
pending-reboot : The parameter value will be applied after the cluster reboots.
applying : The parameter value is being applied to the database.
invalid-parameter : Cannot apply the parameter value because it has an invalid value or syntax.
apply-deferred : The parameter contains static property changes. The changes are deferred until the cluster reboots.
apply-error : Cannot connect to the cluster. The parameter change will be applied after the cluster reboots.
unknown-error : Cannot apply the parameter change right now. The change will be applied after the cluster reboots.
ParameterApplyErrorDescription (string) --
The error that prevented the parameter from being applied to the database.
ClusterSubnetGroupName (string) --
The name of the subnet group that is associated with the cluster. This parameter is valid only when the cluster is in a VPC.
VpcId (string) --
The identifier of the VPC the cluster is in, if the cluster is in a VPC.
AvailabilityZone (string) --
The name of the Availability Zone in which the cluster is located.
PreferredMaintenanceWindow (string) --
The weekly time range, in Universal Coordinated Time (UTC), during which system maintenance can occur.
PendingModifiedValues (dict) --
A value that, if present, indicates that changes to the cluster are pending. Specific pending changes are identified by subelements.
MasterUserPassword (string) --
The pending or in-progress change of the master user password for the cluster.
NodeType (string) --
The pending or in-progress change of the cluster\'s node type.
NumberOfNodes (integer) --
The pending or in-progress change of the number of nodes in the cluster.
ClusterType (string) --
The pending or in-progress change of the cluster type.
ClusterVersion (string) --
The pending or in-progress change of the service version.
AutomatedSnapshotRetentionPeriod (integer) --
The pending or in-progress change of the automated snapshot retention period.
ClusterIdentifier (string) --
The pending or in-progress change of the new identifier for the cluster.
PubliclyAccessible (boolean) --
The pending or in-progress change of the ability to connect to the cluster from the public network.
EnhancedVpcRouting (boolean) --
An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.
If this option is true , enhanced VPC routing is enabled.
Default: false
MaintenanceTrackName (string) --
The name of the maintenance track that the cluster will change to during the next maintenance window.
EncryptionType (string) --
The encryption type for a cluster. Possible values are: KMS and None. For the China region the possible values are None, and Legacy.
ClusterVersion (string) --
The version ID of the Amazon Redshift engine that is running on the cluster.
AllowVersionUpgrade (boolean) --
A boolean value that, if true , indicates that major version upgrades will be applied automatically to the cluster during the maintenance window.
NumberOfNodes (integer) --
The number of compute nodes in the cluster.
PubliclyAccessible (boolean) --
A boolean value that, if true , indicates that the cluster can be accessed from a public network.
Encrypted (boolean) --
A boolean value that, if true , indicates that data in the cluster is encrypted at rest.
RestoreStatus (dict) --
A value that describes the status of a cluster restore action. This parameter returns null if the cluster was not created by restoring a snapshot.
Status (string) --
The status of the restore action. Returns starting, restoring, completed, or failed.
CurrentRestoreRateInMegaBytesPerSecond (float) --
The number of megabytes per second being transferred from the backup storage. Returns the average rate for a completed backup. This field is only updated when you restore to DC2 and DS2 node types.
SnapshotSizeInMegaBytes (integer) --
The size of the set of snapshot data used to restore the cluster. This field is only updated when you restore to DC2 and DS2 node types.
ProgressInMegaBytes (integer) --
The number of megabytes that have been transferred from snapshot storage. This field is only updated when you restore to DC2 and DS2 node types.
ElapsedTimeInSeconds (integer) --
The amount of time an in-progress restore has been running, or the amount of time it took a completed restore to finish. This field is only updated when you restore to DC2 and DS2 node types.
EstimatedTimeToCompletionInSeconds (integer) --
The estimate of the time remaining before the restore will complete. Returns 0 for a completed restore. This field is only updated when you restore to DC2 and DS2 node types.
DataTransferProgress (dict) --
Status (string) --
Describes the status of the cluster. While the transfer is in progress the status is transferringdata .
CurrentRateInMegaBytesPerSecond (float) --
Describes the data transfer rate in MB\'s per second.
TotalDataInMegaBytes (integer) --
Describes the total amount of data to be transferred in megabytes.
DataTransferredInMegaBytes (integer) --
Describes the total amount of data that has been transferred in MB\'s.
EstimatedTimeToCompletionInSeconds (integer) --
Describes the estimated number of seconds remaining to complete the transfer.
ElapsedTimeInSeconds (integer) --
Describes the number of seconds that have elapsed during the data transfer.
HsmStatus (dict) --
A value that reports whether the Amazon Redshift cluster has finished applying any hardware security module (HSM) settings changes specified in a modify cluster command.
Values: active, applying
HsmClientCertificateIdentifier (string) --
Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM.
HsmConfigurationIdentifier (string) --
Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM.
Status (string) --
Reports whether the Amazon Redshift cluster has finished applying any HSM settings changes specified in a modify cluster command.
Values: active, applying
ClusterSnapshotCopyStatus (dict) --
A value that returns the destination region and retention period that are configured for cross-region snapshot copy.
DestinationRegion (string) --
The destination region that snapshots are automatically copied to when cross-region snapshot copy is enabled.
RetentionPeriod (integer) --
The number of days that automated snapshots are retained in the destination region after they are copied from a source region.
ManualSnapshotRetentionPeriod (integer) --
The number of days that automated snapshots are retained in the destination region after they are copied from a source region. If the value is -1, the manual snapshot is retained indefinitely.
The value must be either -1 or an integer between 1 and 3,653.
SnapshotCopyGrantName (string) --
The name of the snapshot copy grant.
ClusterPublicKey (string) --
The public key for the cluster.
ClusterNodes (list) --
The nodes in the cluster.
(dict) --
The identifier of a node in a cluster.
NodeRole (string) --
Whether the node is a leader node or a compute node.
PrivateIPAddress (string) --
The private IP address of a node within a cluster.
PublicIPAddress (string) --
The public IP address of a node within a cluster.
ElasticIpStatus (dict) --
The status of the elastic IP (EIP) address.
ElasticIp (string) --
The elastic IP (EIP) address for the cluster.
Status (string) --
The status of the elastic IP (EIP) address.
ClusterRevisionNumber (string) --
The specific revision number of the database in the cluster.
Tags (list) --
The list of tags for the cluster.
(dict) --
A tag consisting of a name/value pair for a resource.
Key (string) --
The key, or name, for the resource tag.
Value (string) --
The value for the resource tag.
KmsKeyId (string) --
The AWS Key Management Service (AWS KMS) key ID of the encryption key used to encrypt data in the cluster.
EnhancedVpcRouting (boolean) --
An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.
If this option is true , enhanced VPC routing is enabled.
Default: false
IamRoles (list) --
A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services.
(dict) --
An AWS Identity and Access Management (IAM) role that can be used by the associated Amazon Redshift cluster to access other AWS services.
IamRoleArn (string) --
The Amazon Resource Name (ARN) of the IAM role, for example, arn:aws:iam::123456789012:role/RedshiftCopyUnload .
ApplyStatus (string) --
A value that describes the status of the IAM role\'s association with an Amazon Redshift cluster.
The following are possible statuses and descriptions.
in-sync : The role is available for use by the cluster.
adding : The role is in the process of being associated with the cluster.
removing : The role is in the process of being disassociated with the cluster.
PendingActions (list) --
Cluster operations that are waiting to be started.
(string) --
MaintenanceTrackName (string) --
The name of the maintenance track for the cluster.
ElasticResizeNumberOfNodeOptions (string) --
The number of nodes that you can resize the cluster to with the elastic resize method.
DeferredMaintenanceWindows (list) --
Describes a group of DeferredMaintenanceWindow objects.
(dict) --
Describes a deferred maintenance window
DeferMaintenanceIdentifier (string) --
A unique identifier for the maintenance window.
DeferMaintenanceStartTime (datetime) --
A timestamp for the beginning of the time period when we defer maintenance.
DeferMaintenanceEndTime (datetime) --
A timestamp for the end of the time period when we defer maintenance.
SnapshotScheduleIdentifier (string) --
A unique identifier for the cluster snapshot schedule.
SnapshotScheduleState (string) --
The current state of the cluster snapshot schedule.
ExpectedNextSnapshotScheduleTime (datetime) --
The date and time when the next snapshot is expected to be taken for clusters with a valid snapshot schedule and backups enabled.
ExpectedNextSnapshotScheduleTimeStatus (string) --
The status of next expected snapshot for clusters having a valid snapshot schedule and backups enabled. Possible values are the following:
OnTrack - The next snapshot is expected to be taken on time.
Pending - The next snapshot is pending to be taken.
NextMaintenanceWindowStartTime (datetime) --
The date and time in UTC when system maintenance can begin.
ResizeInfo (dict) --
Returns the following:
AllowCancelResize: a boolean value indicating if the resize operation can be cancelled.
ResizeType: Returns ClassicResize
ResizeType (string) --
Returns the value ClassicResize .
AllowCancelResize (boolean) --
A boolean value indicating if the resize operation can be cancelled.
Exceptions
Redshift.Client.exceptions.InvalidClusterStateFault
Redshift.Client.exceptions.ClusterNotFoundFault
:return: {
'Cluster': {
'ClusterIdentifier': 'string',
'NodeType': 'string',
'ClusterStatus': 'string',
'ClusterAvailabilityStatus': 'string',
'ModifyStatus': 'string',
'MasterUsername': 'string',
'DBName': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'ClusterCreateTime': datetime(2015, 1, 1),
'AutomatedSnapshotRetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'ClusterSecurityGroups': [
{
'ClusterSecurityGroupName': 'string',
'Status': 'string'
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'ClusterParameterGroups': [
{
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'ClusterParameterStatusList': [
{
'ParameterName': 'string',
'ParameterApplyStatus': 'string',
'ParameterApplyErrorDescription': 'string'
},
]
},
],
'ClusterSubnetGroupName': 'string',
'VpcId': 'string',
'AvailabilityZone': 'string',
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'MasterUserPassword': 'string',
'NodeType': 'string',
'NumberOfNodes': 123,
'ClusterType': 'string',
'ClusterVersion': 'string',
'AutomatedSnapshotRetentionPeriod': 123,
'ClusterIdentifier': 'string',
'PubliclyAccessible': True|False,
'EnhancedVpcRouting': True|False,
'MaintenanceTrackName': 'string',
'EncryptionType': 'string'
},
'ClusterVersion': 'string',
'AllowVersionUpgrade': True|False,
'NumberOfNodes': 123,
'PubliclyAccessible': True|False,
'Encrypted': True|False,
'RestoreStatus': {
'Status': 'string',
'CurrentRestoreRateInMegaBytesPerSecond': 123.0,
'SnapshotSizeInMegaBytes': 123,
'ProgressInMegaBytes': 123,
'ElapsedTimeInSeconds': 123,
'EstimatedTimeToCompletionInSeconds': 123
},
'DataTransferProgress': {
'Status': 'string',
'CurrentRateInMegaBytesPerSecond': 123.0,
'TotalDataInMegaBytes': 123,
'DataTransferredInMegaBytes': 123,
'EstimatedTimeToCompletionInSeconds': 123,
'ElapsedTimeInSeconds': 123
},
'HsmStatus': {
'HsmClientCertificateIdentifier': 'string',
'HsmConfigurationIdentifier': 'string',
'Status': 'string'
},
'ClusterSnapshotCopyStatus': {
'DestinationRegion': 'string',
'RetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'SnapshotCopyGrantName': 'string'
},
'ClusterPublicKey': 'string',
'ClusterNodes': [
{
'NodeRole': 'string',
'PrivateIPAddress': 'string',
'PublicIPAddress': 'string'
},
],
'ElasticIpStatus': {
'ElasticIp': 'string',
'Status': 'string'
},
'ClusterRevisionNumber': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'KmsKeyId': 'string',
'EnhancedVpcRouting': True|False,
'IamRoles': [
{
'IamRoleArn': 'string',
'ApplyStatus': 'string'
},
],
'PendingActions': [
'string',
],
'MaintenanceTrackName': 'string',
'ElasticResizeNumberOfNodeOptions': 'string',
'DeferredMaintenanceWindows': [
{
'DeferMaintenanceIdentifier': 'string',
'DeferMaintenanceStartTime': datetime(2015, 1, 1),
'DeferMaintenanceEndTime': datetime(2015, 1, 1)
},
],
'SnapshotScheduleIdentifier': 'string',
'SnapshotScheduleState': 'MODIFYING'|'ACTIVE'|'FAILED',
'ExpectedNextSnapshotScheduleTime': datetime(2015, 1, 1),
'ExpectedNextSnapshotScheduleTimeStatus': 'string',
'NextMaintenanceWindowStartTime': datetime(2015, 1, 1),
'ResizeInfo': {
'ResizeType': 'string',
'AllowCancelResize': True|False
}
}
}
:returns:
available
available, prep-for-resize
available, resize-cleanup
cancelling-resize
creating
deleting
final-snapshot
hardware-failure
incompatible-hsm
incompatible-network
incompatible-parameters
incompatible-restore
modifying
paused
rebooting
renaming
resizing
rotating-keys
storage-full
updating-hsm
"""
pass
def modify_cluster_maintenance(ClusterIdentifier=None, DeferMaintenance=None, DeferMaintenanceIdentifier=None, DeferMaintenanceStartTime=None, DeferMaintenanceEndTime=None, DeferMaintenanceDuration=None):
"""
Modifies the maintenance settings of a cluster.
See also: AWS API Documentation
Exceptions
:example: response = client.modify_cluster_maintenance(
ClusterIdentifier='string',
DeferMaintenance=True|False,
DeferMaintenanceIdentifier='string',
DeferMaintenanceStartTime=datetime(2015, 1, 1),
DeferMaintenanceEndTime=datetime(2015, 1, 1),
DeferMaintenanceDuration=123
)
:type ClusterIdentifier: string
:param ClusterIdentifier: [REQUIRED]\nA unique identifier for the cluster.\n
:type DeferMaintenance: boolean
:param DeferMaintenance: A boolean indicating whether to enable the deferred maintenance window.
:type DeferMaintenanceIdentifier: string
:param DeferMaintenanceIdentifier: A unique identifier for the deferred maintenance window.
:type DeferMaintenanceStartTime: datetime
:param DeferMaintenanceStartTime: A timestamp indicating the start time for the deferred maintenance window.
:type DeferMaintenanceEndTime: datetime
:param DeferMaintenanceEndTime: A timestamp indicating end time for the deferred maintenance window. If you specify an end time, you can\'t specify a duration.
:type DeferMaintenanceDuration: integer
:param DeferMaintenanceDuration: An integer indicating the duration of the maintenance window in days. If you specify a duration, you can\'t specify an end time. The duration must be 45 days or less.
:rtype: dict
ReturnsResponse Syntax
{
'Cluster': {
'ClusterIdentifier': 'string',
'NodeType': 'string',
'ClusterStatus': 'string',
'ClusterAvailabilityStatus': 'string',
'ModifyStatus': 'string',
'MasterUsername': 'string',
'DBName': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'ClusterCreateTime': datetime(2015, 1, 1),
'AutomatedSnapshotRetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'ClusterSecurityGroups': [
{
'ClusterSecurityGroupName': 'string',
'Status': 'string'
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'ClusterParameterGroups': [
{
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'ClusterParameterStatusList': [
{
'ParameterName': 'string',
'ParameterApplyStatus': 'string',
'ParameterApplyErrorDescription': 'string'
},
]
},
],
'ClusterSubnetGroupName': 'string',
'VpcId': 'string',
'AvailabilityZone': 'string',
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'MasterUserPassword': 'string',
'NodeType': 'string',
'NumberOfNodes': 123,
'ClusterType': 'string',
'ClusterVersion': 'string',
'AutomatedSnapshotRetentionPeriod': 123,
'ClusterIdentifier': 'string',
'PubliclyAccessible': True|False,
'EnhancedVpcRouting': True|False,
'MaintenanceTrackName': 'string',
'EncryptionType': 'string'
},
'ClusterVersion': 'string',
'AllowVersionUpgrade': True|False,
'NumberOfNodes': 123,
'PubliclyAccessible': True|False,
'Encrypted': True|False,
'RestoreStatus': {
'Status': 'string',
'CurrentRestoreRateInMegaBytesPerSecond': 123.0,
'SnapshotSizeInMegaBytes': 123,
'ProgressInMegaBytes': 123,
'ElapsedTimeInSeconds': 123,
'EstimatedTimeToCompletionInSeconds': 123
},
'DataTransferProgress': {
'Status': 'string',
'CurrentRateInMegaBytesPerSecond': 123.0,
'TotalDataInMegaBytes': 123,
'DataTransferredInMegaBytes': 123,
'EstimatedTimeToCompletionInSeconds': 123,
'ElapsedTimeInSeconds': 123
},
'HsmStatus': {
'HsmClientCertificateIdentifier': 'string',
'HsmConfigurationIdentifier': 'string',
'Status': 'string'
},
'ClusterSnapshotCopyStatus': {
'DestinationRegion': 'string',
'RetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'SnapshotCopyGrantName': 'string'
},
'ClusterPublicKey': 'string',
'ClusterNodes': [
{
'NodeRole': 'string',
'PrivateIPAddress': 'string',
'PublicIPAddress': 'string'
},
],
'ElasticIpStatus': {
'ElasticIp': 'string',
'Status': 'string'
},
'ClusterRevisionNumber': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'KmsKeyId': 'string',
'EnhancedVpcRouting': True|False,
'IamRoles': [
{
'IamRoleArn': 'string',
'ApplyStatus': 'string'
},
],
'PendingActions': [
'string',
],
'MaintenanceTrackName': 'string',
'ElasticResizeNumberOfNodeOptions': 'string',
'DeferredMaintenanceWindows': [
{
'DeferMaintenanceIdentifier': 'string',
'DeferMaintenanceStartTime': datetime(2015, 1, 1),
'DeferMaintenanceEndTime': datetime(2015, 1, 1)
},
],
'SnapshotScheduleIdentifier': 'string',
'SnapshotScheduleState': 'MODIFYING'|'ACTIVE'|'FAILED',
'ExpectedNextSnapshotScheduleTime': datetime(2015, 1, 1),
'ExpectedNextSnapshotScheduleTimeStatus': 'string',
'NextMaintenanceWindowStartTime': datetime(2015, 1, 1),
'ResizeInfo': {
'ResizeType': 'string',
'AllowCancelResize': True|False
}
}
}
Response Structure
(dict) --
Cluster (dict) --
Describes a cluster.
ClusterIdentifier (string) --
The unique identifier of the cluster.
NodeType (string) --
The node type for the nodes in the cluster.
ClusterStatus (string) --
The current state of the cluster. Possible values are the following:
available
available, prep-for-resize
available, resize-cleanup
cancelling-resize
creating
deleting
final-snapshot
hardware-failure
incompatible-hsm
incompatible-network
incompatible-parameters
incompatible-restore
modifying
paused
rebooting
renaming
resizing
rotating-keys
storage-full
updating-hsm
ClusterAvailabilityStatus (string) --
The availability status of the cluster for queries. Possible values are the following:
Available - The cluster is available for queries.
Unavailable - The cluster is not available for queries.
Maintenance - The cluster is intermittently available for queries due to maintenance activities.
Modifying - The cluster is intermittently available for queries due to changes that modify the cluster.
Failed - The cluster failed and is not available for queries.
ModifyStatus (string) --
The status of a modify operation, if any, initiated for the cluster.
MasterUsername (string) --
The master user name for the cluster. This name is used to connect to the database that is specified in the DBName parameter.
DBName (string) --
The name of the initial database that was created when the cluster was created. This same name is returned for the life of the cluster. If an initial database was not specified, a database named dev was created by default.
Endpoint (dict) --
The connection endpoint.
Address (string) --
The DNS address of the Cluster.
Port (integer) --
The port that the database engine is listening on.
ClusterCreateTime (datetime) --
The date and time that the cluster was created.
AutomatedSnapshotRetentionPeriod (integer) --
The number of days that automatic cluster snapshots are retained.
ManualSnapshotRetentionPeriod (integer) --
The default number of days to retain a manual snapshot. If the value is -1, the snapshot is retained indefinitely. This setting doesn\'t change the retention period of existing snapshots.
The value must be either -1 or an integer between 1 and 3,653.
ClusterSecurityGroups (list) --
A list of cluster security group that are associated with the cluster. Each security group is represented by an element that contains ClusterSecurityGroup.Name and ClusterSecurityGroup.Status subelements.
Cluster security groups are used when the cluster is not created in an Amazon Virtual Private Cloud (VPC). Clusters that are created in a VPC use VPC security groups, which are listed by the VpcSecurityGroups parameter.
(dict) --
Describes a cluster security group.
ClusterSecurityGroupName (string) --
The name of the cluster security group.
Status (string) --
The status of the cluster security group.
VpcSecurityGroups (list) --
A list of Amazon Virtual Private Cloud (Amazon VPC) security groups that are associated with the cluster. This parameter is returned only if the cluster is in a VPC.
(dict) --
Describes the members of a VPC security group.
VpcSecurityGroupId (string) --
The identifier of the VPC security group.
Status (string) --
The status of the VPC security group.
ClusterParameterGroups (list) --
The list of cluster parameter groups that are associated with this cluster. Each parameter group in the list is returned with its status.
(dict) --
Describes the status of a parameter group.
ParameterGroupName (string) --
The name of the cluster parameter group.
ParameterApplyStatus (string) --
The status of parameter updates.
ClusterParameterStatusList (list) --
The list of parameter statuses.
For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide .
(dict) --
Describes the status of a parameter group.
ParameterName (string) --
The name of the parameter.
ParameterApplyStatus (string) --
The status of the parameter that indicates whether the parameter is in sync with the database, waiting for a cluster reboot, or encountered an error when being applied.
The following are possible statuses and descriptions.
in-sync : The parameter value is in sync with the database.
pending-reboot : The parameter value will be applied after the cluster reboots.
applying : The parameter value is being applied to the database.
invalid-parameter : Cannot apply the parameter value because it has an invalid value or syntax.
apply-deferred : The parameter contains static property changes. The changes are deferred until the cluster reboots.
apply-error : Cannot connect to the cluster. The parameter change will be applied after the cluster reboots.
unknown-error : Cannot apply the parameter change right now. The change will be applied after the cluster reboots.
ParameterApplyErrorDescription (string) --
The error that prevented the parameter from being applied to the database.
ClusterSubnetGroupName (string) --
The name of the subnet group that is associated with the cluster. This parameter is valid only when the cluster is in a VPC.
VpcId (string) --
The identifier of the VPC the cluster is in, if the cluster is in a VPC.
AvailabilityZone (string) --
The name of the Availability Zone in which the cluster is located.
PreferredMaintenanceWindow (string) --
The weekly time range, in Universal Coordinated Time (UTC), during which system maintenance can occur.
PendingModifiedValues (dict) --
A value that, if present, indicates that changes to the cluster are pending. Specific pending changes are identified by subelements.
MasterUserPassword (string) --
The pending or in-progress change of the master user password for the cluster.
NodeType (string) --
The pending or in-progress change of the cluster\'s node type.
NumberOfNodes (integer) --
The pending or in-progress change of the number of nodes in the cluster.
ClusterType (string) --
The pending or in-progress change of the cluster type.
ClusterVersion (string) --
The pending or in-progress change of the service version.
AutomatedSnapshotRetentionPeriod (integer) --
The pending or in-progress change of the automated snapshot retention period.
ClusterIdentifier (string) --
The pending or in-progress change of the new identifier for the cluster.
PubliclyAccessible (boolean) --
The pending or in-progress change of the ability to connect to the cluster from the public network.
EnhancedVpcRouting (boolean) --
An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.
If this option is true , enhanced VPC routing is enabled.
Default: false
MaintenanceTrackName (string) --
The name of the maintenance track that the cluster will change to during the next maintenance window.
EncryptionType (string) --
The encryption type for a cluster. Possible values are: KMS and None. For the China region the possible values are None, and Legacy.
ClusterVersion (string) --
The version ID of the Amazon Redshift engine that is running on the cluster.
AllowVersionUpgrade (boolean) --
A boolean value that, if true , indicates that major version upgrades will be applied automatically to the cluster during the maintenance window.
NumberOfNodes (integer) --
The number of compute nodes in the cluster.
PubliclyAccessible (boolean) --
A boolean value that, if true , indicates that the cluster can be accessed from a public network.
Encrypted (boolean) --
A boolean value that, if true , indicates that data in the cluster is encrypted at rest.
RestoreStatus (dict) --
A value that describes the status of a cluster restore action. This parameter returns null if the cluster was not created by restoring a snapshot.
Status (string) --
The status of the restore action. Returns starting, restoring, completed, or failed.
CurrentRestoreRateInMegaBytesPerSecond (float) --
The number of megabytes per second being transferred from the backup storage. Returns the average rate for a completed backup. This field is only updated when you restore to DC2 and DS2 node types.
SnapshotSizeInMegaBytes (integer) --
The size of the set of snapshot data used to restore the cluster. This field is only updated when you restore to DC2 and DS2 node types.
ProgressInMegaBytes (integer) --
The number of megabytes that have been transferred from snapshot storage. This field is only updated when you restore to DC2 and DS2 node types.
ElapsedTimeInSeconds (integer) --
The amount of time an in-progress restore has been running, or the amount of time it took a completed restore to finish. This field is only updated when you restore to DC2 and DS2 node types.
EstimatedTimeToCompletionInSeconds (integer) --
The estimate of the time remaining before the restore will complete. Returns 0 for a completed restore. This field is only updated when you restore to DC2 and DS2 node types.
DataTransferProgress (dict) --
Status (string) --
Describes the status of the cluster. While the transfer is in progress the status is transferringdata .
CurrentRateInMegaBytesPerSecond (float) --
Describes the data transfer rate in MB\'s per second.
TotalDataInMegaBytes (integer) --
Describes the total amount of data to be transferred in megabytes.
DataTransferredInMegaBytes (integer) --
Describes the total amount of data that has been transferred in MB\'s.
EstimatedTimeToCompletionInSeconds (integer) --
Describes the estimated number of seconds remaining to complete the transfer.
ElapsedTimeInSeconds (integer) --
Describes the number of seconds that have elapsed during the data transfer.
HsmStatus (dict) --
A value that reports whether the Amazon Redshift cluster has finished applying any hardware security module (HSM) settings changes specified in a modify cluster command.
Values: active, applying
HsmClientCertificateIdentifier (string) --
Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM.
HsmConfigurationIdentifier (string) --
Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM.
Status (string) --
Reports whether the Amazon Redshift cluster has finished applying any HSM settings changes specified in a modify cluster command.
Values: active, applying
ClusterSnapshotCopyStatus (dict) --
A value that returns the destination region and retention period that are configured for cross-region snapshot copy.
DestinationRegion (string) --
The destination region that snapshots are automatically copied to when cross-region snapshot copy is enabled.
RetentionPeriod (integer) --
The number of days that automated snapshots are retained in the destination region after they are copied from a source region.
ManualSnapshotRetentionPeriod (integer) --
The number of days that automated snapshots are retained in the destination region after they are copied from a source region. If the value is -1, the manual snapshot is retained indefinitely.
The value must be either -1 or an integer between 1 and 3,653.
SnapshotCopyGrantName (string) --
The name of the snapshot copy grant.
ClusterPublicKey (string) --
The public key for the cluster.
ClusterNodes (list) --
The nodes in the cluster.
(dict) --
The identifier of a node in a cluster.
NodeRole (string) --
Whether the node is a leader node or a compute node.
PrivateIPAddress (string) --
The private IP address of a node within a cluster.
PublicIPAddress (string) --
The public IP address of a node within a cluster.
ElasticIpStatus (dict) --
The status of the elastic IP (EIP) address.
ElasticIp (string) --
The elastic IP (EIP) address for the cluster.
Status (string) --
The status of the elastic IP (EIP) address.
ClusterRevisionNumber (string) --
The specific revision number of the database in the cluster.
Tags (list) --
The list of tags for the cluster.
(dict) --
A tag consisting of a name/value pair for a resource.
Key (string) --
The key, or name, for the resource tag.
Value (string) --
The value for the resource tag.
KmsKeyId (string) --
The AWS Key Management Service (AWS KMS) key ID of the encryption key used to encrypt data in the cluster.
EnhancedVpcRouting (boolean) --
An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.
If this option is true , enhanced VPC routing is enabled.
Default: false
IamRoles (list) --
A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services.
(dict) --
An AWS Identity and Access Management (IAM) role that can be used by the associated Amazon Redshift cluster to access other AWS services.
IamRoleArn (string) --
The Amazon Resource Name (ARN) of the IAM role, for example, arn:aws:iam::123456789012:role/RedshiftCopyUnload .
ApplyStatus (string) --
A value that describes the status of the IAM role\'s association with an Amazon Redshift cluster.
The following are possible statuses and descriptions.
in-sync : The role is available for use by the cluster.
adding : The role is in the process of being associated with the cluster.
removing : The role is in the process of being disassociated with the cluster.
PendingActions (list) --
Cluster operations that are waiting to be started.
(string) --
MaintenanceTrackName (string) --
The name of the maintenance track for the cluster.
ElasticResizeNumberOfNodeOptions (string) --
The number of nodes that you can resize the cluster to with the elastic resize method.
DeferredMaintenanceWindows (list) --
Describes a group of DeferredMaintenanceWindow objects.
(dict) --
Describes a deferred maintenance window
DeferMaintenanceIdentifier (string) --
A unique identifier for the maintenance window.
DeferMaintenanceStartTime (datetime) --
A timestamp for the beginning of the time period when we defer maintenance.
DeferMaintenanceEndTime (datetime) --
A timestamp for the end of the time period when we defer maintenance.
SnapshotScheduleIdentifier (string) --
A unique identifier for the cluster snapshot schedule.
SnapshotScheduleState (string) --
The current state of the cluster snapshot schedule.
ExpectedNextSnapshotScheduleTime (datetime) --
The date and time when the next snapshot is expected to be taken for clusters with a valid snapshot schedule and backups enabled.
ExpectedNextSnapshotScheduleTimeStatus (string) --
The status of next expected snapshot for clusters having a valid snapshot schedule and backups enabled. Possible values are the following:
OnTrack - The next snapshot is expected to be taken on time.
Pending - The next snapshot is pending to be taken.
NextMaintenanceWindowStartTime (datetime) --
The date and time in UTC when system maintenance can begin.
ResizeInfo (dict) --
Returns the following:
AllowCancelResize: a boolean value indicating if the resize operation can be cancelled.
ResizeType: Returns ClassicResize
ResizeType (string) --
Returns the value ClassicResize .
AllowCancelResize (boolean) --
A boolean value indicating if the resize operation can be cancelled.
Exceptions
Redshift.Client.exceptions.ClusterNotFoundFault
Redshift.Client.exceptions.InvalidClusterStateFault
:return: {
'Cluster': {
'ClusterIdentifier': 'string',
'NodeType': 'string',
'ClusterStatus': 'string',
'ClusterAvailabilityStatus': 'string',
'ModifyStatus': 'string',
'MasterUsername': 'string',
'DBName': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'ClusterCreateTime': datetime(2015, 1, 1),
'AutomatedSnapshotRetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'ClusterSecurityGroups': [
{
'ClusterSecurityGroupName': 'string',
'Status': 'string'
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'ClusterParameterGroups': [
{
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'ClusterParameterStatusList': [
{
'ParameterName': 'string',
'ParameterApplyStatus': 'string',
'ParameterApplyErrorDescription': 'string'
},
]
},
],
'ClusterSubnetGroupName': 'string',
'VpcId': 'string',
'AvailabilityZone': 'string',
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'MasterUserPassword': 'string',
'NodeType': 'string',
'NumberOfNodes': 123,
'ClusterType': 'string',
'ClusterVersion': 'string',
'AutomatedSnapshotRetentionPeriod': 123,
'ClusterIdentifier': 'string',
'PubliclyAccessible': True|False,
'EnhancedVpcRouting': True|False,
'MaintenanceTrackName': 'string',
'EncryptionType': 'string'
},
'ClusterVersion': 'string',
'AllowVersionUpgrade': True|False,
'NumberOfNodes': 123,
'PubliclyAccessible': True|False,
'Encrypted': True|False,
'RestoreStatus': {
'Status': 'string',
'CurrentRestoreRateInMegaBytesPerSecond': 123.0,
'SnapshotSizeInMegaBytes': 123,
'ProgressInMegaBytes': 123,
'ElapsedTimeInSeconds': 123,
'EstimatedTimeToCompletionInSeconds': 123
},
'DataTransferProgress': {
'Status': 'string',
'CurrentRateInMegaBytesPerSecond': 123.0,
'TotalDataInMegaBytes': 123,
'DataTransferredInMegaBytes': 123,
'EstimatedTimeToCompletionInSeconds': 123,
'ElapsedTimeInSeconds': 123
},
'HsmStatus': {
'HsmClientCertificateIdentifier': 'string',
'HsmConfigurationIdentifier': 'string',
'Status': 'string'
},
'ClusterSnapshotCopyStatus': {
'DestinationRegion': 'string',
'RetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'SnapshotCopyGrantName': 'string'
},
'ClusterPublicKey': 'string',
'ClusterNodes': [
{
'NodeRole': 'string',
'PrivateIPAddress': 'string',
'PublicIPAddress': 'string'
},
],
'ElasticIpStatus': {
'ElasticIp': 'string',
'Status': 'string'
},
'ClusterRevisionNumber': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'KmsKeyId': 'string',
'EnhancedVpcRouting': True|False,
'IamRoles': [
{
'IamRoleArn': 'string',
'ApplyStatus': 'string'
},
],
'PendingActions': [
'string',
],
'MaintenanceTrackName': 'string',
'ElasticResizeNumberOfNodeOptions': 'string',
'DeferredMaintenanceWindows': [
{
'DeferMaintenanceIdentifier': 'string',
'DeferMaintenanceStartTime': datetime(2015, 1, 1),
'DeferMaintenanceEndTime': datetime(2015, 1, 1)
},
],
'SnapshotScheduleIdentifier': 'string',
'SnapshotScheduleState': 'MODIFYING'|'ACTIVE'|'FAILED',
'ExpectedNextSnapshotScheduleTime': datetime(2015, 1, 1),
'ExpectedNextSnapshotScheduleTimeStatus': 'string',
'NextMaintenanceWindowStartTime': datetime(2015, 1, 1),
'ResizeInfo': {
'ResizeType': 'string',
'AllowCancelResize': True|False
}
}
}
:returns:
available
available, prep-for-resize
available, resize-cleanup
cancelling-resize
creating
deleting
final-snapshot
hardware-failure
incompatible-hsm
incompatible-network
incompatible-parameters
incompatible-restore
modifying
paused
rebooting
renaming
resizing
rotating-keys
storage-full
updating-hsm
"""
pass
def modify_cluster_parameter_group(ParameterGroupName=None, Parameters=None):
    # NOTE(review): body is `pass` -- this looks like a documentation-only stub
    # (presumably auto-generated from the AWS Redshift API model, with the real
    # call dispatched by the runtime client). Confirm before expecting behavior.
    """
    Modifies the parameters of a parameter group.
    For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide .
    See also: AWS API Documentation
    Exceptions
    :example: response = client.modify_cluster_parameter_group(
    ParameterGroupName='string',
    Parameters=[
    {
    'ParameterName': 'string',
    'ParameterValue': 'string',
    'Description': 'string',
    'Source': 'string',
    'DataType': 'string',
    'AllowedValues': 'string',
    'ApplyType': 'static'|'dynamic',
    'IsModifiable': True|False,
    'MinimumEngineVersion': 'string'
    },
    ]
    )
    :type ParameterGroupName: string
    :param ParameterGroupName: [REQUIRED]\nThe name of the parameter group to be modified.\n
    :type Parameters: list
    :param Parameters: [REQUIRED]\nAn array of parameters to be modified. A maximum of 20 parameters can be modified in a single request.\nFor each parameter to be modified, you must supply at least the parameter name and parameter value; other name-value pairs of the parameter are optional.\nFor the workload management (WLM) configuration, you must supply all the name-value pairs in the wlm_json_configuration parameter.\n\n(dict) --Describes a parameter in a cluster parameter group.\n\nParameterName (string) --The name of the parameter.\n\nParameterValue (string) --The value of the parameter.\n\nDescription (string) --A description of the parameter.\n\nSource (string) --The source of the parameter value, such as 'engine-default' or 'user'.\n\nDataType (string) --The data type of the parameter.\n\nAllowedValues (string) --The valid range of values for the parameter.\n\nApplyType (string) --Specifies how to apply the WLM configuration parameter. Some properties can be applied dynamically, while other properties require that any associated clusters be rebooted for the configuration changes to be applied. For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide .\n\nIsModifiable (boolean) --If true , the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.\n\nMinimumEngineVersion (string) --The earliest engine version to which the parameter can apply.\n\n\n\n\n
    :rtype: dict
    ReturnsResponse Syntax
    {
    'ParameterGroupName': 'string',
    'ParameterGroupStatus': 'string'
    }
    Response Structure
    (dict) --
    ParameterGroupName (string) --
    The name of the cluster parameter group.
    ParameterGroupStatus (string) --
    The status of the parameter group. For example, if you made a change to a parameter group name-value pair, then the change could be pending a reboot of an associated cluster.
    Exceptions
    Redshift.Client.exceptions.ClusterParameterGroupNotFoundFault
    Redshift.Client.exceptions.InvalidClusterParameterGroupStateFault
    :return: {
    'ParameterGroupName': 'string',
    'ParameterGroupStatus': 'string'
    }
    :returns:
    Redshift.Client.exceptions.ClusterParameterGroupNotFoundFault
    Redshift.Client.exceptions.InvalidClusterParameterGroupStateFault
    """
    pass
def modify_cluster_snapshot(SnapshotIdentifier=None, ManualSnapshotRetentionPeriod=None, Force=None):
    # NOTE(review): body is `pass` -- documentation-only stub, presumably
    # auto-generated from the AWS Redshift API model; confirm before relying
    # on this module for runtime behavior.
    # Fixed docstring typo: "exanmple" -> "example".
    """
    Modifies the settings for a snapshot.
    This example modifies the manual retention period setting for a cluster snapshot.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.modify_cluster_snapshot(
    SnapshotIdentifier='string',
    ManualSnapshotRetentionPeriod=123,
    Force=True|False
    )
    :type SnapshotIdentifier: string
    :param SnapshotIdentifier: [REQUIRED]\nThe identifier of the snapshot whose setting you want to modify.\n
    :type ManualSnapshotRetentionPeriod: integer
    :param ManualSnapshotRetentionPeriod: The number of days that a manual snapshot is retained. If the value is -1, the manual snapshot is retained indefinitely.\nIf the manual snapshot falls outside of the new retention period, you can specify the force option to immediately delete the snapshot.\nThe value must be either -1 or an integer between 1 and 3,653.\n
    :type Force: boolean
    :param Force: A Boolean option to override an exception if the retention period has already passed.
    :rtype: dict
    ReturnsResponse Syntax
    {
    'Snapshot': {
    'SnapshotIdentifier': 'string',
    'ClusterIdentifier': 'string',
    'SnapshotCreateTime': datetime(2015, 1, 1),
    'Status': 'string',
    'Port': 123,
    'AvailabilityZone': 'string',
    'ClusterCreateTime': datetime(2015, 1, 1),
    'MasterUsername': 'string',
    'ClusterVersion': 'string',
    'SnapshotType': 'string',
    'NodeType': 'string',
    'NumberOfNodes': 123,
    'DBName': 'string',
    'VpcId': 'string',
    'Encrypted': True|False,
    'KmsKeyId': 'string',
    'EncryptedWithHSM': True|False,
    'AccountsWithRestoreAccess': [
    {
    'AccountId': 'string',
    'AccountAlias': 'string'
    },
    ],
    'OwnerAccount': 'string',
    'TotalBackupSizeInMegaBytes': 123.0,
    'ActualIncrementalBackupSizeInMegaBytes': 123.0,
    'BackupProgressInMegaBytes': 123.0,
    'CurrentBackupRateInMegaBytesPerSecond': 123.0,
    'EstimatedSecondsToCompletion': 123,
    'ElapsedTimeInSeconds': 123,
    'SourceRegion': 'string',
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ],
    'RestorableNodeTypes': [
    'string',
    ],
    'EnhancedVpcRouting': True|False,
    'MaintenanceTrackName': 'string',
    'ManualSnapshotRetentionPeriod': 123,
    'ManualSnapshotRemainingDays': 123,
    'SnapshotRetentionStartTime': datetime(2015, 1, 1)
    }
    }
    Response Structure
    (dict) --
    Snapshot (dict) --
    Describes a snapshot.
    SnapshotIdentifier (string) --
    The snapshot identifier that is provided in the request.
    ClusterIdentifier (string) --
    The identifier of the cluster for which the snapshot was taken.
    SnapshotCreateTime (datetime) --
    The time (in UTC format) when Amazon Redshift began the snapshot. A snapshot contains a copy of the cluster data as of this exact time.
    Status (string) --
    The snapshot status. The value of the status depends on the API operation used:
    CreateClusterSnapshot and CopyClusterSnapshot returns status as "creating".
    DescribeClusterSnapshots returns status as "creating", "available", "final snapshot", or "failed".
    DeleteClusterSnapshot returns status as "deleted".
    Port (integer) --
    The port that the cluster is listening on.
    AvailabilityZone (string) --
    The Availability Zone in which the cluster was created.
    ClusterCreateTime (datetime) --
    The time (UTC) when the cluster was originally created.
    MasterUsername (string) --
    The master user name for the cluster.
    ClusterVersion (string) --
    The version ID of the Amazon Redshift engine that is running on the cluster.
    SnapshotType (string) --
    The snapshot type. Snapshots created using CreateClusterSnapshot and CopyClusterSnapshot are of type "manual".
    NodeType (string) --
    The node type of the nodes in the cluster.
    NumberOfNodes (integer) --
    The number of nodes in the cluster.
    DBName (string) --
    The name of the database that was created when the cluster was created.
    VpcId (string) --
    The VPC identifier of the cluster if the snapshot is from a cluster in a VPC. Otherwise, this field is not in the output.
    Encrypted (boolean) --
    If true , the data in the snapshot is encrypted at rest.
    KmsKeyId (string) --
    The AWS Key Management Service (KMS) key ID of the encryption key that was used to encrypt data in the cluster from which the snapshot was taken.
    EncryptedWithHSM (boolean) --
    A boolean that indicates whether the snapshot data is encrypted using the HSM keys of the source cluster. true indicates that the data is encrypted using HSM keys.
    AccountsWithRestoreAccess (list) --
    A list of the AWS customer accounts authorized to restore the snapshot. Returns null if no accounts are authorized. Visible only to the snapshot owner.
    (dict) --
    Describes an AWS customer account authorized to restore a snapshot.
    AccountId (string) --
    The identifier of an AWS customer account authorized to restore a snapshot.
    AccountAlias (string) --
    The identifier of an AWS support account authorized to restore a snapshot. For AWS support, the identifier is amazon-redshift-support .
    OwnerAccount (string) --
    For manual snapshots, the AWS customer account used to create or copy the snapshot. For automatic snapshots, the owner of the cluster. The owner can perform all snapshot actions, such as sharing a manual snapshot.
    TotalBackupSizeInMegaBytes (float) --
    The size of the complete set of backup data that would be used to restore the cluster.
    ActualIncrementalBackupSizeInMegaBytes (float) --
    The size of the incremental backup.
    BackupProgressInMegaBytes (float) --
    The number of megabytes that have been transferred to the snapshot backup.
    CurrentBackupRateInMegaBytesPerSecond (float) --
    The number of megabytes per second being transferred to the snapshot backup. Returns 0 for a completed backup.
    EstimatedSecondsToCompletion (integer) --
    The estimate of the time remaining before the snapshot backup will complete. Returns 0 for a completed backup.
    ElapsedTimeInSeconds (integer) --
    The amount of time an in-progress snapshot backup has been running, or the amount of time it took a completed backup to finish.
    SourceRegion (string) --
    The source region from which the snapshot was copied.
    Tags (list) --
    The list of tags for the cluster snapshot.
    (dict) --
    A tag consisting of a name/value pair for a resource.
    Key (string) --
    The key, or name, for the resource tag.
    Value (string) --
    The value for the resource tag.
    RestorableNodeTypes (list) --
    The list of node types that this cluster snapshot is able to restore into.
    (string) --
    EnhancedVpcRouting (boolean) --
    An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.
    If this option is true , enhanced VPC routing is enabled.
    Default: false
    MaintenanceTrackName (string) --
    The name of the maintenance track for the snapshot.
    ManualSnapshotRetentionPeriod (integer) --
    The number of days that a manual snapshot is retained. If the value is -1, the manual snapshot is retained indefinitely.
    The value must be either -1 or an integer between 1 and 3,653.
    ManualSnapshotRemainingDays (integer) --
    The number of days until a manual snapshot will pass its retention period.
    SnapshotRetentionStartTime (datetime) --
    A timestamp representing the start of the retention period for the snapshot.
    Exceptions
    Redshift.Client.exceptions.InvalidClusterSnapshotStateFault
    Redshift.Client.exceptions.ClusterSnapshotNotFoundFault
    Redshift.Client.exceptions.InvalidRetentionPeriodFault
    :return: {
    'Snapshot': {
    'SnapshotIdentifier': 'string',
    'ClusterIdentifier': 'string',
    'SnapshotCreateTime': datetime(2015, 1, 1),
    'Status': 'string',
    'Port': 123,
    'AvailabilityZone': 'string',
    'ClusterCreateTime': datetime(2015, 1, 1),
    'MasterUsername': 'string',
    'ClusterVersion': 'string',
    'SnapshotType': 'string',
    'NodeType': 'string',
    'NumberOfNodes': 123,
    'DBName': 'string',
    'VpcId': 'string',
    'Encrypted': True|False,
    'KmsKeyId': 'string',
    'EncryptedWithHSM': True|False,
    'AccountsWithRestoreAccess': [
    {
    'AccountId': 'string',
    'AccountAlias': 'string'
    },
    ],
    'OwnerAccount': 'string',
    'TotalBackupSizeInMegaBytes': 123.0,
    'ActualIncrementalBackupSizeInMegaBytes': 123.0,
    'BackupProgressInMegaBytes': 123.0,
    'CurrentBackupRateInMegaBytesPerSecond': 123.0,
    'EstimatedSecondsToCompletion': 123,
    'ElapsedTimeInSeconds': 123,
    'SourceRegion': 'string',
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ],
    'RestorableNodeTypes': [
    'string',
    ],
    'EnhancedVpcRouting': True|False,
    'MaintenanceTrackName': 'string',
    'ManualSnapshotRetentionPeriod': 123,
    'ManualSnapshotRemainingDays': 123,
    'SnapshotRetentionStartTime': datetime(2015, 1, 1)
    }
    }
    :returns:
    CreateClusterSnapshot and CopyClusterSnapshot returns status as "creating".
    DescribeClusterSnapshots returns status as "creating", "available", "final snapshot", or "failed".
    DeleteClusterSnapshot returns status as "deleted".
    """
    pass
def modify_cluster_snapshot_schedule(ClusterIdentifier=None, ScheduleIdentifier=None, DisassociateSchedule=None):
    # NOTE(review): body is `pass` -- documentation-only stub, presumably
    # auto-generated from the AWS Redshift API model; confirm before relying
    # on this module for runtime behavior.
    # Fixed docstring typo: "assoiciation" -> "association".
    """
    Modifies a snapshot schedule for a cluster.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.modify_cluster_snapshot_schedule(
    ClusterIdentifier='string',
    ScheduleIdentifier='string',
    DisassociateSchedule=True|False
    )
    :type ClusterIdentifier: string
    :param ClusterIdentifier: [REQUIRED]\nA unique identifier for the cluster whose snapshot schedule you want to modify.\n
    :type ScheduleIdentifier: string
    :param ScheduleIdentifier: A unique alphanumeric identifier for the schedule that you want to associate with the cluster.
    :type DisassociateSchedule: boolean
    :param DisassociateSchedule: A boolean to indicate whether to remove the association between the cluster and the schedule.
    :returns:
    Redshift.Client.exceptions.ClusterNotFoundFault
    Redshift.Client.exceptions.SnapshotScheduleNotFoundFault
    Redshift.Client.exceptions.InvalidClusterSnapshotScheduleStateFault
    """
    pass
def modify_cluster_subnet_group(ClusterSubnetGroupName=None, Description=None, SubnetIds=None):
    # NOTE(review): body is `pass` -- documentation-only stub, presumably
    # auto-generated from the AWS Redshift API model; confirm before relying
    # on this module for runtime behavior.
    """
    Modifies a cluster subnet group to include the specified list of VPC subnets. The operation replaces the existing list of subnets with the new list of subnets.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.modify_cluster_subnet_group(
    ClusterSubnetGroupName='string',
    Description='string',
    SubnetIds=[
    'string',
    ]
    )
    :type ClusterSubnetGroupName: string
    :param ClusterSubnetGroupName: [REQUIRED]\nThe name of the subnet group to be modified.\n
    :type Description: string
    :param Description: A text description of the subnet group to be modified.
    :type SubnetIds: list
    :param SubnetIds: [REQUIRED]\nAn array of VPC subnet IDs. A maximum of 20 subnets can be modified in a single request.\n\n(string) --\n\n
    :rtype: dict
    ReturnsResponse Syntax
    {
    'ClusterSubnetGroup': {
    'ClusterSubnetGroupName': 'string',
    'Description': 'string',
    'VpcId': 'string',
    'SubnetGroupStatus': 'string',
    'Subnets': [
    {
    'SubnetIdentifier': 'string',
    'SubnetAvailabilityZone': {
    'Name': 'string',
    'SupportedPlatforms': [
    {
    'Name': 'string'
    },
    ]
    },
    'SubnetStatus': 'string'
    },
    ],
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    }
    }
    Response Structure
    (dict) --
    ClusterSubnetGroup (dict) --
    Describes a subnet group.
    ClusterSubnetGroupName (string) --
    The name of the cluster subnet group.
    Description (string) --
    The description of the cluster subnet group.
    VpcId (string) --
    The VPC ID of the cluster subnet group.
    SubnetGroupStatus (string) --
    The status of the cluster subnet group. Possible values are Complete , Incomplete and Invalid .
    Subnets (list) --
    A list of the VPC Subnet elements.
    (dict) --
    Describes a subnet.
    SubnetIdentifier (string) --
    The identifier of the subnet.
    SubnetAvailabilityZone (dict) --
    Name (string) --
    The name of the availability zone.
    SupportedPlatforms (list) --
    (dict) --
    A list of supported platforms for orderable clusters.
    Name (string) --
    SubnetStatus (string) --
    The status of the subnet.
    Tags (list) --
    The list of tags for the cluster subnet group.
    (dict) --
    A tag consisting of a name/value pair for a resource.
    Key (string) --
    The key, or name, for the resource tag.
    Value (string) --
    The value for the resource tag.
    Exceptions
    Redshift.Client.exceptions.ClusterSubnetGroupNotFoundFault
    Redshift.Client.exceptions.ClusterSubnetQuotaExceededFault
    Redshift.Client.exceptions.SubnetAlreadyInUse
    Redshift.Client.exceptions.InvalidSubnet
    Redshift.Client.exceptions.UnauthorizedOperation
    Redshift.Client.exceptions.DependentServiceRequestThrottlingFault
    :return: {
    'ClusterSubnetGroup': {
    'ClusterSubnetGroupName': 'string',
    'Description': 'string',
    'VpcId': 'string',
    'SubnetGroupStatus': 'string',
    'Subnets': [
    {
    'SubnetIdentifier': 'string',
    'SubnetAvailabilityZone': {
    'Name': 'string',
    'SupportedPlatforms': [
    {
    'Name': 'string'
    },
    ]
    },
    'SubnetStatus': 'string'
    },
    ],
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    }
    }
    :returns:
    Name (string) --
    """
    pass
def modify_event_subscription(SubscriptionName=None, SnsTopicArn=None, SourceType=None, SourceIds=None, EventCategories=None, Severity=None, Enabled=None):
    # NOTE(review): body is `pass` -- documentation-only stub, presumably
    # auto-generated from the AWS Redshift API model; confirm before relying
    # on this module for runtime behavior.
    """
    Modifies an existing Amazon Redshift event notification subscription.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.modify_event_subscription(
    SubscriptionName='string',
    SnsTopicArn='string',
    SourceType='string',
    SourceIds=[
    'string',
    ],
    EventCategories=[
    'string',
    ],
    Severity='string',
    Enabled=True|False
    )
    :type SubscriptionName: string
    :param SubscriptionName: [REQUIRED]\nThe name of the modified Amazon Redshift event notification subscription.\n
    :type SnsTopicArn: string
    :param SnsTopicArn: The Amazon Resource Name (ARN) of the SNS topic to be used by the event notification subscription.
    :type SourceType: string
    :param SourceType: The type of source that will be generating the events. For example, if you want to be notified of events generated by a cluster, you would set this parameter to cluster. If this value is not specified, events are returned for all Amazon Redshift objects in your AWS account. You must specify a source type in order to specify source IDs.\nValid values: cluster, cluster-parameter-group, cluster-security-group, cluster-snapshot, and scheduled-action.\n
    :type SourceIds: list
    :param SourceIds: A list of one or more identifiers of Amazon Redshift source objects. All of the objects must be of the same type as was specified in the source type parameter. The event subscription will return only events generated by the specified objects. If not specified, then events are returned for all objects within the source type specified.\nExample: my-cluster-1, my-cluster-2\nExample: my-snapshot-20131010\n\n(string) --\n\n
    :type EventCategories: list
    :param EventCategories: Specifies the Amazon Redshift event categories to be published by the event notification subscription.\nValues: configuration, management, monitoring, security\n\n(string) --\n\n
    :type Severity: string
    :param Severity: Specifies the Amazon Redshift event severity to be published by the event notification subscription.\nValues: ERROR, INFO\n
    :type Enabled: boolean
    :param Enabled: A Boolean value indicating if the subscription is enabled. true indicates the subscription is enabled.
    :rtype: dict
    ReturnsResponse Syntax
    {
    'EventSubscription': {
    'CustomerAwsId': 'string',
    'CustSubscriptionId': 'string',
    'SnsTopicArn': 'string',
    'Status': 'string',
    'SubscriptionCreationTime': datetime(2015, 1, 1),
    'SourceType': 'string',
    'SourceIdsList': [
    'string',
    ],
    'EventCategoriesList': [
    'string',
    ],
    'Severity': 'string',
    'Enabled': True|False,
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    }
    }
    Response Structure
    (dict) --
    EventSubscription (dict) --
    Describes event subscriptions.
    CustomerAwsId (string) --
    The AWS customer account associated with the Amazon Redshift event notification subscription.
    CustSubscriptionId (string) --
    The name of the Amazon Redshift event notification subscription.
    SnsTopicArn (string) --
    The Amazon Resource Name (ARN) of the Amazon SNS topic used by the event notification subscription.
    Status (string) --
    The status of the Amazon Redshift event notification subscription.
    Constraints:
    Can be one of the following: active | no-permission | topic-not-exist
    The status "no-permission" indicates that Amazon Redshift no longer has permission to post to the Amazon SNS topic. The status "topic-not-exist" indicates that the topic was deleted after the subscription was created.
    SubscriptionCreationTime (datetime) --
    The date and time the Amazon Redshift event notification subscription was created.
    SourceType (string) --
    The source type of the events returned by the Amazon Redshift event notification, such as cluster, cluster-snapshot, cluster-parameter-group, cluster-security-group, or scheduled-action.
    SourceIdsList (list) --
    A list of the sources that publish events to the Amazon Redshift event notification subscription.
    (string) --
    EventCategoriesList (list) --
    The list of Amazon Redshift event categories specified in the event notification subscription.
    Values: Configuration, Management, Monitoring, Security
    (string) --
    Severity (string) --
    The event severity specified in the Amazon Redshift event notification subscription.
    Values: ERROR, INFO
    Enabled (boolean) --
    A boolean value indicating whether the subscription is enabled; true indicates that the subscription is enabled.
    Tags (list) --
    The list of tags for the event subscription.
    (dict) --
    A tag consisting of a name/value pair for a resource.
    Key (string) --
    The key, or name, for the resource tag.
    Value (string) --
    The value for the resource tag.
    Exceptions
    Redshift.Client.exceptions.SubscriptionNotFoundFault
    Redshift.Client.exceptions.SNSInvalidTopicFault
    Redshift.Client.exceptions.SNSNoAuthorizationFault
    Redshift.Client.exceptions.SNSTopicArnNotFoundFault
    Redshift.Client.exceptions.SubscriptionEventIdNotFoundFault
    Redshift.Client.exceptions.SubscriptionCategoryNotFoundFault
    Redshift.Client.exceptions.SubscriptionSeverityNotFoundFault
    Redshift.Client.exceptions.SourceNotFoundFault
    Redshift.Client.exceptions.InvalidSubscriptionStateFault
    :return: {
    'EventSubscription': {
    'CustomerAwsId': 'string',
    'CustSubscriptionId': 'string',
    'SnsTopicArn': 'string',
    'Status': 'string',
    'SubscriptionCreationTime': datetime(2015, 1, 1),
    'SourceType': 'string',
    'SourceIdsList': [
    'string',
    ],
    'EventCategoriesList': [
    'string',
    ],
    'Severity': 'string',
    'Enabled': True|False,
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    }
    }
    :returns:
    Can be one of the following: active | no-permission | topic-not-exist
    The status "no-permission" indicates that Amazon Redshift no longer has permission to post to the Amazon SNS topic. The status "topic-not-exist" indicates that the topic was deleted after the subscription was created.
    """
    pass
def modify_scheduled_action(ScheduledActionName=None, TargetAction=None, Schedule=None, IamRole=None, ScheduledActionDescription=None, StartTime=None, EndTime=None, Enable=None):
    # NOTE(review): body is `pass` -- documentation-only stub, presumably
    # auto-generated from the AWS Redshift API model; confirm before relying
    # on this module for runtime behavior.
    """
    Modifies a scheduled action.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.modify_scheduled_action(
    ScheduledActionName='string',
    TargetAction={
    'ResizeCluster': {
    'ClusterIdentifier': 'string',
    'ClusterType': 'string',
    'NodeType': 'string',
    'NumberOfNodes': 123,
    'Classic': True|False
    },
    'PauseCluster': {
    'ClusterIdentifier': 'string'
    },
    'ResumeCluster': {
    'ClusterIdentifier': 'string'
    }
    },
    Schedule='string',
    IamRole='string',
    ScheduledActionDescription='string',
    StartTime=datetime(2015, 1, 1),
    EndTime=datetime(2015, 1, 1),
    Enable=True|False
    )
    :type ScheduledActionName: string
    :param ScheduledActionName: [REQUIRED]\nThe name of the scheduled action to modify.\n
    :type TargetAction: dict
    :param TargetAction: A modified JSON format of the scheduled action. For more information about this parameter, see ScheduledAction .\n\nResizeCluster (dict) --An action that runs a ResizeCluster API operation.\n\nClusterIdentifier (string) -- [REQUIRED]The unique identifier for the cluster to resize.\n\nClusterType (string) --The new cluster type for the specified cluster.\n\nNodeType (string) --The new node type for the nodes you are adding. If not specified, the cluster\'s current node type is used.\n\nNumberOfNodes (integer) --The new number of nodes for the cluster.\n\nClassic (boolean) --A boolean value indicating whether the resize operation is using the classic resize process. If you don\'t provide this parameter or set the value to false , the resize type is elastic.\n\n\n\nPauseCluster (dict) --An action that runs a PauseCluster API operation.\n\nClusterIdentifier (string) -- [REQUIRED]The identifier of the cluster to be paused.\n\n\n\nResumeCluster (dict) --An action that runs a ResumeCluster API operation.\n\nClusterIdentifier (string) -- [REQUIRED]The identifier of the cluster to be resumed.\n\n\n\n\n
    :type Schedule: string
    :param Schedule: A modified schedule in either at( ) or cron( ) format. For more information about this parameter, see ScheduledAction .
    :type IamRole: string
    :param IamRole: A different IAM role to assume to run the target action. For more information about this parameter, see ScheduledAction .
    :type ScheduledActionDescription: string
    :param ScheduledActionDescription: A modified description of the scheduled action.
    :type StartTime: datetime
    :param StartTime: A modified start time of the scheduled action. For more information about this parameter, see ScheduledAction .
    :type EndTime: datetime
    :param EndTime: A modified end time of the scheduled action. For more information about this parameter, see ScheduledAction .
    :type Enable: boolean
    :param Enable: A modified enable flag of the scheduled action. If true, the scheduled action is active. If false, the scheduled action is disabled.
    :rtype: dict
    ReturnsResponse Syntax
    {
    'ScheduledActionName': 'string',
    'TargetAction': {
    'ResizeCluster': {
    'ClusterIdentifier': 'string',
    'ClusterType': 'string',
    'NodeType': 'string',
    'NumberOfNodes': 123,
    'Classic': True|False
    },
    'PauseCluster': {
    'ClusterIdentifier': 'string'
    },
    'ResumeCluster': {
    'ClusterIdentifier': 'string'
    }
    },
    'Schedule': 'string',
    'IamRole': 'string',
    'ScheduledActionDescription': 'string',
    'State': 'ACTIVE'|'DISABLED',
    'NextInvocations': [
    datetime(2015, 1, 1),
    ],
    'StartTime': datetime(2015, 1, 1),
    'EndTime': datetime(2015, 1, 1)
    }
    Response Structure
    (dict) --
    Describes a scheduled action. You can use a scheduled action to trigger some Amazon Redshift API operations on a schedule. For information about which API operations can be scheduled, see ScheduledActionType .
    ScheduledActionName (string) --
    The name of the scheduled action.
    TargetAction (dict) --
    A JSON format string of the Amazon Redshift API operation with input parameters.
    "{\\"ResizeCluster\\":{\\"NodeType\\":\\"ds2.8xlarge\\",\\"ClusterIdentifier\\":\\"my-test-cluster\\",\\"NumberOfNodes\\":3}} ".
    ResizeCluster (dict) --
    An action that runs a ResizeCluster API operation.
    ClusterIdentifier (string) --
    The unique identifier for the cluster to resize.
    ClusterType (string) --
    The new cluster type for the specified cluster.
    NodeType (string) --
    The new node type for the nodes you are adding. If not specified, the cluster\'s current node type is used.
    NumberOfNodes (integer) --
    The new number of nodes for the cluster.
    Classic (boolean) --
    A boolean value indicating whether the resize operation is using the classic resize process. If you don\'t provide this parameter or set the value to false , the resize type is elastic.
    PauseCluster (dict) --
    An action that runs a PauseCluster API operation.
    ClusterIdentifier (string) --
    The identifier of the cluster to be paused.
    ResumeCluster (dict) --
    An action that runs a ResumeCluster API operation.
    ClusterIdentifier (string) --
    The identifier of the cluster to be resumed.
    Schedule (string) --
    The schedule for a one-time (at format) or recurring (cron format) scheduled action. Schedule invocations must be separated by at least one hour.
    Format of at expressions is "at(yyyy-mm-ddThh:mm:ss) ". For example, "at(2016-03-04T17:27:00) ".
    Format of cron expressions is "cron(Minutes Hours Day-of-month Month Day-of-week Year) ". For example, "cron(0 10 ? * MON *) ". For more information, see Cron Expressions in the Amazon CloudWatch Events User Guide .
    IamRole (string) --
    The IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift API operation in the scheduled action. This IAM role must allow the Amazon Redshift scheduler (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. For more information about the IAM role to use with the Amazon Redshift scheduler, see Using Identity-Based Policies for Amazon Redshift in the Amazon Redshift Cluster Management Guide .
    ScheduledActionDescription (string) --
    The description of the scheduled action.
    State (string) --
    The state of the scheduled action. For example, DISABLED .
    NextInvocations (list) --
    List of times when the scheduled action will run.
    (datetime) --
    StartTime (datetime) --
    The start time in UTC when the schedule is active. Before this time, the scheduled action does not trigger.
    EndTime (datetime) --
    The end time in UTC when the schedule is no longer active. After this time, the scheduled action does not trigger.
    Exceptions
    Redshift.Client.exceptions.ScheduledActionNotFoundFault
    Redshift.Client.exceptions.ScheduledActionTypeUnsupportedFault
    Redshift.Client.exceptions.InvalidScheduleFault
    Redshift.Client.exceptions.InvalidScheduledActionFault
    Redshift.Client.exceptions.UnauthorizedOperation
    :return: {
    'ScheduledActionName': 'string',
    'TargetAction': {
    'ResizeCluster': {
    'ClusterIdentifier': 'string',
    'ClusterType': 'string',
    'NodeType': 'string',
    'NumberOfNodes': 123,
    'Classic': True|False
    },
    'PauseCluster': {
    'ClusterIdentifier': 'string'
    },
    'ResumeCluster': {
    'ClusterIdentifier': 'string'
    }
    },
    'Schedule': 'string',
    'IamRole': 'string',
    'ScheduledActionDescription': 'string',
    'State': 'ACTIVE'|'DISABLED',
    'NextInvocations': [
    datetime(2015, 1, 1),
    ],
    'StartTime': datetime(2015, 1, 1),
    'EndTime': datetime(2015, 1, 1)
    }
    :returns:
    (datetime) --
    """
    pass
def modify_snapshot_copy_retention_period(ClusterIdentifier=None, RetentionPeriod=None, Manual=None):
    """
    Modify the number of days to retain snapshots in the destination AWS Region
    after they are copied from the source AWS Region.

    By default, this operation only changes the retention period of copied
    automated snapshots; the retention periods for both new and existing copied
    automated snapshots are updated with the new retention period. Set the
    ``Manual`` option to change only the retention periods of copied manual
    snapshots; in that case, only newly copied manual snapshots have the new
    retention period.

    See also: AWS API Documentation

    :example: response = client.modify_snapshot_copy_retention_period(
                  ClusterIdentifier='string',
                  RetentionPeriod=123,
                  Manual=True|False
              )

    :type ClusterIdentifier: string
    :param ClusterIdentifier: [REQUIRED]
        The unique identifier of the cluster for which you want to change the
        retention period for either automated or manual snapshots that are
        copied to a destination AWS Region.
        Constraints: Must be the valid name of an existing cluster that has
        cross-region snapshot copy enabled.

    :type RetentionPeriod: integer
    :param RetentionPeriod: [REQUIRED]
        The number of days to retain automated snapshots in the destination AWS
        Region after they are copied from the source AWS Region.
        By default, this only changes the retention period of copied automated
        snapshots. If you decrease the retention period for automated snapshots
        that are copied to a destination AWS Region, Amazon Redshift deletes any
        existing automated snapshots that were copied to the destination AWS
        Region and that fall outside of the new retention period.
        Constraints: Must be at least 1 and no more than 35 for automated
        snapshots. If you specify the manual option, only newly copied manual
        snapshots will have the new retention period; a value of -1 retains
        newly copied manual snapshots indefinitely. For manual snapshots the
        number of days must be either -1 or an integer between 1 and 3,653.

    :type Manual: boolean
    :param Manual: Indicates whether to apply the snapshot retention period to
        newly copied manual snapshots instead of automated snapshots.

    :rtype: dict
    :return: ``{'Cluster': {...}}`` -- a dict describing the modified cluster,
        including (among other keys) 'ClusterIdentifier', 'NodeType',
        'ClusterStatus', 'ClusterAvailabilityStatus', 'Endpoint',
        'AutomatedSnapshotRetentionPeriod', 'ManualSnapshotRetentionPeriod',
        'ClusterSecurityGroups', 'VpcSecurityGroups', 'ClusterParameterGroups',
        'PendingModifiedValues', 'RestoreStatus', 'DataTransferProgress',
        'HsmStatus', 'ClusterSnapshotCopyStatus' (destination region and
        retention periods configured for cross-region snapshot copy),
        'ClusterNodes', 'Tags', 'IamRoles', 'DeferredMaintenanceWindows',
        'SnapshotScheduleIdentifier', 'SnapshotScheduleState'
        ('MODIFYING'|'ACTIVE'|'FAILED') and 'ResizeInfo'.
        See the AWS API documentation for the full response syntax and the
        per-field descriptions.

    :raises Redshift.Client.exceptions.ClusterNotFoundFault:
    :raises Redshift.Client.exceptions.SnapshotCopyDisabledFault:
    :raises Redshift.Client.exceptions.UnauthorizedOperation:
    :raises Redshift.Client.exceptions.InvalidClusterStateFault:
    :raises Redshift.Client.exceptions.InvalidRetentionPeriodFault:
    """
    # Documentation stub only; the real implementation is provided by botocore
    # at runtime via the service model.
    pass
def modify_snapshot_schedule(ScheduleIdentifier=None, ScheduleDefinitions=None):
    """
    Modify a snapshot schedule. Any schedule associated with a cluster is
    modified asynchronously.

    See also: AWS API Documentation

    :example: response = client.modify_snapshot_schedule(
                  ScheduleIdentifier='string',
                  ScheduleDefinitions=[
                      'string',
                  ]
              )

    :type ScheduleIdentifier: string
    :param ScheduleIdentifier: [REQUIRED]
        A unique alphanumeric identifier of the schedule to modify.

    :type ScheduleDefinitions: list
    :param ScheduleDefinitions: [REQUIRED]
        An updated list of schedule definitions. A schedule definition is made
        up of schedule expressions, for example, 'cron(30 12 *)' or
        'rate(12 hours)'. Each element is a string.

    :rtype: dict
    :return: A dict describing the snapshot schedule::

            {
                'ScheduleDefinitions': ['string', ...],
                'ScheduleIdentifier': 'string',
                'ScheduleDescription': 'string',
                'Tags': [{'Key': 'string', 'Value': 'string'}, ...],
                'NextInvocations': [datetime(2015, 1, 1), ...],
                'AssociatedClusterCount': 123,
                'AssociatedClusters': [
                    {
                        'ClusterIdentifier': 'string',
                        'ScheduleAssociationState': 'MODIFYING'|'ACTIVE'|'FAILED'
                    },
                ]
            }

        'AssociatedClusters' returns a maximum of 100 clusters.

    :raises Redshift.Client.exceptions.InvalidScheduleFault:
    :raises Redshift.Client.exceptions.SnapshotScheduleNotFoundFault:
    :raises Redshift.Client.exceptions.SnapshotScheduleUpdateInProgressFault:
    """
    # Documentation stub only; the real implementation is provided by botocore
    # at runtime via the service model.
    pass
def modify_usage_limit(UsageLimitId=None, Amount=None, BreachAction=None):
    """
    Modify a usage limit in a cluster. You can't modify the feature type or
    period of a usage limit.

    See also: AWS API Documentation

    :example: response = client.modify_usage_limit(
                  UsageLimitId='string',
                  Amount=123,
                  BreachAction='log'|'emit-metric'|'disable'
              )

    :type UsageLimitId: string
    :param UsageLimitId: [REQUIRED]
        The identifier of the usage limit to modify.

    :type Amount: integer
    :param Amount: The new limit amount. If time-based, this amount is in
        minutes; if data-based, this amount is in terabytes (TB). For more
        information about this parameter, see UsageLimit.

    :type BreachAction: string
    :param BreachAction: The new action that Amazon Redshift takes when the
        limit is reached. One of:
        - 'log': log an event in a system table (the default),
        - 'emit-metric': emit CloudWatch metrics,
        - 'disable': disable the feature until the next usage period begins.
        For more information about this parameter, see UsageLimit.

    :rtype: dict
    :return: A dict describing the usage limit object::

            {
                'UsageLimitId': 'string',
                'ClusterIdentifier': 'string',
                'FeatureType': 'spectrum'|'concurrency-scaling',
                'LimitType': 'time'|'data-scanned',
                'Amount': 123,
                'Period': 'daily'|'weekly'|'monthly',
                'BreachAction': 'log'|'emit-metric'|'disable',
                'Tags': [{'Key': 'string', 'Value': 'string'}, ...]
            }

        A weekly period begins on Sunday; the default period is monthly.

    :raises Redshift.Client.exceptions.InvalidUsageLimitFault:
    :raises Redshift.Client.exceptions.UsageLimitNotFoundFault:
    :raises Redshift.Client.exceptions.UnsupportedOperationFault:
    """
    # Documentation stub only; the real implementation is provided by botocore
    # at runtime via the service model.
    pass
def pause_cluster(ClusterIdentifier=None):
    """Pauses an Amazon Redshift cluster.

    Stub for ``Redshift.Client.pause_cluster``. The real client call suspends
    the specified cluster; this placeholder performs no work and returns None.

    See also: AWS API Documentation

    :example: response = client.pause_cluster(
        ClusterIdentifier='string'
    )
    :type ClusterIdentifier: string
    :param ClusterIdentifier: [REQUIRED]\nThe identifier of the cluster to be paused.\n
    :rtype: dict
    :return: ``{'Cluster': {...}}`` -- a full Cluster description including
        ClusterIdentifier, NodeType, ClusterStatus, ClusterAvailabilityStatus,
        ModifyStatus, MasterUsername, DBName, Endpoint, ClusterCreateTime,
        snapshot retention periods, ClusterSecurityGroups, VpcSecurityGroups,
        ClusterParameterGroups, subnet/VPC/AZ details,
        PreferredMaintenanceWindow, PendingModifiedValues, ClusterVersion,
        AllowVersionUpgrade, NumberOfNodes, PubliclyAccessible, Encrypted,
        RestoreStatus, DataTransferProgress, HsmStatus,
        ClusterSnapshotCopyStatus, ClusterPublicKey, ClusterNodes,
        ElasticIpStatus, ClusterRevisionNumber, Tags, KmsKeyId,
        EnhancedVpcRouting, IamRoles, PendingActions, MaintenanceTrackName,
        ElasticResizeNumberOfNodeOptions, DeferredMaintenanceWindows,
        snapshot-schedule fields, NextMaintenanceWindowStartTime, and
        ResizeInfo.

    Exceptions:
    Redshift.Client.exceptions.ClusterNotFoundFault
    Redshift.Client.exceptions.InvalidClusterStateFault
    """
    pass
def purchase_reserved_node_offering(ReservedNodeOfferingId=None, NodeCount=None):
    """Allows you to purchase reserved nodes.

    Stub for ``Redshift.Client.purchase_reserved_node_offering``. Amazon
    Redshift offers a predefined set of reserved node offerings; you can
    purchase one or more of them. Call the DescribeReservedNodeOfferings API
    to obtain the available offerings, then call this API with a specific
    reserved node offering and the number of nodes you want to reserve.
    For more information, go to Purchasing Reserved Nodes in the Amazon
    Redshift Cluster Management Guide. This placeholder performs no work and
    returns None.

    See also: AWS API Documentation

    :example: response = client.purchase_reserved_node_offering(
        ReservedNodeOfferingId='string',
        NodeCount=123
    )
    :type ReservedNodeOfferingId: string
    :param ReservedNodeOfferingId: [REQUIRED]\nThe unique identifier of the reserved node offering you want to purchase.\n
    :type NodeCount: integer
    :param NodeCount: The number of reserved nodes that you want to purchase.\nDefault: 1\n
    :rtype: dict
    :return: ``{'ReservedNode': {...}}`` -- the purchased reservation:
        ReservedNodeId, ReservedNodeOfferingId, NodeType, StartTime, Duration
        (seconds), FixedPrice, UsagePrice, CurrencyCode, NodeCount, State
        (one of pending-payment, active, payment-failed, retired,
        exchanging), OfferingType, RecurringCharges
        (RecurringChargeAmount / RecurringChargeFrequency), and
        ReservedNodeOfferingType ('Regular' | 'Upgradable').

    Exceptions:
    Redshift.Client.exceptions.ReservedNodeOfferingNotFoundFault
    Redshift.Client.exceptions.ReservedNodeAlreadyExistsFault
    Redshift.Client.exceptions.ReservedNodeQuotaExceededFault
    Redshift.Client.exceptions.UnsupportedOperationFault
    """
    pass
def reboot_cluster(ClusterIdentifier=None):
"""
Reboots a cluster. This action is taken as soon as possible. It results in a momentary outage to the cluster, during which the cluster status is set to rebooting . A cluster event is created when the reboot is completed. Any pending cluster modifications (see ModifyCluster ) are applied at this reboot. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide .
See also: AWS API Documentation
Exceptions
:example: response = client.reboot_cluster(
ClusterIdentifier='string'
)
:type ClusterIdentifier: string
:param ClusterIdentifier: [REQUIRED]\nThe cluster identifier.\n
:rtype: dict
ReturnsResponse Syntax{
'Cluster': {
'ClusterIdentifier': 'string',
'NodeType': 'string',
'ClusterStatus': 'string',
'ClusterAvailabilityStatus': 'string',
'ModifyStatus': 'string',
'MasterUsername': 'string',
'DBName': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'ClusterCreateTime': datetime(2015, 1, 1),
'AutomatedSnapshotRetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'ClusterSecurityGroups': [
{
'ClusterSecurityGroupName': 'string',
'Status': 'string'
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'ClusterParameterGroups': [
{
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'ClusterParameterStatusList': [
{
'ParameterName': 'string',
'ParameterApplyStatus': 'string',
'ParameterApplyErrorDescription': 'string'
},
]
},
],
'ClusterSubnetGroupName': 'string',
'VpcId': 'string',
'AvailabilityZone': 'string',
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'MasterUserPassword': 'string',
'NodeType': 'string',
'NumberOfNodes': 123,
'ClusterType': 'string',
'ClusterVersion': 'string',
'AutomatedSnapshotRetentionPeriod': 123,
'ClusterIdentifier': 'string',
'PubliclyAccessible': True|False,
'EnhancedVpcRouting': True|False,
'MaintenanceTrackName': 'string',
'EncryptionType': 'string'
},
'ClusterVersion': 'string',
'AllowVersionUpgrade': True|False,
'NumberOfNodes': 123,
'PubliclyAccessible': True|False,
'Encrypted': True|False,
'RestoreStatus': {
'Status': 'string',
'CurrentRestoreRateInMegaBytesPerSecond': 123.0,
'SnapshotSizeInMegaBytes': 123,
'ProgressInMegaBytes': 123,
'ElapsedTimeInSeconds': 123,
'EstimatedTimeToCompletionInSeconds': 123
},
'DataTransferProgress': {
'Status': 'string',
'CurrentRateInMegaBytesPerSecond': 123.0,
'TotalDataInMegaBytes': 123,
'DataTransferredInMegaBytes': 123,
'EstimatedTimeToCompletionInSeconds': 123,
'ElapsedTimeInSeconds': 123
},
'HsmStatus': {
'HsmClientCertificateIdentifier': 'string',
'HsmConfigurationIdentifier': 'string',
'Status': 'string'
},
'ClusterSnapshotCopyStatus': {
'DestinationRegion': 'string',
'RetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'SnapshotCopyGrantName': 'string'
},
'ClusterPublicKey': 'string',
'ClusterNodes': [
{
'NodeRole': 'string',
'PrivateIPAddress': 'string',
'PublicIPAddress': 'string'
},
],
'ElasticIpStatus': {
'ElasticIp': 'string',
'Status': 'string'
},
'ClusterRevisionNumber': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'KmsKeyId': 'string',
'EnhancedVpcRouting': True|False,
'IamRoles': [
{
'IamRoleArn': 'string',
'ApplyStatus': 'string'
},
],
'PendingActions': [
'string',
],
'MaintenanceTrackName': 'string',
'ElasticResizeNumberOfNodeOptions': 'string',
'DeferredMaintenanceWindows': [
{
'DeferMaintenanceIdentifier': 'string',
'DeferMaintenanceStartTime': datetime(2015, 1, 1),
'DeferMaintenanceEndTime': datetime(2015, 1, 1)
},
],
'SnapshotScheduleIdentifier': 'string',
'SnapshotScheduleState': 'MODIFYING'|'ACTIVE'|'FAILED',
'ExpectedNextSnapshotScheduleTime': datetime(2015, 1, 1),
'ExpectedNextSnapshotScheduleTimeStatus': 'string',
'NextMaintenanceWindowStartTime': datetime(2015, 1, 1),
'ResizeInfo': {
'ResizeType': 'string',
'AllowCancelResize': True|False
}
}
}
Response Structure
(dict) --
Cluster (dict) --Describes a cluster.
ClusterIdentifier (string) --The unique identifier of the cluster.
NodeType (string) --The node type for the nodes in the cluster.
ClusterStatus (string) --The current state of the cluster. Possible values are the following:
available
available, prep-for-resize
available, resize-cleanup
cancelling-resize
creating
deleting
final-snapshot
hardware-failure
incompatible-hsm
incompatible-network
incompatible-parameters
incompatible-restore
modifying
paused
rebooting
renaming
resizing
rotating-keys
storage-full
updating-hsm
ClusterAvailabilityStatus (string) --The availability status of the cluster for queries. Possible values are the following:
Available - The cluster is available for queries.
Unavailable - The cluster is not available for queries.
Maintenance - The cluster is intermittently available for queries due to maintenance activities.
Modifying - The cluster is intermittently available for queries due to changes that modify the cluster.
Failed - The cluster failed and is not available for queries.
ModifyStatus (string) --The status of a modify operation, if any, initiated for the cluster.
MasterUsername (string) --The master user name for the cluster. This name is used to connect to the database that is specified in the DBName parameter.
DBName (string) --The name of the initial database that was created when the cluster was created. This same name is returned for the life of the cluster. If an initial database was not specified, a database named dev dev was created by default.
Endpoint (dict) --The connection endpoint.
Address (string) --The DNS address of the Cluster.
Port (integer) --The port that the database engine is listening on.
ClusterCreateTime (datetime) --The date and time that the cluster was created.
AutomatedSnapshotRetentionPeriod (integer) --The number of days that automatic cluster snapshots are retained.
ManualSnapshotRetentionPeriod (integer) --The default number of days to retain a manual snapshot. If the value is -1, the snapshot is retained indefinitely. This setting doesn\'t change the retention period of existing snapshots.
The value must be either -1 or an integer between 1 and 3,653.
ClusterSecurityGroups (list) --A list of cluster security group that are associated with the cluster. Each security group is represented by an element that contains ClusterSecurityGroup.Name and ClusterSecurityGroup.Status subelements.
Cluster security groups are used when the cluster is not created in an Amazon Virtual Private Cloud (VPC). Clusters that are created in a VPC use VPC security groups, which are listed by the VpcSecurityGroups parameter.
(dict) --Describes a cluster security group.
ClusterSecurityGroupName (string) --The name of the cluster security group.
Status (string) --The status of the cluster security group.
VpcSecurityGroups (list) --A list of Amazon Virtual Private Cloud (Amazon VPC) security groups that are associated with the cluster. This parameter is returned only if the cluster is in a VPC.
(dict) --Describes the members of a VPC security group.
VpcSecurityGroupId (string) --The identifier of the VPC security group.
Status (string) --The status of the VPC security group.
ClusterParameterGroups (list) --The list of cluster parameter groups that are associated with this cluster. Each parameter group in the list is returned with its status.
(dict) --Describes the status of a parameter group.
ParameterGroupName (string) --The name of the cluster parameter group.
ParameterApplyStatus (string) --The status of parameter updates.
ClusterParameterStatusList (list) --The list of parameter statuses.
For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide .
(dict) --Describes the status of a parameter group.
ParameterName (string) --The name of the parameter.
ParameterApplyStatus (string) --The status of the parameter that indicates whether the parameter is in sync with the database, waiting for a cluster reboot, or encountered an error when being applied.
The following are possible statuses and descriptions.
in-sync : The parameter value is in sync with the database.
pending-reboot : The parameter value will be applied after the cluster reboots.
applying : The parameter value is being applied to the database.
invalid-parameter : Cannot apply the parameter value because it has an invalid value or syntax.
apply-deferred : The parameter contains static property changes. The changes are deferred until the cluster reboots.
apply-error : Cannot connect to the cluster. The parameter change will be applied after the cluster reboots.
unknown-error : Cannot apply the parameter change right now. The change will be applied after the cluster reboots.
ParameterApplyErrorDescription (string) --The error that prevented the parameter from being applied to the database.
ClusterSubnetGroupName (string) --The name of the subnet group that is associated with the cluster. This parameter is valid only when the cluster is in a VPC.
VpcId (string) --The identifier of the VPC the cluster is in, if the cluster is in a VPC.
AvailabilityZone (string) --The name of the Availability Zone in which the cluster is located.
PreferredMaintenanceWindow (string) --The weekly time range, in Universal Coordinated Time (UTC), during which system maintenance can occur.
PendingModifiedValues (dict) --A value that, if present, indicates that changes to the cluster are pending. Specific pending changes are identified by subelements.
MasterUserPassword (string) --The pending or in-progress change of the master user password for the cluster.
NodeType (string) --The pending or in-progress change of the cluster\'s node type.
NumberOfNodes (integer) --The pending or in-progress change of the number of nodes in the cluster.
ClusterType (string) --The pending or in-progress change of the cluster type.
ClusterVersion (string) --The pending or in-progress change of the service version.
AutomatedSnapshotRetentionPeriod (integer) --The pending or in-progress change of the automated snapshot retention period.
ClusterIdentifier (string) --The pending or in-progress change of the new identifier for the cluster.
PubliclyAccessible (boolean) --The pending or in-progress change of the ability to connect to the cluster from the public network.
EnhancedVpcRouting (boolean) --An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.
If this option is true , enhanced VPC routing is enabled.
Default: false
MaintenanceTrackName (string) --The name of the maintenance track that the cluster will change to during the next maintenance window.
EncryptionType (string) --The encryption type for a cluster. Possible values are: KMS and None. For the China region the possible values are None, and Legacy.
ClusterVersion (string) --The version ID of the Amazon Redshift engine that is running on the cluster.
AllowVersionUpgrade (boolean) --A boolean value that, if true , indicates that major version upgrades will be applied automatically to the cluster during the maintenance window.
NumberOfNodes (integer) --The number of compute nodes in the cluster.
PubliclyAccessible (boolean) --A boolean value that, if true , indicates that the cluster can be accessed from a public network.
Encrypted (boolean) --A boolean value that, if true , indicates that data in the cluster is encrypted at rest.
RestoreStatus (dict) --A value that describes the status of a cluster restore action. This parameter returns null if the cluster was not created by restoring a snapshot.
Status (string) --The status of the restore action. Returns starting, restoring, completed, or failed.
CurrentRestoreRateInMegaBytesPerSecond (float) --The number of megabytes per second being transferred from the backup storage. Returns the average rate for a completed backup. This field is only updated when you restore to DC2 and DS2 node types.
SnapshotSizeInMegaBytes (integer) --The size of the set of snapshot data used to restore the cluster. This field is only updated when you restore to DC2 and DS2 node types.
ProgressInMegaBytes (integer) --The number of megabytes that have been transferred from snapshot storage. This field is only updated when you restore to DC2 and DS2 node types.
ElapsedTimeInSeconds (integer) --The amount of time an in-progress restore has been running, or the amount of time it took a completed restore to finish. This field is only updated when you restore to DC2 and DS2 node types.
EstimatedTimeToCompletionInSeconds (integer) --The estimate of the time remaining before the restore will complete. Returns 0 for a completed restore. This field is only updated when you restore to DC2 and DS2 node types.
DataTransferProgress (dict) --
Status (string) --Describes the status of the cluster. While the transfer is in progress the status is transferringdata .
CurrentRateInMegaBytesPerSecond (float) --Describes the data transfer rate in MB\'s per second.
TotalDataInMegaBytes (integer) --Describes the total amount of data to be transfered in megabytes.
DataTransferredInMegaBytes (integer) --Describes the total amount of data that has been transfered in MB\'s.
EstimatedTimeToCompletionInSeconds (integer) --Describes the estimated number of seconds remaining to complete the transfer.
ElapsedTimeInSeconds (integer) --Describes the number of seconds that have elapsed during the data transfer.
HsmStatus (dict) --A value that reports whether the Amazon Redshift cluster has finished applying any hardware security module (HSM) settings changes specified in a modify cluster command.
Values: active, applying
HsmClientCertificateIdentifier (string) --Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM.
HsmConfigurationIdentifier (string) --Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM.
Status (string) --Reports whether the Amazon Redshift cluster has finished applying any HSM settings changes specified in a modify cluster command.
Values: active, applying
ClusterSnapshotCopyStatus (dict) --A value that returns the destination region and retention period that are configured for cross-region snapshot copy.
DestinationRegion (string) --The destination region that snapshots are automatically copied to when cross-region snapshot copy is enabled.
RetentionPeriod (integer) --The number of days that automated snapshots are retained in the destination region after they are copied from a source region.
ManualSnapshotRetentionPeriod (integer) --The number of days that automated snapshots are retained in the destination region after they are copied from a source region. If the value is -1, the manual snapshot is retained indefinitely.
The value must be either -1 or an integer between 1 and 3,653.
SnapshotCopyGrantName (string) --The name of the snapshot copy grant.
ClusterPublicKey (string) --The public key for the cluster.
ClusterNodes (list) --The nodes in the cluster.
(dict) --The identifier of a node in a cluster.
NodeRole (string) --Whether the node is a leader node or a compute node.
PrivateIPAddress (string) --The private IP address of a node within a cluster.
PublicIPAddress (string) --The public IP address of a node within a cluster.
ElasticIpStatus (dict) --The status of the elastic IP (EIP) address.
ElasticIp (string) --The elastic IP (EIP) address for the cluster.
Status (string) --The status of the elastic IP (EIP) address.
ClusterRevisionNumber (string) --The specific revision number of the database in the cluster.
Tags (list) --The list of tags for the cluster.
(dict) --A tag consisting of a name/value pair for a resource.
Key (string) --The key, or name, for the resource tag.
Value (string) --The value for the resource tag.
KmsKeyId (string) --The AWS Key Management Service (AWS KMS) key ID of the encryption key used to encrypt data in the cluster.
EnhancedVpcRouting (boolean) --An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.
If this option is true , enhanced VPC routing is enabled.
Default: false
IamRoles (list) --A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services.
(dict) --An AWS Identity and Access Management (IAM) role that can be used by the associated Amazon Redshift cluster to access other AWS services.
IamRoleArn (string) --The Amazon Resource Name (ARN) of the IAM role, for example, arn:aws:iam::123456789012:role/RedshiftCopyUnload .
ApplyStatus (string) --A value that describes the status of the IAM role\'s association with an Amazon Redshift cluster.
The following are possible statuses and descriptions.
in-sync : The role is available for use by the cluster.
adding : The role is in the process of being associated with the cluster.
removing : The role is in the process of being disassociated with the cluster.
PendingActions (list) --Cluster operations that are waiting to be started.
(string) --
MaintenanceTrackName (string) --The name of the maintenance track for the cluster.
ElasticResizeNumberOfNodeOptions (string) --The number of nodes that you can resize the cluster to with the elastic resize method.
DeferredMaintenanceWindows (list) --Describes a group of DeferredMaintenanceWindow objects.
(dict) --Describes a deferred maintenance window
DeferMaintenanceIdentifier (string) --A unique identifier for the maintenance window.
DeferMaintenanceStartTime (datetime) --A timestamp for the beginning of the time period when we defer maintenance.
DeferMaintenanceEndTime (datetime) --A timestamp for the end of the time period when we defer maintenance.
SnapshotScheduleIdentifier (string) --A unique identifier for the cluster snapshot schedule.
SnapshotScheduleState (string) --The current state of the cluster snapshot schedule.
ExpectedNextSnapshotScheduleTime (datetime) --The date and time when the next snapshot is expected to be taken for clusters with a valid snapshot schedule and backups enabled.
ExpectedNextSnapshotScheduleTimeStatus (string) --The status of next expected snapshot for clusters having a valid snapshot schedule and backups enabled. Possible values are the following:
OnTrack - The next snapshot is expected to be taken on time.
Pending - The next snapshot is pending to be taken.
NextMaintenanceWindowStartTime (datetime) --The date and time in UTC when system maintenance can begin.
ResizeInfo (dict) --Returns the following:
AllowCancelResize: a boolean value indicating if the resize operation can be cancelled.
ResizeType: Returns ClassicResize
ResizeType (string) --Returns the value ClassicResize .
AllowCancelResize (boolean) --A boolean value indicating if the resize operation can be cancelled.
Exceptions
Redshift.Client.exceptions.InvalidClusterStateFault
Redshift.Client.exceptions.ClusterNotFoundFault
:return: {
'Cluster': {
'ClusterIdentifier': 'string',
'NodeType': 'string',
'ClusterStatus': 'string',
'ClusterAvailabilityStatus': 'string',
'ModifyStatus': 'string',
'MasterUsername': 'string',
'DBName': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'ClusterCreateTime': datetime(2015, 1, 1),
'AutomatedSnapshotRetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'ClusterSecurityGroups': [
{
'ClusterSecurityGroupName': 'string',
'Status': 'string'
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'ClusterParameterGroups': [
{
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'ClusterParameterStatusList': [
{
'ParameterName': 'string',
'ParameterApplyStatus': 'string',
'ParameterApplyErrorDescription': 'string'
},
]
},
],
'ClusterSubnetGroupName': 'string',
'VpcId': 'string',
'AvailabilityZone': 'string',
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'MasterUserPassword': 'string',
'NodeType': 'string',
'NumberOfNodes': 123,
'ClusterType': 'string',
'ClusterVersion': 'string',
'AutomatedSnapshotRetentionPeriod': 123,
'ClusterIdentifier': 'string',
'PubliclyAccessible': True|False,
'EnhancedVpcRouting': True|False,
'MaintenanceTrackName': 'string',
'EncryptionType': 'string'
},
'ClusterVersion': 'string',
'AllowVersionUpgrade': True|False,
'NumberOfNodes': 123,
'PubliclyAccessible': True|False,
'Encrypted': True|False,
'RestoreStatus': {
'Status': 'string',
'CurrentRestoreRateInMegaBytesPerSecond': 123.0,
'SnapshotSizeInMegaBytes': 123,
'ProgressInMegaBytes': 123,
'ElapsedTimeInSeconds': 123,
'EstimatedTimeToCompletionInSeconds': 123
},
'DataTransferProgress': {
'Status': 'string',
'CurrentRateInMegaBytesPerSecond': 123.0,
'TotalDataInMegaBytes': 123,
'DataTransferredInMegaBytes': 123,
'EstimatedTimeToCompletionInSeconds': 123,
'ElapsedTimeInSeconds': 123
},
'HsmStatus': {
'HsmClientCertificateIdentifier': 'string',
'HsmConfigurationIdentifier': 'string',
'Status': 'string'
},
'ClusterSnapshotCopyStatus': {
'DestinationRegion': 'string',
'RetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'SnapshotCopyGrantName': 'string'
},
'ClusterPublicKey': 'string',
'ClusterNodes': [
{
'NodeRole': 'string',
'PrivateIPAddress': 'string',
'PublicIPAddress': 'string'
},
],
'ElasticIpStatus': {
'ElasticIp': 'string',
'Status': 'string'
},
'ClusterRevisionNumber': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'KmsKeyId': 'string',
'EnhancedVpcRouting': True|False,
'IamRoles': [
{
'IamRoleArn': 'string',
'ApplyStatus': 'string'
},
],
'PendingActions': [
'string',
],
'MaintenanceTrackName': 'string',
'ElasticResizeNumberOfNodeOptions': 'string',
'DeferredMaintenanceWindows': [
{
'DeferMaintenanceIdentifier': 'string',
'DeferMaintenanceStartTime': datetime(2015, 1, 1),
'DeferMaintenanceEndTime': datetime(2015, 1, 1)
},
],
'SnapshotScheduleIdentifier': 'string',
'SnapshotScheduleState': 'MODIFYING'|'ACTIVE'|'FAILED',
'ExpectedNextSnapshotScheduleTime': datetime(2015, 1, 1),
'ExpectedNextSnapshotScheduleTimeStatus': 'string',
'NextMaintenanceWindowStartTime': datetime(2015, 1, 1),
'ResizeInfo': {
'ResizeType': 'string',
'AllowCancelResize': True|False
}
}
}
:returns:
Available - The cluster is available for queries.
Unavailable - The cluster is not available for queries.
Maintenance - The cluster is intermittently available for queries due to maintenance activities.
Modifying - The cluster is intermittently available for queries due to changes that modify the cluster.
Failed - The cluster failed and is not available for queries.
"""
pass
def reset_cluster_parameter_group(ParameterGroupName=None, ResetAllParameters=None, Parameters=None):
    """Reset parameters of a cluster parameter group to their default values.

    Each reset parameter has its source set back to ``engine-default``. To
    reset every parameter in the group at once, pass ``ResetAllParameters``;
    otherwise supply at least one entry in ``Parameters``. Changes take
    effect only after any associated clusters are rebooted.

    See also: AWS API Documentation

    Example::

        response = client.reset_cluster_parameter_group(
            ParameterGroupName='string',
            ResetAllParameters=True|False,
            Parameters=[
                {
                    'ParameterName': 'string',
                    'ParameterValue': 'string',
                    'Description': 'string',
                    'Source': 'string',
                    'DataType': 'string',
                    'AllowedValues': 'string',
                    'ApplyType': 'static'|'dynamic',
                    'IsModifiable': True|False,
                    'MinimumEngineVersion': 'string'
                },
            ]
        )

    :type ParameterGroupName: string
    :param ParameterGroupName: [REQUIRED]\nThe name of the cluster parameter group to be reset.\n
    :type ResetAllParameters: boolean
    :param ResetAllParameters: If true , all parameters in the specified parameter group will be reset to their default values.\nDefault: true\n
    :type Parameters: list
    :param Parameters: An array of names of parameters to be reset. If ResetAllParameters option is not used, then at least one parameter name must be supplied.\nConstraints: A maximum of 20 parameters can be reset in a single request.\n\n(dict) --Describes a parameter in a cluster parameter group.\n\nParameterName (string) --The name of the parameter.\n\nParameterValue (string) --The value of the parameter.\n\nDescription (string) --A description of the parameter.\n\nSource (string) --The source of the parameter value, such as 'engine-default' or 'user'.\n\nDataType (string) --The data type of the parameter.\n\nAllowedValues (string) --The valid range of values for the parameter.\n\nApplyType (string) --Specifies how to apply the WLM configuration parameter. Some properties can be applied dynamically, while other properties require that any associated clusters be rebooted for the configuration changes to be applied. For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide .\n\nIsModifiable (boolean) --If true , the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.\n\nMinimumEngineVersion (string) --The earliest engine version to which the parameter can apply.\n\n\n\n\n
    :rtype: dict

    Response syntax::

        {
            'ParameterGroupName': 'string',
            'ParameterGroupStatus': 'string'
        }

    Response structure:

    - (dict) --

      - ParameterGroupName (string) -- The name of the cluster parameter
        group.
      - ParameterGroupStatus (string) -- The status of the parameter group.
        For example, if you made a change to a parameter group name-value
        pair, then the change could be pending a reboot of an associated
        cluster.

    Exceptions:

    - Redshift.Client.exceptions.InvalidClusterParameterGroupStateFault
    - Redshift.Client.exceptions.ClusterParameterGroupNotFoundFault
    """
    # Stub only: the real operation is dispatched dynamically by botocore.
    pass
def resize_cluster(ClusterIdentifier=None, ClusterType=None, NodeType=None, NumberOfNodes=None, Classic=None):
"""
Changes the size of the cluster. You can change the cluster\'s type, or change the number or type of nodes. The default behavior is to use the elastic resize method. With an elastic resize, your cluster is available for read and write operations more quickly than with the classic resize method.
Elastic resize operations have the following restrictions:
See also: AWS API Documentation
Exceptions
:example: response = client.resize_cluster(
ClusterIdentifier='string',
ClusterType='string',
NodeType='string',
NumberOfNodes=123,
Classic=True|False
)
:type ClusterIdentifier: string
:param ClusterIdentifier: [REQUIRED]\nThe unique identifier for the cluster to resize.\n
:type ClusterType: string
:param ClusterType: The new cluster type for the specified cluster.
:type NodeType: string
:param NodeType: The new node type for the nodes you are adding. If not specified, the cluster\'s current node type is used.
:type NumberOfNodes: integer
:param NumberOfNodes: The new number of nodes for the cluster.
:type Classic: boolean
:param Classic: A boolean value indicating whether the resize operation is using the classic resize process. If you don\'t provide this parameter or set the value to false , the resize type is elastic.
:rtype: dict
ReturnsResponse Syntax
{
'Cluster': {
'ClusterIdentifier': 'string',
'NodeType': 'string',
'ClusterStatus': 'string',
'ClusterAvailabilityStatus': 'string',
'ModifyStatus': 'string',
'MasterUsername': 'string',
'DBName': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'ClusterCreateTime': datetime(2015, 1, 1),
'AutomatedSnapshotRetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'ClusterSecurityGroups': [
{
'ClusterSecurityGroupName': 'string',
'Status': 'string'
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'ClusterParameterGroups': [
{
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'ClusterParameterStatusList': [
{
'ParameterName': 'string',
'ParameterApplyStatus': 'string',
'ParameterApplyErrorDescription': 'string'
},
]
},
],
'ClusterSubnetGroupName': 'string',
'VpcId': 'string',
'AvailabilityZone': 'string',
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'MasterUserPassword': 'string',
'NodeType': 'string',
'NumberOfNodes': 123,
'ClusterType': 'string',
'ClusterVersion': 'string',
'AutomatedSnapshotRetentionPeriod': 123,
'ClusterIdentifier': 'string',
'PubliclyAccessible': True|False,
'EnhancedVpcRouting': True|False,
'MaintenanceTrackName': 'string',
'EncryptionType': 'string'
},
'ClusterVersion': 'string',
'AllowVersionUpgrade': True|False,
'NumberOfNodes': 123,
'PubliclyAccessible': True|False,
'Encrypted': True|False,
'RestoreStatus': {
'Status': 'string',
'CurrentRestoreRateInMegaBytesPerSecond': 123.0,
'SnapshotSizeInMegaBytes': 123,
'ProgressInMegaBytes': 123,
'ElapsedTimeInSeconds': 123,
'EstimatedTimeToCompletionInSeconds': 123
},
'DataTransferProgress': {
'Status': 'string',
'CurrentRateInMegaBytesPerSecond': 123.0,
'TotalDataInMegaBytes': 123,
'DataTransferredInMegaBytes': 123,
'EstimatedTimeToCompletionInSeconds': 123,
'ElapsedTimeInSeconds': 123
},
'HsmStatus': {
'HsmClientCertificateIdentifier': 'string',
'HsmConfigurationIdentifier': 'string',
'Status': 'string'
},
'ClusterSnapshotCopyStatus': {
'DestinationRegion': 'string',
'RetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'SnapshotCopyGrantName': 'string'
},
'ClusterPublicKey': 'string',
'ClusterNodes': [
{
'NodeRole': 'string',
'PrivateIPAddress': 'string',
'PublicIPAddress': 'string'
},
],
'ElasticIpStatus': {
'ElasticIp': 'string',
'Status': 'string'
},
'ClusterRevisionNumber': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'KmsKeyId': 'string',
'EnhancedVpcRouting': True|False,
'IamRoles': [
{
'IamRoleArn': 'string',
'ApplyStatus': 'string'
},
],
'PendingActions': [
'string',
],
'MaintenanceTrackName': 'string',
'ElasticResizeNumberOfNodeOptions': 'string',
'DeferredMaintenanceWindows': [
{
'DeferMaintenanceIdentifier': 'string',
'DeferMaintenanceStartTime': datetime(2015, 1, 1),
'DeferMaintenanceEndTime': datetime(2015, 1, 1)
},
],
'SnapshotScheduleIdentifier': 'string',
'SnapshotScheduleState': 'MODIFYING'|'ACTIVE'|'FAILED',
'ExpectedNextSnapshotScheduleTime': datetime(2015, 1, 1),
'ExpectedNextSnapshotScheduleTimeStatus': 'string',
'NextMaintenanceWindowStartTime': datetime(2015, 1, 1),
'ResizeInfo': {
'ResizeType': 'string',
'AllowCancelResize': True|False
}
}
}
Response Structure
(dict) --
Cluster (dict) --
Describes a cluster.
ClusterIdentifier (string) --
The unique identifier of the cluster.
NodeType (string) --
The node type for the nodes in the cluster.
ClusterStatus (string) --
The current state of the cluster. Possible values are the following:
available
available, prep-for-resize
available, resize-cleanup
cancelling-resize
creating
deleting
final-snapshot
hardware-failure
incompatible-hsm
incompatible-network
incompatible-parameters
incompatible-restore
modifying
paused
rebooting
renaming
resizing
rotating-keys
storage-full
updating-hsm
ClusterAvailabilityStatus (string) --
The availability status of the cluster for queries. Possible values are the following:
Available - The cluster is available for queries.
Unavailable - The cluster is not available for queries.
Maintenance - The cluster is intermittently available for queries due to maintenance activities.
Modifying - The cluster is intermittently available for queries due to changes that modify the cluster.
Failed - The cluster failed and is not available for queries.
ModifyStatus (string) --
The status of a modify operation, if any, initiated for the cluster.
MasterUsername (string) --
The master user name for the cluster. This name is used to connect to the database that is specified in the DBName parameter.
DBName (string) --
The name of the initial database that was created when the cluster was created. This same name is returned for the life of the cluster. If an initial database was not specified, a database named dev dev was created by default.
Endpoint (dict) --
The connection endpoint.
Address (string) --
The DNS address of the Cluster.
Port (integer) --
The port that the database engine is listening on.
ClusterCreateTime (datetime) --
The date and time that the cluster was created.
AutomatedSnapshotRetentionPeriod (integer) --
The number of days that automatic cluster snapshots are retained.
ManualSnapshotRetentionPeriod (integer) --
The default number of days to retain a manual snapshot. If the value is -1, the snapshot is retained indefinitely. This setting doesn\'t change the retention period of existing snapshots.
The value must be either -1 or an integer between 1 and 3,653.
ClusterSecurityGroups (list) --
A list of cluster security group that are associated with the cluster. Each security group is represented by an element that contains ClusterSecurityGroup.Name and ClusterSecurityGroup.Status subelements.
Cluster security groups are used when the cluster is not created in an Amazon Virtual Private Cloud (VPC). Clusters that are created in a VPC use VPC security groups, which are listed by the VpcSecurityGroups parameter.
(dict) --
Describes a cluster security group.
ClusterSecurityGroupName (string) --
The name of the cluster security group.
Status (string) --
The status of the cluster security group.
VpcSecurityGroups (list) --
A list of Amazon Virtual Private Cloud (Amazon VPC) security groups that are associated with the cluster. This parameter is returned only if the cluster is in a VPC.
(dict) --
Describes the members of a VPC security group.
VpcSecurityGroupId (string) --
The identifier of the VPC security group.
Status (string) --
The status of the VPC security group.
ClusterParameterGroups (list) --
The list of cluster parameter groups that are associated with this cluster. Each parameter group in the list is returned with its status.
(dict) --
Describes the status of a parameter group.
ParameterGroupName (string) --
The name of the cluster parameter group.
ParameterApplyStatus (string) --
The status of parameter updates.
ClusterParameterStatusList (list) --
The list of parameter statuses.
For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide .
(dict) --
Describes the status of a parameter group.
ParameterName (string) --
The name of the parameter.
ParameterApplyStatus (string) --
The status of the parameter that indicates whether the parameter is in sync with the database, waiting for a cluster reboot, or encountered an error when being applied.
The following are possible statuses and descriptions.
in-sync : The parameter value is in sync with the database.
pending-reboot : The parameter value will be applied after the cluster reboots.
applying : The parameter value is being applied to the database.
invalid-parameter : Cannot apply the parameter value because it has an invalid value or syntax.
apply-deferred : The parameter contains static property changes. The changes are deferred until the cluster reboots.
apply-error : Cannot connect to the cluster. The parameter change will be applied after the cluster reboots.
unknown-error : Cannot apply the parameter change right now. The change will be applied after the cluster reboots.
ParameterApplyErrorDescription (string) --
The error that prevented the parameter from being applied to the database.
ClusterSubnetGroupName (string) --
The name of the subnet group that is associated with the cluster. This parameter is valid only when the cluster is in a VPC.
VpcId (string) --
The identifier of the VPC the cluster is in, if the cluster is in a VPC.
AvailabilityZone (string) --
The name of the Availability Zone in which the cluster is located.
PreferredMaintenanceWindow (string) --
The weekly time range, in Universal Coordinated Time (UTC), during which system maintenance can occur.
PendingModifiedValues (dict) --
A value that, if present, indicates that changes to the cluster are pending. Specific pending changes are identified by subelements.
MasterUserPassword (string) --
The pending or in-progress change of the master user password for the cluster.
NodeType (string) --
The pending or in-progress change of the cluster\'s node type.
NumberOfNodes (integer) --
The pending or in-progress change of the number of nodes in the cluster.
ClusterType (string) --
The pending or in-progress change of the cluster type.
ClusterVersion (string) --
The pending or in-progress change of the service version.
AutomatedSnapshotRetentionPeriod (integer) --
The pending or in-progress change of the automated snapshot retention period.
ClusterIdentifier (string) --
The pending or in-progress change of the new identifier for the cluster.
PubliclyAccessible (boolean) --
The pending or in-progress change of the ability to connect to the cluster from the public network.
EnhancedVpcRouting (boolean) --
An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.
If this option is true , enhanced VPC routing is enabled.
Default: false
MaintenanceTrackName (string) --
The name of the maintenance track that the cluster will change to during the next maintenance window.
EncryptionType (string) --
The encryption type for a cluster. Possible values are: KMS and None. For the China region the possible values are None, and Legacy.
ClusterVersion (string) --
The version ID of the Amazon Redshift engine that is running on the cluster.
AllowVersionUpgrade (boolean) --
A boolean value that, if true , indicates that major version upgrades will be applied automatically to the cluster during the maintenance window.
NumberOfNodes (integer) --
The number of compute nodes in the cluster.
PubliclyAccessible (boolean) --
A boolean value that, if true , indicates that the cluster can be accessed from a public network.
Encrypted (boolean) --
A boolean value that, if true , indicates that data in the cluster is encrypted at rest.
RestoreStatus (dict) --
A value that describes the status of a cluster restore action. This parameter returns null if the cluster was not created by restoring a snapshot.
Status (string) --
The status of the restore action. Returns starting, restoring, completed, or failed.
CurrentRestoreRateInMegaBytesPerSecond (float) --
The number of megabytes per second being transferred from the backup storage. Returns the average rate for a completed backup. This field is only updated when you restore to DC2 and DS2 node types.
SnapshotSizeInMegaBytes (integer) --
The size of the set of snapshot data used to restore the cluster. This field is only updated when you restore to DC2 and DS2 node types.
ProgressInMegaBytes (integer) --
The number of megabytes that have been transferred from snapshot storage. This field is only updated when you restore to DC2 and DS2 node types.
ElapsedTimeInSeconds (integer) --
The amount of time an in-progress restore has been running, or the amount of time it took a completed restore to finish. This field is only updated when you restore to DC2 and DS2 node types.
EstimatedTimeToCompletionInSeconds (integer) --
The estimate of the time remaining before the restore will complete. Returns 0 for a completed restore. This field is only updated when you restore to DC2 and DS2 node types.
DataTransferProgress (dict) --
Status (string) --
Describes the status of the cluster. While the transfer is in progress the status is transferringdata .
CurrentRateInMegaBytesPerSecond (float) --
Describes the data transfer rate in MB\'s per second.
TotalDataInMegaBytes (integer) --
Describes the total amount of data to be transferred in megabytes.
DataTransferredInMegaBytes (integer) --
Describes the total amount of data that has been transferred in MB\'s.
EstimatedTimeToCompletionInSeconds (integer) --
Describes the estimated number of seconds remaining to complete the transfer.
ElapsedTimeInSeconds (integer) --
Describes the number of seconds that have elapsed during the data transfer.
HsmStatus (dict) --
A value that reports whether the Amazon Redshift cluster has finished applying any hardware security module (HSM) settings changes specified in a modify cluster command.
Values: active, applying
HsmClientCertificateIdentifier (string) --
Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM.
HsmConfigurationIdentifier (string) --
Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM.
Status (string) --
Reports whether the Amazon Redshift cluster has finished applying any HSM settings changes specified in a modify cluster command.
Values: active, applying
ClusterSnapshotCopyStatus (dict) --
A value that returns the destination region and retention period that are configured for cross-region snapshot copy.
DestinationRegion (string) --
The destination region that snapshots are automatically copied to when cross-region snapshot copy is enabled.
RetentionPeriod (integer) --
The number of days that automated snapshots are retained in the destination region after they are copied from a source region.
ManualSnapshotRetentionPeriod (integer) --
The number of days that automated snapshots are retained in the destination region after they are copied from a source region. If the value is -1, the manual snapshot is retained indefinitely.
The value must be either -1 or an integer between 1 and 3,653.
SnapshotCopyGrantName (string) --
The name of the snapshot copy grant.
ClusterPublicKey (string) --
The public key for the cluster.
ClusterNodes (list) --
The nodes in the cluster.
(dict) --
The identifier of a node in a cluster.
NodeRole (string) --
Whether the node is a leader node or a compute node.
PrivateIPAddress (string) --
The private IP address of a node within a cluster.
PublicIPAddress (string) --
The public IP address of a node within a cluster.
ElasticIpStatus (dict) --
The status of the elastic IP (EIP) address.
ElasticIp (string) --
The elastic IP (EIP) address for the cluster.
Status (string) --
The status of the elastic IP (EIP) address.
ClusterRevisionNumber (string) --
The specific revision number of the database in the cluster.
Tags (list) --
The list of tags for the cluster.
(dict) --
A tag consisting of a name/value pair for a resource.
Key (string) --
The key, or name, for the resource tag.
Value (string) --
The value for the resource tag.
KmsKeyId (string) --
The AWS Key Management Service (AWS KMS) key ID of the encryption key used to encrypt data in the cluster.
EnhancedVpcRouting (boolean) --
An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.
If this option is true , enhanced VPC routing is enabled.
Default: false
IamRoles (list) --
A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services.
(dict) --
An AWS Identity and Access Management (IAM) role that can be used by the associated Amazon Redshift cluster to access other AWS services.
IamRoleArn (string) --
The Amazon Resource Name (ARN) of the IAM role, for example, arn:aws:iam::123456789012:role/RedshiftCopyUnload .
ApplyStatus (string) --
A value that describes the status of the IAM role\'s association with an Amazon Redshift cluster.
The following are possible statuses and descriptions.
in-sync : The role is available for use by the cluster.
adding : The role is in the process of being associated with the cluster.
removing : The role is in the process of being disassociated with the cluster.
PendingActions (list) --
Cluster operations that are waiting to be started.
(string) --
MaintenanceTrackName (string) --
The name of the maintenance track for the cluster.
ElasticResizeNumberOfNodeOptions (string) --
The number of nodes that you can resize the cluster to with the elastic resize method.
DeferredMaintenanceWindows (list) --
Describes a group of DeferredMaintenanceWindow objects.
(dict) --
Describes a deferred maintenance window
DeferMaintenanceIdentifier (string) --
A unique identifier for the maintenance window.
DeferMaintenanceStartTime (datetime) --
A timestamp for the beginning of the time period when we defer maintenance.
DeferMaintenanceEndTime (datetime) --
A timestamp for the end of the time period when we defer maintenance.
SnapshotScheduleIdentifier (string) --
A unique identifier for the cluster snapshot schedule.
SnapshotScheduleState (string) --
The current state of the cluster snapshot schedule.
ExpectedNextSnapshotScheduleTime (datetime) --
The date and time when the next snapshot is expected to be taken for clusters with a valid snapshot schedule and backups enabled.
ExpectedNextSnapshotScheduleTimeStatus (string) --
The status of next expected snapshot for clusters having a valid snapshot schedule and backups enabled. Possible values are the following:
OnTrack - The next snapshot is expected to be taken on time.
Pending - The next snapshot is pending to be taken.
NextMaintenanceWindowStartTime (datetime) --
The date and time in UTC when system maintenance can begin.
ResizeInfo (dict) --
Returns the following:
AllowCancelResize: a boolean value indicating if the resize operation can be cancelled.
ResizeType: Returns ClassicResize
ResizeType (string) --
Returns the value ClassicResize .
AllowCancelResize (boolean) --
A boolean value indicating if the resize operation can be cancelled.
Exceptions
Redshift.Client.exceptions.InvalidClusterStateFault
Redshift.Client.exceptions.ClusterNotFoundFault
Redshift.Client.exceptions.NumberOfNodesQuotaExceededFault
Redshift.Client.exceptions.NumberOfNodesPerClusterLimitExceededFault
Redshift.Client.exceptions.InsufficientClusterCapacityFault
Redshift.Client.exceptions.UnsupportedOptionFault
Redshift.Client.exceptions.UnsupportedOperationFault
Redshift.Client.exceptions.UnauthorizedOperation
Redshift.Client.exceptions.LimitExceededFault
:return: {
'Cluster': {
'ClusterIdentifier': 'string',
'NodeType': 'string',
'ClusterStatus': 'string',
'ClusterAvailabilityStatus': 'string',
'ModifyStatus': 'string',
'MasterUsername': 'string',
'DBName': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'ClusterCreateTime': datetime(2015, 1, 1),
'AutomatedSnapshotRetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'ClusterSecurityGroups': [
{
'ClusterSecurityGroupName': 'string',
'Status': 'string'
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'ClusterParameterGroups': [
{
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'ClusterParameterStatusList': [
{
'ParameterName': 'string',
'ParameterApplyStatus': 'string',
'ParameterApplyErrorDescription': 'string'
},
]
},
],
'ClusterSubnetGroupName': 'string',
'VpcId': 'string',
'AvailabilityZone': 'string',
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'MasterUserPassword': 'string',
'NodeType': 'string',
'NumberOfNodes': 123,
'ClusterType': 'string',
'ClusterVersion': 'string',
'AutomatedSnapshotRetentionPeriod': 123,
'ClusterIdentifier': 'string',
'PubliclyAccessible': True|False,
'EnhancedVpcRouting': True|False,
'MaintenanceTrackName': 'string',
'EncryptionType': 'string'
},
'ClusterVersion': 'string',
'AllowVersionUpgrade': True|False,
'NumberOfNodes': 123,
'PubliclyAccessible': True|False,
'Encrypted': True|False,
'RestoreStatus': {
'Status': 'string',
'CurrentRestoreRateInMegaBytesPerSecond': 123.0,
'SnapshotSizeInMegaBytes': 123,
'ProgressInMegaBytes': 123,
'ElapsedTimeInSeconds': 123,
'EstimatedTimeToCompletionInSeconds': 123
},
'DataTransferProgress': {
'Status': 'string',
'CurrentRateInMegaBytesPerSecond': 123.0,
'TotalDataInMegaBytes': 123,
'DataTransferredInMegaBytes': 123,
'EstimatedTimeToCompletionInSeconds': 123,
'ElapsedTimeInSeconds': 123
},
'HsmStatus': {
'HsmClientCertificateIdentifier': 'string',
'HsmConfigurationIdentifier': 'string',
'Status': 'string'
},
'ClusterSnapshotCopyStatus': {
'DestinationRegion': 'string',
'RetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'SnapshotCopyGrantName': 'string'
},
'ClusterPublicKey': 'string',
'ClusterNodes': [
{
'NodeRole': 'string',
'PrivateIPAddress': 'string',
'PublicIPAddress': 'string'
},
],
'ElasticIpStatus': {
'ElasticIp': 'string',
'Status': 'string'
},
'ClusterRevisionNumber': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'KmsKeyId': 'string',
'EnhancedVpcRouting': True|False,
'IamRoles': [
{
'IamRoleArn': 'string',
'ApplyStatus': 'string'
},
],
'PendingActions': [
'string',
],
'MaintenanceTrackName': 'string',
'ElasticResizeNumberOfNodeOptions': 'string',
'DeferredMaintenanceWindows': [
{
'DeferMaintenanceIdentifier': 'string',
'DeferMaintenanceStartTime': datetime(2015, 1, 1),
'DeferMaintenanceEndTime': datetime(2015, 1, 1)
},
],
'SnapshotScheduleIdentifier': 'string',
'SnapshotScheduleState': 'MODIFYING'|'ACTIVE'|'FAILED',
'ExpectedNextSnapshotScheduleTime': datetime(2015, 1, 1),
'ExpectedNextSnapshotScheduleTimeStatus': 'string',
'NextMaintenanceWindowStartTime': datetime(2015, 1, 1),
'ResizeInfo': {
'ResizeType': 'string',
'AllowCancelResize': True|False
}
}
}
:returns:
ClusterIdentifier (string) -- [REQUIRED]
The unique identifier for the cluster to resize.
ClusterType (string) -- The new cluster type for the specified cluster.
NodeType (string) -- The new node type for the nodes you are adding. If not specified, the cluster\'s current node type is used.
NumberOfNodes (integer) -- The new number of nodes for the cluster.
Classic (boolean) -- A boolean value indicating whether the resize operation is using the classic resize process. If you don\'t provide this parameter or set the value to false , the resize type is elastic.
"""
pass
def restore_from_cluster_snapshot(ClusterIdentifier=None, SnapshotIdentifier=None, SnapshotClusterIdentifier=None, Port=None, AvailabilityZone=None, AllowVersionUpgrade=None, ClusterSubnetGroupName=None, PubliclyAccessible=None, OwnerAccount=None, HsmClientCertificateIdentifier=None, HsmConfigurationIdentifier=None, ElasticIp=None, ClusterParameterGroupName=None, ClusterSecurityGroups=None, VpcSecurityGroupIds=None, PreferredMaintenanceWindow=None, AutomatedSnapshotRetentionPeriod=None, ManualSnapshotRetentionPeriod=None, KmsKeyId=None, NodeType=None, EnhancedVpcRouting=None, AdditionalInfo=None, IamRoles=None, MaintenanceTrackName=None, SnapshotScheduleIdentifier=None, NumberOfNodes=None):
"""
Creates a new cluster from a snapshot. By default, Amazon Redshift creates the resulting cluster with the same configuration as the original cluster from which the snapshot was created, except that the new cluster is created with the default cluster security and parameter groups. After Amazon Redshift creates the cluster, you can use the ModifyCluster API to associate a different security group and different parameter group with the restored cluster. If you are using a DS node type, you can also choose to change to another DS node type of the same size during restore.
If you restore a cluster into a VPC, you must provide a cluster subnet group where you want the cluster restored.
For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide .
See also: AWS API Documentation
Exceptions
:example: response = client.restore_from_cluster_snapshot(
ClusterIdentifier='string',
SnapshotIdentifier='string',
SnapshotClusterIdentifier='string',
Port=123,
AvailabilityZone='string',
AllowVersionUpgrade=True|False,
ClusterSubnetGroupName='string',
PubliclyAccessible=True|False,
OwnerAccount='string',
HsmClientCertificateIdentifier='string',
HsmConfigurationIdentifier='string',
ElasticIp='string',
ClusterParameterGroupName='string',
ClusterSecurityGroups=[
'string',
],
VpcSecurityGroupIds=[
'string',
],
PreferredMaintenanceWindow='string',
AutomatedSnapshotRetentionPeriod=123,
ManualSnapshotRetentionPeriod=123,
KmsKeyId='string',
NodeType='string',
EnhancedVpcRouting=True|False,
AdditionalInfo='string',
IamRoles=[
'string',
],
MaintenanceTrackName='string',
SnapshotScheduleIdentifier='string',
NumberOfNodes=123
)
:type ClusterIdentifier: string
:param ClusterIdentifier: [REQUIRED]\nThe identifier of the cluster that will be created from restoring the snapshot.\nConstraints:\n\nMust contain from 1 to 63 alphanumeric characters or hyphens.\nAlphabetic characters must be lowercase.\nFirst character must be a letter.\nCannot end with a hyphen or contain two consecutive hyphens.\nMust be unique for all clusters within an AWS account.\n\n
:type SnapshotIdentifier: string
:param SnapshotIdentifier: [REQUIRED]\nThe name of the snapshot from which to create the new cluster. This parameter isn\'t case sensitive.\nExample: my-snapshot-id\n
:type SnapshotClusterIdentifier: string
:param SnapshotClusterIdentifier: The name of the cluster the source snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.
:type Port: integer
:param Port: The port number on which the cluster accepts connections.\nDefault: The same port as the original cluster.\nConstraints: Must be between 1115 and 65535 .\n
:type AvailabilityZone: string
:param AvailabilityZone: The Amazon EC2 Availability Zone in which to restore the cluster.\nDefault: A random, system-chosen Availability Zone.\nExample: us-east-2a\n
:type AllowVersionUpgrade: boolean
:param AllowVersionUpgrade: If true , major version upgrades can be applied during the maintenance window to the Amazon Redshift engine that is running on the cluster.\nDefault: true\n
:type ClusterSubnetGroupName: string
:param ClusterSubnetGroupName: The name of the subnet group where you want the cluster restored.\nA snapshot of a cluster in a VPC can be restored only in a VPC. Therefore, you must provide the subnet group name where you want the cluster restored.\n
:type PubliclyAccessible: boolean
:param PubliclyAccessible: If true , the cluster can be accessed from a public network.
:type OwnerAccount: string
:param OwnerAccount: The AWS customer account used to create or copy the snapshot. Required if you are restoring a snapshot you do not own, optional if you own the snapshot.
:type HsmClientCertificateIdentifier: string
:param HsmClientCertificateIdentifier: Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM.
:type HsmConfigurationIdentifier: string
:param HsmConfigurationIdentifier: Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM.
:type ElasticIp: string
:param ElasticIp: The elastic IP (EIP) address for the cluster.
:type ClusterParameterGroupName: string
:param ClusterParameterGroupName: The name of the parameter group to be associated with this cluster.\nDefault: The default Amazon Redshift cluster parameter group. For information about the default parameter group, go to Working with Amazon Redshift Parameter Groups .\nConstraints:\n\nMust be 1 to 255 alphanumeric characters or hyphens.\nFirst character must be a letter.\nCannot end with a hyphen or contain two consecutive hyphens.\n\n
:type ClusterSecurityGroups: list
:param ClusterSecurityGroups: A list of security groups to be associated with this cluster.\nDefault: The default cluster security group for Amazon Redshift.\nCluster security groups only apply to clusters outside of VPCs.\n\n(string) --\n\n
:type VpcSecurityGroupIds: list
:param VpcSecurityGroupIds: A list of Virtual Private Cloud (VPC) security groups to be associated with the cluster.\nDefault: The default VPC security group is associated with the cluster.\nVPC security groups only apply to clusters in VPCs.\n\n(string) --\n\n
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow: The weekly time range (in UTC) during which automated cluster maintenance can occur.\nFormat: ddd:hh24:mi-ddd:hh24:mi\nDefault: The value selected for the cluster from which the snapshot was taken. For more information about the time blocks for each region, see Maintenance Windows in Amazon Redshift Cluster Management Guide.\nValid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun\nConstraints: Minimum 30-minute window.\n
:type AutomatedSnapshotRetentionPeriod: integer
:param AutomatedSnapshotRetentionPeriod: The number of days that automated snapshots are retained. If the value is 0, automated snapshots are disabled. Even if automated snapshots are disabled, you can still create manual snapshots when you want with CreateClusterSnapshot .\nDefault: The value selected for the cluster from which the snapshot was taken.\nConstraints: Must be a value from 0 to 35.\n
:type ManualSnapshotRetentionPeriod: integer
:param ManualSnapshotRetentionPeriod: The default number of days to retain a manual snapshot. If the value is -1, the snapshot is retained indefinitely. This setting doesn\'t change the retention period of existing snapshots.\nThe value must be either -1 or an integer between 1 and 3,653.\n
:type KmsKeyId: string
:param KmsKeyId: The AWS Key Management Service (KMS) key ID of the encryption key that you want to use to encrypt data in the cluster that you restore from a shared snapshot.
:type NodeType: string
:param NodeType: The node type that the restored cluster will be provisioned with.\nDefault: The node type of the cluster from which the snapshot was taken. You can modify this if you are using any DS node type. In that case, you can choose to restore into another DS node type of the same size. For example, you can restore ds1.8xlarge into ds2.8xlarge, or ds1.xlarge into ds2.xlarge. If you have a DC instance type, you must restore into that same instance type and size. In other words, you can only restore a dc1.large instance type into another dc1.large instance type or dc2.large instance type. You can\'t restore dc1.8xlarge to dc2.8xlarge. First restore to a dc1.8xlarge cluster, then resize to a dc2.8large cluster. For more information about node types, see About Clusters and Nodes in the Amazon Redshift Cluster Management Guide .\n
:type EnhancedVpcRouting: boolean
:param EnhancedVpcRouting: An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.\nIf this option is true , enhanced VPC routing is enabled.\nDefault: false\n
:type AdditionalInfo: string
:param AdditionalInfo: Reserved.
:type IamRoles: list
:param IamRoles: A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services. You must supply the IAM roles in their Amazon Resource Name (ARN) format. You can supply up to 10 IAM roles in a single request.\nA cluster can have up to 10 IAM roles associated at any time.\n\n(string) --\n\n
:type MaintenanceTrackName: string
:param MaintenanceTrackName: The name of the maintenance track for the restored cluster. When you take a snapshot, the snapshot inherits the MaintenanceTrack value from the cluster. The snapshot might be on a different track than the cluster that was the source for the snapshot. For example, suppose that you take a snapshot of a cluster that is on the current track and then change the cluster to be on the trailing track. In this case, the snapshot and the source cluster are on different tracks.
:type SnapshotScheduleIdentifier: string
:param SnapshotScheduleIdentifier: A unique identifier for the snapshot schedule.
:type NumberOfNodes: integer
:param NumberOfNodes: The number of nodes specified when provisioning the restored cluster.
:rtype: dict
ReturnsResponse Syntax
{
'Cluster': {
'ClusterIdentifier': 'string',
'NodeType': 'string',
'ClusterStatus': 'string',
'ClusterAvailabilityStatus': 'string',
'ModifyStatus': 'string',
'MasterUsername': 'string',
'DBName': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'ClusterCreateTime': datetime(2015, 1, 1),
'AutomatedSnapshotRetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'ClusterSecurityGroups': [
{
'ClusterSecurityGroupName': 'string',
'Status': 'string'
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'ClusterParameterGroups': [
{
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'ClusterParameterStatusList': [
{
'ParameterName': 'string',
'ParameterApplyStatus': 'string',
'ParameterApplyErrorDescription': 'string'
},
]
},
],
'ClusterSubnetGroupName': 'string',
'VpcId': 'string',
'AvailabilityZone': 'string',
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'MasterUserPassword': 'string',
'NodeType': 'string',
'NumberOfNodes': 123,
'ClusterType': 'string',
'ClusterVersion': 'string',
'AutomatedSnapshotRetentionPeriod': 123,
'ClusterIdentifier': 'string',
'PubliclyAccessible': True|False,
'EnhancedVpcRouting': True|False,
'MaintenanceTrackName': 'string',
'EncryptionType': 'string'
},
'ClusterVersion': 'string',
'AllowVersionUpgrade': True|False,
'NumberOfNodes': 123,
'PubliclyAccessible': True|False,
'Encrypted': True|False,
'RestoreStatus': {
'Status': 'string',
'CurrentRestoreRateInMegaBytesPerSecond': 123.0,
'SnapshotSizeInMegaBytes': 123,
'ProgressInMegaBytes': 123,
'ElapsedTimeInSeconds': 123,
'EstimatedTimeToCompletionInSeconds': 123
},
'DataTransferProgress': {
'Status': 'string',
'CurrentRateInMegaBytesPerSecond': 123.0,
'TotalDataInMegaBytes': 123,
'DataTransferredInMegaBytes': 123,
'EstimatedTimeToCompletionInSeconds': 123,
'ElapsedTimeInSeconds': 123
},
'HsmStatus': {
'HsmClientCertificateIdentifier': 'string',
'HsmConfigurationIdentifier': 'string',
'Status': 'string'
},
'ClusterSnapshotCopyStatus': {
'DestinationRegion': 'string',
'RetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'SnapshotCopyGrantName': 'string'
},
'ClusterPublicKey': 'string',
'ClusterNodes': [
{
'NodeRole': 'string',
'PrivateIPAddress': 'string',
'PublicIPAddress': 'string'
},
],
'ElasticIpStatus': {
'ElasticIp': 'string',
'Status': 'string'
},
'ClusterRevisionNumber': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'KmsKeyId': 'string',
'EnhancedVpcRouting': True|False,
'IamRoles': [
{
'IamRoleArn': 'string',
'ApplyStatus': 'string'
},
],
'PendingActions': [
'string',
],
'MaintenanceTrackName': 'string',
'ElasticResizeNumberOfNodeOptions': 'string',
'DeferredMaintenanceWindows': [
{
'DeferMaintenanceIdentifier': 'string',
'DeferMaintenanceStartTime': datetime(2015, 1, 1),
'DeferMaintenanceEndTime': datetime(2015, 1, 1)
},
],
'SnapshotScheduleIdentifier': 'string',
'SnapshotScheduleState': 'MODIFYING'|'ACTIVE'|'FAILED',
'ExpectedNextSnapshotScheduleTime': datetime(2015, 1, 1),
'ExpectedNextSnapshotScheduleTimeStatus': 'string',
'NextMaintenanceWindowStartTime': datetime(2015, 1, 1),
'ResizeInfo': {
'ResizeType': 'string',
'AllowCancelResize': True|False
}
}
}
Response Structure
(dict) --
Cluster (dict) --
Describes a cluster.
ClusterIdentifier (string) --
The unique identifier of the cluster.
NodeType (string) --
The node type for the nodes in the cluster.
ClusterStatus (string) --
The current state of the cluster. Possible values are the following:
available
available, prep-for-resize
available, resize-cleanup
cancelling-resize
creating
deleting
final-snapshot
hardware-failure
incompatible-hsm
incompatible-network
incompatible-parameters
incompatible-restore
modifying
paused
rebooting
renaming
resizing
rotating-keys
storage-full
updating-hsm
ClusterAvailabilityStatus (string) --
The availability status of the cluster for queries. Possible values are the following:
Available - The cluster is available for queries.
Unavailable - The cluster is not available for queries.
Maintenance - The cluster is intermittently available for queries due to maintenance activities.
Modifying - The cluster is intermittently available for queries due to changes that modify the cluster.
Failed - The cluster failed and is not available for queries.
ModifyStatus (string) --
The status of a modify operation, if any, initiated for the cluster.
MasterUsername (string) --
The master user name for the cluster. This name is used to connect to the database that is specified in the DBName parameter.
DBName (string) --
The name of the initial database that was created when the cluster was created. This same name is returned for the life of the cluster. If an initial database was not specified, a database named dev was created by default.
Endpoint (dict) --
The connection endpoint.
Address (string) --
The DNS address of the Cluster.
Port (integer) --
The port that the database engine is listening on.
ClusterCreateTime (datetime) --
The date and time that the cluster was created.
AutomatedSnapshotRetentionPeriod (integer) --
The number of days that automatic cluster snapshots are retained.
ManualSnapshotRetentionPeriod (integer) --
The default number of days to retain a manual snapshot. If the value is -1, the snapshot is retained indefinitely. This setting doesn\'t change the retention period of existing snapshots.
The value must be either -1 or an integer between 1 and 3,653.
ClusterSecurityGroups (list) --
A list of cluster security group that are associated with the cluster. Each security group is represented by an element that contains ClusterSecurityGroup.Name and ClusterSecurityGroup.Status subelements.
Cluster security groups are used when the cluster is not created in an Amazon Virtual Private Cloud (VPC). Clusters that are created in a VPC use VPC security groups, which are listed by the VpcSecurityGroups parameter.
(dict) --
Describes a cluster security group.
ClusterSecurityGroupName (string) --
The name of the cluster security group.
Status (string) --
The status of the cluster security group.
VpcSecurityGroups (list) --
A list of Amazon Virtual Private Cloud (Amazon VPC) security groups that are associated with the cluster. This parameter is returned only if the cluster is in a VPC.
(dict) --
Describes the members of a VPC security group.
VpcSecurityGroupId (string) --
The identifier of the VPC security group.
Status (string) --
The status of the VPC security group.
ClusterParameterGroups (list) --
The list of cluster parameter groups that are associated with this cluster. Each parameter group in the list is returned with its status.
(dict) --
Describes the status of a parameter group.
ParameterGroupName (string) --
The name of the cluster parameter group.
ParameterApplyStatus (string) --
The status of parameter updates.
ClusterParameterStatusList (list) --
The list of parameter statuses.
For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide .
(dict) --
Describes the status of a parameter group.
ParameterName (string) --
The name of the parameter.
ParameterApplyStatus (string) --
The status of the parameter that indicates whether the parameter is in sync with the database, waiting for a cluster reboot, or encountered an error when being applied.
The following are possible statuses and descriptions.
in-sync : The parameter value is in sync with the database.
pending-reboot : The parameter value will be applied after the cluster reboots.
applying : The parameter value is being applied to the database.
invalid-parameter : Cannot apply the parameter value because it has an invalid value or syntax.
apply-deferred : The parameter contains static property changes. The changes are deferred until the cluster reboots.
apply-error : Cannot connect to the cluster. The parameter change will be applied after the cluster reboots.
unknown-error : Cannot apply the parameter change right now. The change will be applied after the cluster reboots.
ParameterApplyErrorDescription (string) --
The error that prevented the parameter from being applied to the database.
ClusterSubnetGroupName (string) --
The name of the subnet group that is associated with the cluster. This parameter is valid only when the cluster is in a VPC.
VpcId (string) --
The identifier of the VPC the cluster is in, if the cluster is in a VPC.
AvailabilityZone (string) --
The name of the Availability Zone in which the cluster is located.
PreferredMaintenanceWindow (string) --
The weekly time range, in Universal Coordinated Time (UTC), during which system maintenance can occur.
PendingModifiedValues (dict) --
A value that, if present, indicates that changes to the cluster are pending. Specific pending changes are identified by subelements.
MasterUserPassword (string) --
The pending or in-progress change of the master user password for the cluster.
NodeType (string) --
The pending or in-progress change of the cluster\'s node type.
NumberOfNodes (integer) --
The pending or in-progress change of the number of nodes in the cluster.
ClusterType (string) --
The pending or in-progress change of the cluster type.
ClusterVersion (string) --
The pending or in-progress change of the service version.
AutomatedSnapshotRetentionPeriod (integer) --
The pending or in-progress change of the automated snapshot retention period.
ClusterIdentifier (string) --
The pending or in-progress change of the new identifier for the cluster.
PubliclyAccessible (boolean) --
The pending or in-progress change of the ability to connect to the cluster from the public network.
EnhancedVpcRouting (boolean) --
An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.
If this option is true , enhanced VPC routing is enabled.
Default: false
MaintenanceTrackName (string) --
The name of the maintenance track that the cluster will change to during the next maintenance window.
EncryptionType (string) --
The encryption type for a cluster. Possible values are: KMS and None. For the China region the possible values are None, and Legacy.
ClusterVersion (string) --
The version ID of the Amazon Redshift engine that is running on the cluster.
AllowVersionUpgrade (boolean) --
A boolean value that, if true , indicates that major version upgrades will be applied automatically to the cluster during the maintenance window.
NumberOfNodes (integer) --
The number of compute nodes in the cluster.
PubliclyAccessible (boolean) --
A boolean value that, if true , indicates that the cluster can be accessed from a public network.
Encrypted (boolean) --
A boolean value that, if true , indicates that data in the cluster is encrypted at rest.
RestoreStatus (dict) --
A value that describes the status of a cluster restore action. This parameter returns null if the cluster was not created by restoring a snapshot.
Status (string) --
The status of the restore action. Returns starting, restoring, completed, or failed.
CurrentRestoreRateInMegaBytesPerSecond (float) --
The number of megabytes per second being transferred from the backup storage. Returns the average rate for a completed backup. This field is only updated when you restore to DC2 and DS2 node types.
SnapshotSizeInMegaBytes (integer) --
The size of the set of snapshot data used to restore the cluster. This field is only updated when you restore to DC2 and DS2 node types.
ProgressInMegaBytes (integer) --
The number of megabytes that have been transferred from snapshot storage. This field is only updated when you restore to DC2 and DS2 node types.
ElapsedTimeInSeconds (integer) --
The amount of time an in-progress restore has been running, or the amount of time it took a completed restore to finish. This field is only updated when you restore to DC2 and DS2 node types.
EstimatedTimeToCompletionInSeconds (integer) --
The estimate of the time remaining before the restore will complete. Returns 0 for a completed restore. This field is only updated when you restore to DC2 and DS2 node types.
DataTransferProgress (dict) --
Status (string) --
Describes the status of the cluster. While the transfer is in progress the status is transferringdata .
CurrentRateInMegaBytesPerSecond (float) --
Describes the data transfer rate in MB\'s per second.
TotalDataInMegaBytes (integer) --
Describes the total amount of data to be transfered in megabytes.
DataTransferredInMegaBytes (integer) --
Describes the total amount of data that has been transfered in MB\'s.
EstimatedTimeToCompletionInSeconds (integer) --
Describes the estimated number of seconds remaining to complete the transfer.
ElapsedTimeInSeconds (integer) --
Describes the number of seconds that have elapsed during the data transfer.
HsmStatus (dict) --
A value that reports whether the Amazon Redshift cluster has finished applying any hardware security module (HSM) settings changes specified in a modify cluster command.
Values: active, applying
HsmClientCertificateIdentifier (string) --
Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM.
HsmConfigurationIdentifier (string) --
Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM.
Status (string) --
Reports whether the Amazon Redshift cluster has finished applying any HSM settings changes specified in a modify cluster command.
Values: active, applying
ClusterSnapshotCopyStatus (dict) --
A value that returns the destination region and retention period that are configured for cross-region snapshot copy.
DestinationRegion (string) --
The destination region that snapshots are automatically copied to when cross-region snapshot copy is enabled.
RetentionPeriod (integer) --
The number of days that automated snapshots are retained in the destination region after they are copied from a source region.
ManualSnapshotRetentionPeriod (integer) --
The number of days that automated snapshots are retained in the destination region after they are copied from a source region. If the value is -1, the manual snapshot is retained indefinitely.
The value must be either -1 or an integer between 1 and 3,653.
SnapshotCopyGrantName (string) --
The name of the snapshot copy grant.
ClusterPublicKey (string) --
The public key for the cluster.
ClusterNodes (list) --
The nodes in the cluster.
(dict) --
The identifier of a node in a cluster.
NodeRole (string) --
Whether the node is a leader node or a compute node.
PrivateIPAddress (string) --
The private IP address of a node within a cluster.
PublicIPAddress (string) --
The public IP address of a node within a cluster.
ElasticIpStatus (dict) --
The status of the elastic IP (EIP) address.
ElasticIp (string) --
The elastic IP (EIP) address for the cluster.
Status (string) --
The status of the elastic IP (EIP) address.
ClusterRevisionNumber (string) --
The specific revision number of the database in the cluster.
Tags (list) --
The list of tags for the cluster.
(dict) --
A tag consisting of a name/value pair for a resource.
Key (string) --
The key, or name, for the resource tag.
Value (string) --
The value for the resource tag.
KmsKeyId (string) --
The AWS Key Management Service (AWS KMS) key ID of the encryption key used to encrypt data in the cluster.
EnhancedVpcRouting (boolean) --
An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.
If this option is true , enhanced VPC routing is enabled.
Default: false
IamRoles (list) --
A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services.
(dict) --
An AWS Identity and Access Management (IAM) role that can be used by the associated Amazon Redshift cluster to access other AWS services.
IamRoleArn (string) --
The Amazon Resource Name (ARN) of the IAM role, for example, arn:aws:iam::123456789012:role/RedshiftCopyUnload .
ApplyStatus (string) --
A value that describes the status of the IAM role\'s association with an Amazon Redshift cluster.
The following are possible statuses and descriptions.
in-sync : The role is available for use by the cluster.
adding : The role is in the process of being associated with the cluster.
removing : The role is in the process of being disassociated with the cluster.
PendingActions (list) --
Cluster operations that are waiting to be started.
(string) --
MaintenanceTrackName (string) --
The name of the maintenance track for the cluster.
ElasticResizeNumberOfNodeOptions (string) --
The number of nodes that you can resize the cluster to with the elastic resize method.
DeferredMaintenanceWindows (list) --
Describes a group of DeferredMaintenanceWindow objects.
(dict) --
Describes a deferred maintenance window
DeferMaintenanceIdentifier (string) --
A unique identifier for the maintenance window.
DeferMaintenanceStartTime (datetime) --
A timestamp for the beginning of the time period when we defer maintenance.
DeferMaintenanceEndTime (datetime) --
A timestamp for the end of the time period when we defer maintenance.
SnapshotScheduleIdentifier (string) --
A unique identifier for the cluster snapshot schedule.
SnapshotScheduleState (string) --
The current state of the cluster snapshot schedule.
ExpectedNextSnapshotScheduleTime (datetime) --
The date and time when the next snapshot is expected to be taken for clusters with a valid snapshot schedule and backups enabled.
ExpectedNextSnapshotScheduleTimeStatus (string) --
The status of next expected snapshot for clusters having a valid snapshot schedule and backups enabled. Possible values are the following:
OnTrack - The next snapshot is expected to be taken on time.
Pending - The next snapshot is pending to be taken.
NextMaintenanceWindowStartTime (datetime) --
The date and time in UTC when system maintenance can begin.
ResizeInfo (dict) --
Returns the following:
AllowCancelResize: a boolean value indicating if the resize operation can be cancelled.
ResizeType: Returns ClassicResize
ResizeType (string) --
Returns the value ClassicResize .
AllowCancelResize (boolean) --
A boolean value indicating if the resize operation can be cancelled.
Exceptions
Redshift.Client.exceptions.AccessToSnapshotDeniedFault
Redshift.Client.exceptions.ClusterAlreadyExistsFault
Redshift.Client.exceptions.ClusterSnapshotNotFoundFault
Redshift.Client.exceptions.ClusterQuotaExceededFault
Redshift.Client.exceptions.InsufficientClusterCapacityFault
Redshift.Client.exceptions.InvalidClusterSnapshotStateFault
Redshift.Client.exceptions.InvalidRestoreFault
Redshift.Client.exceptions.NumberOfNodesQuotaExceededFault
Redshift.Client.exceptions.NumberOfNodesPerClusterLimitExceededFault
Redshift.Client.exceptions.InvalidVPCNetworkStateFault
Redshift.Client.exceptions.InvalidClusterSubnetGroupStateFault
Redshift.Client.exceptions.InvalidSubnet
Redshift.Client.exceptions.ClusterSubnetGroupNotFoundFault
Redshift.Client.exceptions.UnauthorizedOperation
Redshift.Client.exceptions.HsmClientCertificateNotFoundFault
Redshift.Client.exceptions.HsmConfigurationNotFoundFault
Redshift.Client.exceptions.InvalidElasticIpFault
Redshift.Client.exceptions.ClusterParameterGroupNotFoundFault
Redshift.Client.exceptions.ClusterSecurityGroupNotFoundFault
Redshift.Client.exceptions.LimitExceededFault
Redshift.Client.exceptions.DependentServiceRequestThrottlingFault
Redshift.Client.exceptions.InvalidClusterTrackFault
Redshift.Client.exceptions.SnapshotScheduleNotFoundFault
Redshift.Client.exceptions.TagLimitExceededFault
Redshift.Client.exceptions.InvalidTagFault
:return: {
'Cluster': {
'ClusterIdentifier': 'string',
'NodeType': 'string',
'ClusterStatus': 'string',
'ClusterAvailabilityStatus': 'string',
'ModifyStatus': 'string',
'MasterUsername': 'string',
'DBName': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'ClusterCreateTime': datetime(2015, 1, 1),
'AutomatedSnapshotRetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'ClusterSecurityGroups': [
{
'ClusterSecurityGroupName': 'string',
'Status': 'string'
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'ClusterParameterGroups': [
{
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'ClusterParameterStatusList': [
{
'ParameterName': 'string',
'ParameterApplyStatus': 'string',
'ParameterApplyErrorDescription': 'string'
},
]
},
],
'ClusterSubnetGroupName': 'string',
'VpcId': 'string',
'AvailabilityZone': 'string',
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'MasterUserPassword': 'string',
'NodeType': 'string',
'NumberOfNodes': 123,
'ClusterType': 'string',
'ClusterVersion': 'string',
'AutomatedSnapshotRetentionPeriod': 123,
'ClusterIdentifier': 'string',
'PubliclyAccessible': True|False,
'EnhancedVpcRouting': True|False,
'MaintenanceTrackName': 'string',
'EncryptionType': 'string'
},
'ClusterVersion': 'string',
'AllowVersionUpgrade': True|False,
'NumberOfNodes': 123,
'PubliclyAccessible': True|False,
'Encrypted': True|False,
'RestoreStatus': {
'Status': 'string',
'CurrentRestoreRateInMegaBytesPerSecond': 123.0,
'SnapshotSizeInMegaBytes': 123,
'ProgressInMegaBytes': 123,
'ElapsedTimeInSeconds': 123,
'EstimatedTimeToCompletionInSeconds': 123
},
'DataTransferProgress': {
'Status': 'string',
'CurrentRateInMegaBytesPerSecond': 123.0,
'TotalDataInMegaBytes': 123,
'DataTransferredInMegaBytes': 123,
'EstimatedTimeToCompletionInSeconds': 123,
'ElapsedTimeInSeconds': 123
},
'HsmStatus': {
'HsmClientCertificateIdentifier': 'string',
'HsmConfigurationIdentifier': 'string',
'Status': 'string'
},
'ClusterSnapshotCopyStatus': {
'DestinationRegion': 'string',
'RetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'SnapshotCopyGrantName': 'string'
},
'ClusterPublicKey': 'string',
'ClusterNodes': [
{
'NodeRole': 'string',
'PrivateIPAddress': 'string',
'PublicIPAddress': 'string'
},
],
'ElasticIpStatus': {
'ElasticIp': 'string',
'Status': 'string'
},
'ClusterRevisionNumber': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'KmsKeyId': 'string',
'EnhancedVpcRouting': True|False,
'IamRoles': [
{
'IamRoleArn': 'string',
'ApplyStatus': 'string'
},
],
'PendingActions': [
'string',
],
'MaintenanceTrackName': 'string',
'ElasticResizeNumberOfNodeOptions': 'string',
'DeferredMaintenanceWindows': [
{
'DeferMaintenanceIdentifier': 'string',
'DeferMaintenanceStartTime': datetime(2015, 1, 1),
'DeferMaintenanceEndTime': datetime(2015, 1, 1)
},
],
'SnapshotScheduleIdentifier': 'string',
'SnapshotScheduleState': 'MODIFYING'|'ACTIVE'|'FAILED',
'ExpectedNextSnapshotScheduleTime': datetime(2015, 1, 1),
'ExpectedNextSnapshotScheduleTimeStatus': 'string',
'NextMaintenanceWindowStartTime': datetime(2015, 1, 1),
'ResizeInfo': {
'ResizeType': 'string',
'AllowCancelResize': True|False
}
}
}
:returns:
available
available, prep-for-resize
available, resize-cleanup
cancelling-resize
creating
deleting
final-snapshot
hardware-failure
incompatible-hsm
incompatible-network
incompatible-parameters
incompatible-restore
modifying
paused
rebooting
renaming
resizing
rotating-keys
storage-full
updating-hsm
"""
pass
def restore_table_from_cluster_snapshot(ClusterIdentifier=None, SnapshotIdentifier=None, SourceDatabaseName=None, SourceSchemaName=None, SourceTableName=None, TargetDatabaseName=None, TargetSchemaName=None, NewTableName=None):
    """
    Create a new table from a table in an Amazon Redshift cluster snapshot.

    The new table must be created within the same Amazon Redshift cluster
    that the snapshot was taken from. You cannot use this operation to
    overwrite an existing table: if you want to replace an original table
    with the restored one, first rename or drop the original table, then
    pass its original name as ``NewTableName``.

    See also: AWS API Documentation

    Request Syntax::

        response = client.restore_table_from_cluster_snapshot(
            ClusterIdentifier='string',
            SnapshotIdentifier='string',
            SourceDatabaseName='string',
            SourceSchemaName='string',
            SourceTableName='string',
            TargetDatabaseName='string',
            TargetSchemaName='string',
            NewTableName='string'
        )

    :type ClusterIdentifier: string
    :param ClusterIdentifier: [REQUIRED]
        The identifier of the Amazon Redshift cluster to restore the table to.

    :type SnapshotIdentifier: string
    :param SnapshotIdentifier: [REQUIRED]
        The identifier of the snapshot to restore the table from. This
        snapshot must have been created from the Amazon Redshift cluster
        specified by the ``ClusterIdentifier`` parameter.

    :type SourceDatabaseName: string
    :param SourceDatabaseName: [REQUIRED]
        The name of the source database that contains the table to restore
        from.

    :type SourceSchemaName: string
    :param SourceSchemaName: The name of the source schema that contains the
        table to restore from. If you do not specify a ``SourceSchemaName``
        value, the default is ``public``.

    :type SourceTableName: string
    :param SourceTableName: [REQUIRED]
        The name of the source table to restore from.

    :type TargetDatabaseName: string
    :param TargetDatabaseName: The name of the database to restore the table
        to.

    :type TargetSchemaName: string
    :param TargetSchemaName: The name of the schema to restore the table to.

    :type NewTableName: string
    :param NewTableName: [REQUIRED]
        The name of the table to create as a result of the current request.

    :rtype: dict

    Response Syntax::

        {
            'TableRestoreStatus': {
                'TableRestoreRequestId': 'string',
                'Status': 'PENDING'|'IN_PROGRESS'|'SUCCEEDED'|'FAILED'|'CANCELED',
                'Message': 'string',
                'RequestTime': datetime(2015, 1, 1),
                'ProgressInMegaBytes': 123,
                'TotalDataInMegaBytes': 123,
                'ClusterIdentifier': 'string',
                'SnapshotIdentifier': 'string',
                'SourceDatabaseName': 'string',
                'SourceSchemaName': 'string',
                'SourceTableName': 'string',
                'TargetDatabaseName': 'string',
                'TargetSchemaName': 'string',
                'NewTableName': 'string'
            }
        }

    Response Structure:

    - ``TableRestoreStatus`` (dict) -- Describes the status of a
      RestoreTableFromClusterSnapshot operation.

      - ``TableRestoreRequestId`` (string) -- The unique identifier for the
        table restore request.
      - ``Status`` (string) -- A value that describes the current state of
        the table restore request. Valid Values: ``SUCCEEDED``, ``FAILED``,
        ``CANCELED``, ``PENDING``, ``IN_PROGRESS``.
      - ``Message`` (string) -- A description of the status of the table
        restore request. Status values include ``SUCCEEDED``, ``FAILED``,
        ``CANCELED``, ``PENDING``, ``IN_PROGRESS``.
      - ``RequestTime`` (datetime) -- The time that the table restore
        request was made, in Universal Coordinated Time (UTC).
      - ``ProgressInMegaBytes`` (integer) -- The amount of data restored to
        the new table so far, in megabytes (MB).
      - ``TotalDataInMegaBytes`` (integer) -- The total amount of data to
        restore to the new table, in megabytes (MB).
      - ``ClusterIdentifier`` (string) -- The identifier of the Amazon
        Redshift cluster that the table is being restored to.
      - ``SnapshotIdentifier`` (string) -- The identifier of the snapshot
        that the table is being restored from.
      - ``SourceDatabaseName`` (string) -- The name of the source database
        that contains the table being restored.
      - ``SourceSchemaName`` (string) -- The name of the source schema that
        contains the table being restored.
      - ``SourceTableName`` (string) -- The name of the source table being
        restored.
      - ``TargetDatabaseName`` (string) -- The name of the database to
        restore the table to.
      - ``TargetSchemaName`` (string) -- The name of the schema to restore
        the table to.
      - ``NewTableName`` (string) -- The name of the table to create as a
        result of the table restore request.

    Exceptions:

    - ``Redshift.Client.exceptions.ClusterSnapshotNotFoundFault``
    - ``Redshift.Client.exceptions.InProgressTableRestoreQuotaExceededFault``
    - ``Redshift.Client.exceptions.InvalidClusterSnapshotStateFault``
    - ``Redshift.Client.exceptions.InvalidTableRestoreArgumentFault``
    - ``Redshift.Client.exceptions.ClusterNotFoundFault``
    - ``Redshift.Client.exceptions.InvalidClusterStateFault``
    - ``Redshift.Client.exceptions.UnsupportedOperationFault``
    """
    # Documentation stub only: the real call is dispatched through the
    # botocore client at runtime.
    pass
def resume_cluster(ClusterIdentifier=None):
"""
Resumes a paused cluster.
See also: AWS API Documentation
Exceptions
:example: response = client.resume_cluster(
ClusterIdentifier='string'
)
:type ClusterIdentifier: string
:param ClusterIdentifier: [REQUIRED]\nThe identifier of the cluster to be resumed.\n
:rtype: dict
ReturnsResponse Syntax{
'Cluster': {
'ClusterIdentifier': 'string',
'NodeType': 'string',
'ClusterStatus': 'string',
'ClusterAvailabilityStatus': 'string',
'ModifyStatus': 'string',
'MasterUsername': 'string',
'DBName': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'ClusterCreateTime': datetime(2015, 1, 1),
'AutomatedSnapshotRetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'ClusterSecurityGroups': [
{
'ClusterSecurityGroupName': 'string',
'Status': 'string'
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'ClusterParameterGroups': [
{
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'ClusterParameterStatusList': [
{
'ParameterName': 'string',
'ParameterApplyStatus': 'string',
'ParameterApplyErrorDescription': 'string'
},
]
},
],
'ClusterSubnetGroupName': 'string',
'VpcId': 'string',
'AvailabilityZone': 'string',
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'MasterUserPassword': 'string',
'NodeType': 'string',
'NumberOfNodes': 123,
'ClusterType': 'string',
'ClusterVersion': 'string',
'AutomatedSnapshotRetentionPeriod': 123,
'ClusterIdentifier': 'string',
'PubliclyAccessible': True|False,
'EnhancedVpcRouting': True|False,
'MaintenanceTrackName': 'string',
'EncryptionType': 'string'
},
'ClusterVersion': 'string',
'AllowVersionUpgrade': True|False,
'NumberOfNodes': 123,
'PubliclyAccessible': True|False,
'Encrypted': True|False,
'RestoreStatus': {
'Status': 'string',
'CurrentRestoreRateInMegaBytesPerSecond': 123.0,
'SnapshotSizeInMegaBytes': 123,
'ProgressInMegaBytes': 123,
'ElapsedTimeInSeconds': 123,
'EstimatedTimeToCompletionInSeconds': 123
},
'DataTransferProgress': {
'Status': 'string',
'CurrentRateInMegaBytesPerSecond': 123.0,
'TotalDataInMegaBytes': 123,
'DataTransferredInMegaBytes': 123,
'EstimatedTimeToCompletionInSeconds': 123,
'ElapsedTimeInSeconds': 123
},
'HsmStatus': {
'HsmClientCertificateIdentifier': 'string',
'HsmConfigurationIdentifier': 'string',
'Status': 'string'
},
'ClusterSnapshotCopyStatus': {
'DestinationRegion': 'string',
'RetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'SnapshotCopyGrantName': 'string'
},
'ClusterPublicKey': 'string',
'ClusterNodes': [
{
'NodeRole': 'string',
'PrivateIPAddress': 'string',
'PublicIPAddress': 'string'
},
],
'ElasticIpStatus': {
'ElasticIp': 'string',
'Status': 'string'
},
'ClusterRevisionNumber': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'KmsKeyId': 'string',
'EnhancedVpcRouting': True|False,
'IamRoles': [
{
'IamRoleArn': 'string',
'ApplyStatus': 'string'
},
],
'PendingActions': [
'string',
],
'MaintenanceTrackName': 'string',
'ElasticResizeNumberOfNodeOptions': 'string',
'DeferredMaintenanceWindows': [
{
'DeferMaintenanceIdentifier': 'string',
'DeferMaintenanceStartTime': datetime(2015, 1, 1),
'DeferMaintenanceEndTime': datetime(2015, 1, 1)
},
],
'SnapshotScheduleIdentifier': 'string',
'SnapshotScheduleState': 'MODIFYING'|'ACTIVE'|'FAILED',
'ExpectedNextSnapshotScheduleTime': datetime(2015, 1, 1),
'ExpectedNextSnapshotScheduleTimeStatus': 'string',
'NextMaintenanceWindowStartTime': datetime(2015, 1, 1),
'ResizeInfo': {
'ResizeType': 'string',
'AllowCancelResize': True|False
}
}
}
Response Structure
(dict) --
Cluster (dict) --Describes a cluster.
ClusterIdentifier (string) --The unique identifier of the cluster.
NodeType (string) --The node type for the nodes in the cluster.
ClusterStatus (string) --The current state of the cluster. Possible values are the following:
available
available, prep-for-resize
available, resize-cleanup
cancelling-resize
creating
deleting
final-snapshot
hardware-failure
incompatible-hsm
incompatible-network
incompatible-parameters
incompatible-restore
modifying
paused
rebooting
renaming
resizing
rotating-keys
storage-full
updating-hsm
ClusterAvailabilityStatus (string) --The availability status of the cluster for queries. Possible values are the following:
Available - The cluster is available for queries.
Unavailable - The cluster is not available for queries.
Maintenance - The cluster is intermittently available for queries due to maintenance activities.
Modifying - The cluster is intermittently available for queries due to changes that modify the cluster.
Failed - The cluster failed and is not available for queries.
ModifyStatus (string) --The status of a modify operation, if any, initiated for the cluster.
MasterUsername (string) --The master user name for the cluster. This name is used to connect to the database that is specified in the DBName parameter.
DBName (string) --The name of the initial database that was created when the cluster was created. This same name is returned for the life of the cluster. If an initial database was not specified, a database named dev dev was created by default.
Endpoint (dict) --The connection endpoint.
Address (string) --The DNS address of the Cluster.
Port (integer) --The port that the database engine is listening on.
ClusterCreateTime (datetime) --The date and time that the cluster was created.
AutomatedSnapshotRetentionPeriod (integer) --The number of days that automatic cluster snapshots are retained.
ManualSnapshotRetentionPeriod (integer) --The default number of days to retain a manual snapshot. If the value is -1, the snapshot is retained indefinitely. This setting doesn\'t change the retention period of existing snapshots.
The value must be either -1 or an integer between 1 and 3,653.
ClusterSecurityGroups (list) --A list of cluster security group that are associated with the cluster. Each security group is represented by an element that contains ClusterSecurityGroup.Name and ClusterSecurityGroup.Status subelements.
Cluster security groups are used when the cluster is not created in an Amazon Virtual Private Cloud (VPC). Clusters that are created in a VPC use VPC security groups, which are listed by the VpcSecurityGroups parameter.
(dict) --Describes a cluster security group.
ClusterSecurityGroupName (string) --The name of the cluster security group.
Status (string) --The status of the cluster security group.
VpcSecurityGroups (list) --A list of Amazon Virtual Private Cloud (Amazon VPC) security groups that are associated with the cluster. This parameter is returned only if the cluster is in a VPC.
(dict) --Describes the members of a VPC security group.
VpcSecurityGroupId (string) --The identifier of the VPC security group.
Status (string) --The status of the VPC security group.
ClusterParameterGroups (list) --The list of cluster parameter groups that are associated with this cluster. Each parameter group in the list is returned with its status.
(dict) --Describes the status of a parameter group.
ParameterGroupName (string) --The name of the cluster parameter group.
ParameterApplyStatus (string) --The status of parameter updates.
ClusterParameterStatusList (list) --The list of parameter statuses.
For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide .
(dict) --Describes the status of a parameter group.
ParameterName (string) --The name of the parameter.
ParameterApplyStatus (string) --The status of the parameter that indicates whether the parameter is in sync with the database, waiting for a cluster reboot, or encountered an error when being applied.
The following are possible statuses and descriptions.
in-sync : The parameter value is in sync with the database.
pending-reboot : The parameter value will be applied after the cluster reboots.
applying : The parameter value is being applied to the database.
invalid-parameter : Cannot apply the parameter value because it has an invalid value or syntax.
apply-deferred : The parameter contains static property changes. The changes are deferred until the cluster reboots.
apply-error : Cannot connect to the cluster. The parameter change will be applied after the cluster reboots.
unknown-error : Cannot apply the parameter change right now. The change will be applied after the cluster reboots.
ParameterApplyErrorDescription (string) --The error that prevented the parameter from being applied to the database.
ClusterSubnetGroupName (string) --The name of the subnet group that is associated with the cluster. This parameter is valid only when the cluster is in a VPC.
VpcId (string) --The identifier of the VPC the cluster is in, if the cluster is in a VPC.
AvailabilityZone (string) --The name of the Availability Zone in which the cluster is located.
PreferredMaintenanceWindow (string) --The weekly time range, in Universal Coordinated Time (UTC), during which system maintenance can occur.
PendingModifiedValues (dict) --A value that, if present, indicates that changes to the cluster are pending. Specific pending changes are identified by subelements.
MasterUserPassword (string) --The pending or in-progress change of the master user password for the cluster.
NodeType (string) --The pending or in-progress change of the cluster\'s node type.
NumberOfNodes (integer) --The pending or in-progress change of the number of nodes in the cluster.
ClusterType (string) --The pending or in-progress change of the cluster type.
ClusterVersion (string) --The pending or in-progress change of the service version.
AutomatedSnapshotRetentionPeriod (integer) --The pending or in-progress change of the automated snapshot retention period.
ClusterIdentifier (string) --The pending or in-progress change of the new identifier for the cluster.
PubliclyAccessible (boolean) --The pending or in-progress change of the ability to connect to the cluster from the public network.
EnhancedVpcRouting (boolean) --An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.
If this option is true , enhanced VPC routing is enabled.
Default: false
MaintenanceTrackName (string) --The name of the maintenance track that the cluster will change to during the next maintenance window.
EncryptionType (string) --The encryption type for a cluster. Possible values are: KMS and None. For the China region the possible values are None, and Legacy.
ClusterVersion (string) --The version ID of the Amazon Redshift engine that is running on the cluster.
AllowVersionUpgrade (boolean) --A boolean value that, if true , indicates that major version upgrades will be applied automatically to the cluster during the maintenance window.
NumberOfNodes (integer) --The number of compute nodes in the cluster.
PubliclyAccessible (boolean) --A boolean value that, if true , indicates that the cluster can be accessed from a public network.
Encrypted (boolean) --A boolean value that, if true , indicates that data in the cluster is encrypted at rest.
RestoreStatus (dict) --A value that describes the status of a cluster restore action. This parameter returns null if the cluster was not created by restoring a snapshot.
Status (string) --The status of the restore action. Returns starting, restoring, completed, or failed.
CurrentRestoreRateInMegaBytesPerSecond (float) --The number of megabytes per second being transferred from the backup storage. Returns the average rate for a completed backup. This field is only updated when you restore to DC2 and DS2 node types.
SnapshotSizeInMegaBytes (integer) --The size of the set of snapshot data used to restore the cluster. This field is only updated when you restore to DC2 and DS2 node types.
ProgressInMegaBytes (integer) --The number of megabytes that have been transferred from snapshot storage. This field is only updated when you restore to DC2 and DS2 node types.
ElapsedTimeInSeconds (integer) --The amount of time an in-progress restore has been running, or the amount of time it took a completed restore to finish. This field is only updated when you restore to DC2 and DS2 node types.
EstimatedTimeToCompletionInSeconds (integer) --The estimate of the time remaining before the restore will complete. Returns 0 for a completed restore. This field is only updated when you restore to DC2 and DS2 node types.
DataTransferProgress (dict) --
Status (string) --Describes the status of the cluster. While the transfer is in progress the status is transferringdata .
CurrentRateInMegaBytesPerSecond (float) --Describes the data transfer rate in MB\'s per second.
TotalDataInMegaBytes (integer) --Describes the total amount of data to be transferred in megabytes.
DataTransferredInMegaBytes (integer) --Describes the total amount of data that has been transferred in MB\'s.
EstimatedTimeToCompletionInSeconds (integer) --Describes the estimated number of seconds remaining to complete the transfer.
ElapsedTimeInSeconds (integer) --Describes the number of seconds that have elapsed during the data transfer.
HsmStatus (dict) --A value that reports whether the Amazon Redshift cluster has finished applying any hardware security module (HSM) settings changes specified in a modify cluster command.
Values: active, applying
HsmClientCertificateIdentifier (string) --Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM.
HsmConfigurationIdentifier (string) --Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM.
Status (string) --Reports whether the Amazon Redshift cluster has finished applying any HSM settings changes specified in a modify cluster command.
Values: active, applying
ClusterSnapshotCopyStatus (dict) --A value that returns the destination region and retention period that are configured for cross-region snapshot copy.
DestinationRegion (string) --The destination region that snapshots are automatically copied to when cross-region snapshot copy is enabled.
RetentionPeriod (integer) --The number of days that automated snapshots are retained in the destination region after they are copied from a source region.
ManualSnapshotRetentionPeriod (integer) --The number of days that automated snapshots are retained in the destination region after they are copied from a source region. If the value is -1, the manual snapshot is retained indefinitely.
The value must be either -1 or an integer between 1 and 3,653.
SnapshotCopyGrantName (string) --The name of the snapshot copy grant.
ClusterPublicKey (string) --The public key for the cluster.
ClusterNodes (list) --The nodes in the cluster.
(dict) --The identifier of a node in a cluster.
NodeRole (string) --Whether the node is a leader node or a compute node.
PrivateIPAddress (string) --The private IP address of a node within a cluster.
PublicIPAddress (string) --The public IP address of a node within a cluster.
ElasticIpStatus (dict) --The status of the elastic IP (EIP) address.
ElasticIp (string) --The elastic IP (EIP) address for the cluster.
Status (string) --The status of the elastic IP (EIP) address.
ClusterRevisionNumber (string) --The specific revision number of the database in the cluster.
Tags (list) --The list of tags for the cluster.
(dict) --A tag consisting of a name/value pair for a resource.
Key (string) --The key, or name, for the resource tag.
Value (string) --The value for the resource tag.
KmsKeyId (string) --The AWS Key Management Service (AWS KMS) key ID of the encryption key used to encrypt data in the cluster.
EnhancedVpcRouting (boolean) --An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.
If this option is true , enhanced VPC routing is enabled.
Default: false
IamRoles (list) --A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services.
(dict) --An AWS Identity and Access Management (IAM) role that can be used by the associated Amazon Redshift cluster to access other AWS services.
IamRoleArn (string) --The Amazon Resource Name (ARN) of the IAM role, for example, arn:aws:iam::123456789012:role/RedshiftCopyUnload .
ApplyStatus (string) --A value that describes the status of the IAM role\'s association with an Amazon Redshift cluster.
The following are possible statuses and descriptions.
in-sync : The role is available for use by the cluster.
adding : The role is in the process of being associated with the cluster.
removing : The role is in the process of being disassociated with the cluster.
PendingActions (list) --Cluster operations that are waiting to be started.
(string) --
MaintenanceTrackName (string) --The name of the maintenance track for the cluster.
ElasticResizeNumberOfNodeOptions (string) --The number of nodes that you can resize the cluster to with the elastic resize method.
DeferredMaintenanceWindows (list) --Describes a group of DeferredMaintenanceWindow objects.
(dict) --Describes a deferred maintenance window
DeferMaintenanceIdentifier (string) --A unique identifier for the maintenance window.
DeferMaintenanceStartTime (datetime) --A timestamp for the beginning of the time period when we defer maintenance.
DeferMaintenanceEndTime (datetime) --A timestamp for the end of the time period when we defer maintenance.
SnapshotScheduleIdentifier (string) --A unique identifier for the cluster snapshot schedule.
SnapshotScheduleState (string) --The current state of the cluster snapshot schedule.
ExpectedNextSnapshotScheduleTime (datetime) --The date and time when the next snapshot is expected to be taken for clusters with a valid snapshot schedule and backups enabled.
ExpectedNextSnapshotScheduleTimeStatus (string) --The status of next expected snapshot for clusters having a valid snapshot schedule and backups enabled. Possible values are the following:
OnTrack - The next snapshot is expected to be taken on time.
Pending - The next snapshot is pending to be taken.
NextMaintenanceWindowStartTime (datetime) --The date and time in UTC when system maintenance can begin.
ResizeInfo (dict) --Returns the following:
AllowCancelResize: a boolean value indicating if the resize operation can be cancelled.
ResizeType: Returns ClassicResize
ResizeType (string) --Returns the value ClassicResize .
AllowCancelResize (boolean) --A boolean value indicating if the resize operation can be cancelled.
Exceptions
Redshift.Client.exceptions.ClusterNotFoundFault
Redshift.Client.exceptions.InvalidClusterStateFault
:return: {
'Cluster': {
'ClusterIdentifier': 'string',
'NodeType': 'string',
'ClusterStatus': 'string',
'ClusterAvailabilityStatus': 'string',
'ModifyStatus': 'string',
'MasterUsername': 'string',
'DBName': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'ClusterCreateTime': datetime(2015, 1, 1),
'AutomatedSnapshotRetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'ClusterSecurityGroups': [
{
'ClusterSecurityGroupName': 'string',
'Status': 'string'
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'ClusterParameterGroups': [
{
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'ClusterParameterStatusList': [
{
'ParameterName': 'string',
'ParameterApplyStatus': 'string',
'ParameterApplyErrorDescription': 'string'
},
]
},
],
'ClusterSubnetGroupName': 'string',
'VpcId': 'string',
'AvailabilityZone': 'string',
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'MasterUserPassword': 'string',
'NodeType': 'string',
'NumberOfNodes': 123,
'ClusterType': 'string',
'ClusterVersion': 'string',
'AutomatedSnapshotRetentionPeriod': 123,
'ClusterIdentifier': 'string',
'PubliclyAccessible': True|False,
'EnhancedVpcRouting': True|False,
'MaintenanceTrackName': 'string',
'EncryptionType': 'string'
},
'ClusterVersion': 'string',
'AllowVersionUpgrade': True|False,
'NumberOfNodes': 123,
'PubliclyAccessible': True|False,
'Encrypted': True|False,
'RestoreStatus': {
'Status': 'string',
'CurrentRestoreRateInMegaBytesPerSecond': 123.0,
'SnapshotSizeInMegaBytes': 123,
'ProgressInMegaBytes': 123,
'ElapsedTimeInSeconds': 123,
'EstimatedTimeToCompletionInSeconds': 123
},
'DataTransferProgress': {
'Status': 'string',
'CurrentRateInMegaBytesPerSecond': 123.0,
'TotalDataInMegaBytes': 123,
'DataTransferredInMegaBytes': 123,
'EstimatedTimeToCompletionInSeconds': 123,
'ElapsedTimeInSeconds': 123
},
'HsmStatus': {
'HsmClientCertificateIdentifier': 'string',
'HsmConfigurationIdentifier': 'string',
'Status': 'string'
},
'ClusterSnapshotCopyStatus': {
'DestinationRegion': 'string',
'RetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'SnapshotCopyGrantName': 'string'
},
'ClusterPublicKey': 'string',
'ClusterNodes': [
{
'NodeRole': 'string',
'PrivateIPAddress': 'string',
'PublicIPAddress': 'string'
},
],
'ElasticIpStatus': {
'ElasticIp': 'string',
'Status': 'string'
},
'ClusterRevisionNumber': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'KmsKeyId': 'string',
'EnhancedVpcRouting': True|False,
'IamRoles': [
{
'IamRoleArn': 'string',
'ApplyStatus': 'string'
},
],
'PendingActions': [
'string',
],
'MaintenanceTrackName': 'string',
'ElasticResizeNumberOfNodeOptions': 'string',
'DeferredMaintenanceWindows': [
{
'DeferMaintenanceIdentifier': 'string',
'DeferMaintenanceStartTime': datetime(2015, 1, 1),
'DeferMaintenanceEndTime': datetime(2015, 1, 1)
},
],
'SnapshotScheduleIdentifier': 'string',
'SnapshotScheduleState': 'MODIFYING'|'ACTIVE'|'FAILED',
'ExpectedNextSnapshotScheduleTime': datetime(2015, 1, 1),
'ExpectedNextSnapshotScheduleTimeStatus': 'string',
'NextMaintenanceWindowStartTime': datetime(2015, 1, 1),
'ResizeInfo': {
'ResizeType': 'string',
'AllowCancelResize': True|False
}
}
}
:returns:
Available - The cluster is available for queries.
Unavailable - The cluster is not available for queries.
Maintenance - The cluster is intermittently available for queries due to maintenance activities.
Modifying - The cluster is intermittently available for queries due to changes that modify the cluster.
Failed - The cluster failed and is not available for queries.
"""
pass
def revoke_cluster_security_group_ingress(ClusterSecurityGroupName=None, CIDRIP=None, EC2SecurityGroupName=None, EC2SecurityGroupOwnerId=None):
    # NOTE(review): auto-generated client-method stub -- the body is `pass`, so
    # calling this module-level function directly returns None. The docstring
    # below documents the real Redshift RevokeClusterSecurityGroupIngress API
    # operation that the live boto3 client dispatches through botocore.
    """
    Revokes an ingress rule in an Amazon Redshift security group for a previously authorized IP range or Amazon EC2 security group. To add an ingress rule, see AuthorizeClusterSecurityGroupIngress . For information about managing security groups, go to Amazon Redshift Cluster Security Groups in the Amazon Redshift Cluster Management Guide .
    See also: AWS API Documentation
    Exceptions
    :example: response = client.revoke_cluster_security_group_ingress(
    ClusterSecurityGroupName='string',
    CIDRIP='string',
    EC2SecurityGroupName='string',
    EC2SecurityGroupOwnerId='string'
    )
    :type ClusterSecurityGroupName: string
    :param ClusterSecurityGroupName: [REQUIRED]\nThe name of the security Group from which to revoke the ingress rule.\n
    :type CIDRIP: string
    :param CIDRIP: The IP range for which to revoke access. This range must be a valid Classless Inter-Domain Routing (CIDR) block of IP addresses. If CIDRIP is specified, EC2SecurityGroupName and EC2SecurityGroupOwnerId cannot be provided.
    :type EC2SecurityGroupName: string
    :param EC2SecurityGroupName: The name of the EC2 Security Group whose access is to be revoked. If EC2SecurityGroupName is specified, EC2SecurityGroupOwnerId must also be provided and CIDRIP cannot be provided.
    :type EC2SecurityGroupOwnerId: string
    :param EC2SecurityGroupOwnerId: The AWS account number of the owner of the security group specified in the EC2SecurityGroupName parameter. The AWS access key ID is not an acceptable value. If EC2SecurityGroupOwnerId is specified, EC2SecurityGroupName must also be provided, and CIDRIP cannot be provided.\nExample: 111122223333\n
    :rtype: dict
    ReturnsResponse Syntax
    {
    'ClusterSecurityGroup': {
    'ClusterSecurityGroupName': 'string',
    'Description': 'string',
    'EC2SecurityGroups': [
    {
    'Status': 'string',
    'EC2SecurityGroupName': 'string',
    'EC2SecurityGroupOwnerId': 'string',
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    },
    ],
    'IPRanges': [
    {
    'Status': 'string',
    'CIDRIP': 'string',
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    },
    ],
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    }
    }
    Response Structure
    (dict) --
    ClusterSecurityGroup (dict) --
    Describes a security group.
    ClusterSecurityGroupName (string) --
    The name of the cluster security group to which the operation was applied.
    Description (string) --
    A description of the security group.
    EC2SecurityGroups (list) --
    A list of EC2 security groups that are permitted to access clusters associated with this cluster security group.
    (dict) --
    Describes an Amazon EC2 security group.
    Status (string) --
    The status of the EC2 security group.
    EC2SecurityGroupName (string) --
    The name of the EC2 Security Group.
    EC2SecurityGroupOwnerId (string) --
    The AWS ID of the owner of the EC2 security group specified in the EC2SecurityGroupName field.
    Tags (list) --
    The list of tags for the EC2 security group.
    (dict) --
    A tag consisting of a name/value pair for a resource.
    Key (string) --
    The key, or name, for the resource tag.
    Value (string) --
    The value for the resource tag.
    IPRanges (list) --
    A list of IP ranges (CIDR blocks) that are permitted to access clusters associated with this cluster security group.
    (dict) --
    Describes an IP range used in a security group.
    Status (string) --
    The status of the IP range, for example, "authorized".
    CIDRIP (string) --
    The IP range in Classless Inter-Domain Routing (CIDR) notation.
    Tags (list) --
    The list of tags for the IP range.
    (dict) --
    A tag consisting of a name/value pair for a resource.
    Key (string) --
    The key, or name, for the resource tag.
    Value (string) --
    The value for the resource tag.
    Tags (list) --
    The list of tags for the cluster security group.
    (dict) --
    A tag consisting of a name/value pair for a resource.
    Key (string) --
    The key, or name, for the resource tag.
    Value (string) --
    The value for the resource tag.
    Exceptions
    Redshift.Client.exceptions.ClusterSecurityGroupNotFoundFault
    Redshift.Client.exceptions.AuthorizationNotFoundFault
    Redshift.Client.exceptions.InvalidClusterSecurityGroupStateFault
    :return: {
    'ClusterSecurityGroup': {
    'ClusterSecurityGroupName': 'string',
    'Description': 'string',
    'EC2SecurityGroups': [
    {
    'Status': 'string',
    'EC2SecurityGroupName': 'string',
    'EC2SecurityGroupOwnerId': 'string',
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    },
    ],
    'IPRanges': [
    {
    'Status': 'string',
    'CIDRIP': 'string',
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    },
    ],
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    }
    }
    :returns:
    Redshift.Client.exceptions.ClusterSecurityGroupNotFoundFault
    Redshift.Client.exceptions.AuthorizationNotFoundFault
    Redshift.Client.exceptions.InvalidClusterSecurityGroupStateFault
    """
    pass
def revoke_snapshot_access(SnapshotIdentifier=None, SnapshotClusterIdentifier=None, AccountWithRestoreAccess=None):
    # NOTE(review): auto-generated client-method stub -- the body is `pass`, so
    # calling this module-level function directly returns None. The docstring
    # below documents the real Redshift RevokeSnapshotAccess API operation that
    # the live boto3 client dispatches through botocore.
    """
    Removes the ability of the specified AWS customer account to restore the specified snapshot. If the account is currently restoring the snapshot, the restore will run to completion.
    For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide .
    See also: AWS API Documentation
    Exceptions
    :example: response = client.revoke_snapshot_access(
    SnapshotIdentifier='string',
    SnapshotClusterIdentifier='string',
    AccountWithRestoreAccess='string'
    )
    :type SnapshotIdentifier: string
    :param SnapshotIdentifier: [REQUIRED]\nThe identifier of the snapshot that the account can no longer access.\n
    :type SnapshotClusterIdentifier: string
    :param SnapshotClusterIdentifier: The identifier of the cluster the snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.
    :type AccountWithRestoreAccess: string
    :param AccountWithRestoreAccess: [REQUIRED]\nThe identifier of the AWS customer account that can no longer restore the specified snapshot.\n
    :rtype: dict
    ReturnsResponse Syntax
    {
    'Snapshot': {
    'SnapshotIdentifier': 'string',
    'ClusterIdentifier': 'string',
    'SnapshotCreateTime': datetime(2015, 1, 1),
    'Status': 'string',
    'Port': 123,
    'AvailabilityZone': 'string',
    'ClusterCreateTime': datetime(2015, 1, 1),
    'MasterUsername': 'string',
    'ClusterVersion': 'string',
    'SnapshotType': 'string',
    'NodeType': 'string',
    'NumberOfNodes': 123,
    'DBName': 'string',
    'VpcId': 'string',
    'Encrypted': True|False,
    'KmsKeyId': 'string',
    'EncryptedWithHSM': True|False,
    'AccountsWithRestoreAccess': [
    {
    'AccountId': 'string',
    'AccountAlias': 'string'
    },
    ],
    'OwnerAccount': 'string',
    'TotalBackupSizeInMegaBytes': 123.0,
    'ActualIncrementalBackupSizeInMegaBytes': 123.0,
    'BackupProgressInMegaBytes': 123.0,
    'CurrentBackupRateInMegaBytesPerSecond': 123.0,
    'EstimatedSecondsToCompletion': 123,
    'ElapsedTimeInSeconds': 123,
    'SourceRegion': 'string',
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ],
    'RestorableNodeTypes': [
    'string',
    ],
    'EnhancedVpcRouting': True|False,
    'MaintenanceTrackName': 'string',
    'ManualSnapshotRetentionPeriod': 123,
    'ManualSnapshotRemainingDays': 123,
    'SnapshotRetentionStartTime': datetime(2015, 1, 1)
    }
    }
    Response Structure
    (dict) --
    Snapshot (dict) --
    Describes a snapshot.
    SnapshotIdentifier (string) --
    The snapshot identifier that is provided in the request.
    ClusterIdentifier (string) --
    The identifier of the cluster for which the snapshot was taken.
    SnapshotCreateTime (datetime) --
    The time (in UTC format) when Amazon Redshift began the snapshot. A snapshot contains a copy of the cluster data as of this exact time.
    Status (string) --
    The snapshot status. The value of the status depends on the API operation used:
    CreateClusterSnapshot and CopyClusterSnapshot returns status as "creating".
    DescribeClusterSnapshots returns status as "creating", "available", "final snapshot", or "failed".
    DeleteClusterSnapshot returns status as "deleted".
    Port (integer) --
    The port that the cluster is listening on.
    AvailabilityZone (string) --
    The Availability Zone in which the cluster was created.
    ClusterCreateTime (datetime) --
    The time (UTC) when the cluster was originally created.
    MasterUsername (string) --
    The master user name for the cluster.
    ClusterVersion (string) --
    The version ID of the Amazon Redshift engine that is running on the cluster.
    SnapshotType (string) --
    The snapshot type. Snapshots created using CreateClusterSnapshot and CopyClusterSnapshot are of type "manual".
    NodeType (string) --
    The node type of the nodes in the cluster.
    NumberOfNodes (integer) --
    The number of nodes in the cluster.
    DBName (string) --
    The name of the database that was created when the cluster was created.
    VpcId (string) --
    The VPC identifier of the cluster if the snapshot is from a cluster in a VPC. Otherwise, this field is not in the output.
    Encrypted (boolean) --
    If true , the data in the snapshot is encrypted at rest.
    KmsKeyId (string) --
    The AWS Key Management Service (KMS) key ID of the encryption key that was used to encrypt data in the cluster from which the snapshot was taken.
    EncryptedWithHSM (boolean) --
    A boolean that indicates whether the snapshot data is encrypted using the HSM keys of the source cluster. true indicates that the data is encrypted using HSM keys.
    AccountsWithRestoreAccess (list) --
    A list of the AWS customer accounts authorized to restore the snapshot. Returns null if no accounts are authorized. Visible only to the snapshot owner.
    (dict) --
    Describes an AWS customer account authorized to restore a snapshot.
    AccountId (string) --
    The identifier of an AWS customer account authorized to restore a snapshot.
    AccountAlias (string) --
    The identifier of an AWS support account authorized to restore a snapshot. For AWS support, the identifier is amazon-redshift-support .
    OwnerAccount (string) --
    For manual snapshots, the AWS customer account used to create or copy the snapshot. For automatic snapshots, the owner of the cluster. The owner can perform all snapshot actions, such as sharing a manual snapshot.
    TotalBackupSizeInMegaBytes (float) --
    The size of the complete set of backup data that would be used to restore the cluster.
    ActualIncrementalBackupSizeInMegaBytes (float) --
    The size of the incremental backup.
    BackupProgressInMegaBytes (float) --
    The number of megabytes that have been transferred to the snapshot backup.
    CurrentBackupRateInMegaBytesPerSecond (float) --
    The number of megabytes per second being transferred to the snapshot backup. Returns 0 for a completed backup.
    EstimatedSecondsToCompletion (integer) --
    The estimate of the time remaining before the snapshot backup will complete. Returns 0 for a completed backup.
    ElapsedTimeInSeconds (integer) --
    The amount of time an in-progress snapshot backup has been running, or the amount of time it took a completed backup to finish.
    SourceRegion (string) --
    The source region from which the snapshot was copied.
    Tags (list) --
    The list of tags for the cluster snapshot.
    (dict) --
    A tag consisting of a name/value pair for a resource.
    Key (string) --
    The key, or name, for the resource tag.
    Value (string) --
    The value for the resource tag.
    RestorableNodeTypes (list) --
    The list of node types that this cluster snapshot is able to restore into.
    (string) --
    EnhancedVpcRouting (boolean) --
    An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.
    If this option is true , enhanced VPC routing is enabled.
    Default: false
    MaintenanceTrackName (string) --
    The name of the maintenance track for the snapshot.
    ManualSnapshotRetentionPeriod (integer) --
    The number of days that a manual snapshot is retained. If the value is -1, the manual snapshot is retained indefinitely.
    The value must be either -1 or an integer between 1 and 3,653.
    ManualSnapshotRemainingDays (integer) --
    The number of days until a manual snapshot will pass its retention period.
    SnapshotRetentionStartTime (datetime) --
    A timestamp representing the start of the retention period for the snapshot.
    Exceptions
    Redshift.Client.exceptions.AccessToSnapshotDeniedFault
    Redshift.Client.exceptions.AuthorizationNotFoundFault
    Redshift.Client.exceptions.ClusterSnapshotNotFoundFault
    :return: {
    'Snapshot': {
    'SnapshotIdentifier': 'string',
    'ClusterIdentifier': 'string',
    'SnapshotCreateTime': datetime(2015, 1, 1),
    'Status': 'string',
    'Port': 123,
    'AvailabilityZone': 'string',
    'ClusterCreateTime': datetime(2015, 1, 1),
    'MasterUsername': 'string',
    'ClusterVersion': 'string',
    'SnapshotType': 'string',
    'NodeType': 'string',
    'NumberOfNodes': 123,
    'DBName': 'string',
    'VpcId': 'string',
    'Encrypted': True|False,
    'KmsKeyId': 'string',
    'EncryptedWithHSM': True|False,
    'AccountsWithRestoreAccess': [
    {
    'AccountId': 'string',
    'AccountAlias': 'string'
    },
    ],
    'OwnerAccount': 'string',
    'TotalBackupSizeInMegaBytes': 123.0,
    'ActualIncrementalBackupSizeInMegaBytes': 123.0,
    'BackupProgressInMegaBytes': 123.0,
    'CurrentBackupRateInMegaBytesPerSecond': 123.0,
    'EstimatedSecondsToCompletion': 123,
    'ElapsedTimeInSeconds': 123,
    'SourceRegion': 'string',
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ],
    'RestorableNodeTypes': [
    'string',
    ],
    'EnhancedVpcRouting': True|False,
    'MaintenanceTrackName': 'string',
    'ManualSnapshotRetentionPeriod': 123,
    'ManualSnapshotRemainingDays': 123,
    'SnapshotRetentionStartTime': datetime(2015, 1, 1)
    }
    }
    :returns:
    CreateClusterSnapshot and CopyClusterSnapshot returns status as "creating".
    DescribeClusterSnapshots returns status as "creating", "available", "final snapshot", or "failed".
    DeleteClusterSnapshot returns status as "deleted".
    """
    pass
def rotate_encryption_key(ClusterIdentifier=None):
"""
Rotates the encryption keys for a cluster.
See also: AWS API Documentation
Exceptions
:example: response = client.rotate_encryption_key(
ClusterIdentifier='string'
)
:type ClusterIdentifier: string
:param ClusterIdentifier: [REQUIRED]\nThe unique identifier of the cluster that you want to rotate the encryption keys for.\nConstraints: Must be the name of valid cluster that has encryption enabled.\n
:rtype: dict
ReturnsResponse Syntax{
'Cluster': {
'ClusterIdentifier': 'string',
'NodeType': 'string',
'ClusterStatus': 'string',
'ClusterAvailabilityStatus': 'string',
'ModifyStatus': 'string',
'MasterUsername': 'string',
'DBName': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'ClusterCreateTime': datetime(2015, 1, 1),
'AutomatedSnapshotRetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'ClusterSecurityGroups': [
{
'ClusterSecurityGroupName': 'string',
'Status': 'string'
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'ClusterParameterGroups': [
{
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'ClusterParameterStatusList': [
{
'ParameterName': 'string',
'ParameterApplyStatus': 'string',
'ParameterApplyErrorDescription': 'string'
},
]
},
],
'ClusterSubnetGroupName': 'string',
'VpcId': 'string',
'AvailabilityZone': 'string',
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'MasterUserPassword': 'string',
'NodeType': 'string',
'NumberOfNodes': 123,
'ClusterType': 'string',
'ClusterVersion': 'string',
'AutomatedSnapshotRetentionPeriod': 123,
'ClusterIdentifier': 'string',
'PubliclyAccessible': True|False,
'EnhancedVpcRouting': True|False,
'MaintenanceTrackName': 'string',
'EncryptionType': 'string'
},
'ClusterVersion': 'string',
'AllowVersionUpgrade': True|False,
'NumberOfNodes': 123,
'PubliclyAccessible': True|False,
'Encrypted': True|False,
'RestoreStatus': {
'Status': 'string',
'CurrentRestoreRateInMegaBytesPerSecond': 123.0,
'SnapshotSizeInMegaBytes': 123,
'ProgressInMegaBytes': 123,
'ElapsedTimeInSeconds': 123,
'EstimatedTimeToCompletionInSeconds': 123
},
'DataTransferProgress': {
'Status': 'string',
'CurrentRateInMegaBytesPerSecond': 123.0,
'TotalDataInMegaBytes': 123,
'DataTransferredInMegaBytes': 123,
'EstimatedTimeToCompletionInSeconds': 123,
'ElapsedTimeInSeconds': 123
},
'HsmStatus': {
'HsmClientCertificateIdentifier': 'string',
'HsmConfigurationIdentifier': 'string',
'Status': 'string'
},
'ClusterSnapshotCopyStatus': {
'DestinationRegion': 'string',
'RetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'SnapshotCopyGrantName': 'string'
},
'ClusterPublicKey': 'string',
'ClusterNodes': [
{
'NodeRole': 'string',
'PrivateIPAddress': 'string',
'PublicIPAddress': 'string'
},
],
'ElasticIpStatus': {
'ElasticIp': 'string',
'Status': 'string'
},
'ClusterRevisionNumber': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'KmsKeyId': 'string',
'EnhancedVpcRouting': True|False,
'IamRoles': [
{
'IamRoleArn': 'string',
'ApplyStatus': 'string'
},
],
'PendingActions': [
'string',
],
'MaintenanceTrackName': 'string',
'ElasticResizeNumberOfNodeOptions': 'string',
'DeferredMaintenanceWindows': [
{
'DeferMaintenanceIdentifier': 'string',
'DeferMaintenanceStartTime': datetime(2015, 1, 1),
'DeferMaintenanceEndTime': datetime(2015, 1, 1)
},
],
'SnapshotScheduleIdentifier': 'string',
'SnapshotScheduleState': 'MODIFYING'|'ACTIVE'|'FAILED',
'ExpectedNextSnapshotScheduleTime': datetime(2015, 1, 1),
'ExpectedNextSnapshotScheduleTimeStatus': 'string',
'NextMaintenanceWindowStartTime': datetime(2015, 1, 1),
'ResizeInfo': {
'ResizeType': 'string',
'AllowCancelResize': True|False
}
}
}
Response Structure
(dict) --
Cluster (dict) --Describes a cluster.
ClusterIdentifier (string) --The unique identifier of the cluster.
NodeType (string) --The node type for the nodes in the cluster.
ClusterStatus (string) --The current state of the cluster. Possible values are the following:
available
available, prep-for-resize
available, resize-cleanup
cancelling-resize
creating
deleting
final-snapshot
hardware-failure
incompatible-hsm
incompatible-network
incompatible-parameters
incompatible-restore
modifying
paused
rebooting
renaming
resizing
rotating-keys
storage-full
updating-hsm
ClusterAvailabilityStatus (string) --The availability status of the cluster for queries. Possible values are the following:
Available - The cluster is available for queries.
Unavailable - The cluster is not available for queries.
Maintenance - The cluster is intermittently available for queries due to maintenance activities.
Modifying - The cluster is intermittently available for queries due to changes that modify the cluster.
Failed - The cluster failed and is not available for queries.
ModifyStatus (string) --The status of a modify operation, if any, initiated for the cluster.
MasterUsername (string) --The master user name for the cluster. This name is used to connect to the database that is specified in the DBName parameter.
DBName (string) --The name of the initial database that was created when the cluster was created. This same name is returned for the life of the cluster. If an initial database was not specified, a database named dev was created by default.
Endpoint (dict) --The connection endpoint.
Address (string) --The DNS address of the Cluster.
Port (integer) --The port that the database engine is listening on.
ClusterCreateTime (datetime) --The date and time that the cluster was created.
AutomatedSnapshotRetentionPeriod (integer) --The number of days that automatic cluster snapshots are retained.
ManualSnapshotRetentionPeriod (integer) --The default number of days to retain a manual snapshot. If the value is -1, the snapshot is retained indefinitely. This setting doesn\'t change the retention period of existing snapshots.
The value must be either -1 or an integer between 1 and 3,653.
ClusterSecurityGroups (list) --A list of cluster security group that are associated with the cluster. Each security group is represented by an element that contains ClusterSecurityGroup.Name and ClusterSecurityGroup.Status subelements.
Cluster security groups are used when the cluster is not created in an Amazon Virtual Private Cloud (VPC). Clusters that are created in a VPC use VPC security groups, which are listed by the VpcSecurityGroups parameter.
(dict) --Describes a cluster security group.
ClusterSecurityGroupName (string) --The name of the cluster security group.
Status (string) --The status of the cluster security group.
VpcSecurityGroups (list) --A list of Amazon Virtual Private Cloud (Amazon VPC) security groups that are associated with the cluster. This parameter is returned only if the cluster is in a VPC.
(dict) --Describes the members of a VPC security group.
VpcSecurityGroupId (string) --The identifier of the VPC security group.
Status (string) --The status of the VPC security group.
ClusterParameterGroups (list) --The list of cluster parameter groups that are associated with this cluster. Each parameter group in the list is returned with its status.
(dict) --Describes the status of a parameter group.
ParameterGroupName (string) --The name of the cluster parameter group.
ParameterApplyStatus (string) --The status of parameter updates.
ClusterParameterStatusList (list) --The list of parameter statuses.
For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide .
(dict) --Describes the status of a parameter group.
ParameterName (string) --The name of the parameter.
ParameterApplyStatus (string) --The status of the parameter that indicates whether the parameter is in sync with the database, waiting for a cluster reboot, or encountered an error when being applied.
The following are possible statuses and descriptions.
in-sync : The parameter value is in sync with the database.
pending-reboot : The parameter value will be applied after the cluster reboots.
applying : The parameter value is being applied to the database.
invalid-parameter : Cannot apply the parameter value because it has an invalid value or syntax.
apply-deferred : The parameter contains static property changes. The changes are deferred until the cluster reboots.
apply-error : Cannot connect to the cluster. The parameter change will be applied after the cluster reboots.
unknown-error : Cannot apply the parameter change right now. The change will be applied after the cluster reboots.
ParameterApplyErrorDescription (string) --The error that prevented the parameter from being applied to the database.
ClusterSubnetGroupName (string) --The name of the subnet group that is associated with the cluster. This parameter is valid only when the cluster is in a VPC.
VpcId (string) --The identifier of the VPC the cluster is in, if the cluster is in a VPC.
AvailabilityZone (string) --The name of the Availability Zone in which the cluster is located.
PreferredMaintenanceWindow (string) --The weekly time range, in Universal Coordinated Time (UTC), during which system maintenance can occur.
PendingModifiedValues (dict) --A value that, if present, indicates that changes to the cluster are pending. Specific pending changes are identified by subelements.
MasterUserPassword (string) --The pending or in-progress change of the master user password for the cluster.
NodeType (string) --The pending or in-progress change of the cluster\'s node type.
NumberOfNodes (integer) --The pending or in-progress change of the number of nodes in the cluster.
ClusterType (string) --The pending or in-progress change of the cluster type.
ClusterVersion (string) --The pending or in-progress change of the service version.
AutomatedSnapshotRetentionPeriod (integer) --The pending or in-progress change of the automated snapshot retention period.
ClusterIdentifier (string) --The pending or in-progress change of the new identifier for the cluster.
PubliclyAccessible (boolean) --The pending or in-progress change of the ability to connect to the cluster from the public network.
EnhancedVpcRouting (boolean) --An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.
If this option is true , enhanced VPC routing is enabled.
Default: false
MaintenanceTrackName (string) --The name of the maintenance track that the cluster will change to during the next maintenance window.
EncryptionType (string) --The encryption type for a cluster. Possible values are: KMS and None. For the China region the possible values are None, and Legacy.
ClusterVersion (string) --The version ID of the Amazon Redshift engine that is running on the cluster.
AllowVersionUpgrade (boolean) --A boolean value that, if true , indicates that major version upgrades will be applied automatically to the cluster during the maintenance window.
NumberOfNodes (integer) --The number of compute nodes in the cluster.
PubliclyAccessible (boolean) --A boolean value that, if true , indicates that the cluster can be accessed from a public network.
Encrypted (boolean) --A boolean value that, if true , indicates that data in the cluster is encrypted at rest.
RestoreStatus (dict) --A value that describes the status of a cluster restore action. This parameter returns null if the cluster was not created by restoring a snapshot.
Status (string) --The status of the restore action. Returns starting, restoring, completed, or failed.
CurrentRestoreRateInMegaBytesPerSecond (float) --The number of megabytes per second being transferred from the backup storage. Returns the average rate for a completed backup. This field is only updated when you restore to DC2 and DS2 node types.
SnapshotSizeInMegaBytes (integer) --The size of the set of snapshot data used to restore the cluster. This field is only updated when you restore to DC2 and DS2 node types.
ProgressInMegaBytes (integer) --The number of megabytes that have been transferred from snapshot storage. This field is only updated when you restore to DC2 and DS2 node types.
ElapsedTimeInSeconds (integer) --The amount of time an in-progress restore has been running, or the amount of time it took a completed restore to finish. This field is only updated when you restore to DC2 and DS2 node types.
EstimatedTimeToCompletionInSeconds (integer) --The estimate of the time remaining before the restore will complete. Returns 0 for a completed restore. This field is only updated when you restore to DC2 and DS2 node types.
DataTransferProgress (dict) --
Status (string) --Describes the status of the cluster. While the transfer is in progress the status is transferringdata .
CurrentRateInMegaBytesPerSecond (float) --Describes the data transfer rate in MB's per second.
TotalDataInMegaBytes (integer) --Describes the total amount of data to be transferred in megabytes.
DataTransferredInMegaBytes (integer) --Describes the total amount of data that has been transferred in MB's.
EstimatedTimeToCompletionInSeconds (integer) --Describes the estimated number of seconds remaining to complete the transfer.
ElapsedTimeInSeconds (integer) --Describes the number of seconds that have elapsed during the data transfer.
HsmStatus (dict) --A value that reports whether the Amazon Redshift cluster has finished applying any hardware security module (HSM) settings changes specified in a modify cluster command.
Values: active, applying
HsmClientCertificateIdentifier (string) --Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM.
HsmConfigurationIdentifier (string) --Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM.
Status (string) --Reports whether the Amazon Redshift cluster has finished applying any HSM settings changes specified in a modify cluster command.
Values: active, applying
ClusterSnapshotCopyStatus (dict) --A value that returns the destination region and retention period that are configured for cross-region snapshot copy.
DestinationRegion (string) --The destination region that snapshots are automatically copied to when cross-region snapshot copy is enabled.
RetentionPeriod (integer) --The number of days that automated snapshots are retained in the destination region after they are copied from a source region.
ManualSnapshotRetentionPeriod (integer) --The number of days that automated snapshots are retained in the destination region after they are copied from a source region. If the value is -1, the manual snapshot is retained indefinitely.
The value must be either -1 or an integer between 1 and 3,653.
SnapshotCopyGrantName (string) --The name of the snapshot copy grant.
ClusterPublicKey (string) --The public key for the cluster.
ClusterNodes (list) --The nodes in the cluster.
(dict) --The identifier of a node in a cluster.
NodeRole (string) --Whether the node is a leader node or a compute node.
PrivateIPAddress (string) --The private IP address of a node within a cluster.
PublicIPAddress (string) --The public IP address of a node within a cluster.
ElasticIpStatus (dict) --The status of the elastic IP (EIP) address.
ElasticIp (string) --The elastic IP (EIP) address for the cluster.
Status (string) --The status of the elastic IP (EIP) address.
ClusterRevisionNumber (string) --The specific revision number of the database in the cluster.
Tags (list) --The list of tags for the cluster.
(dict) --A tag consisting of a name/value pair for a resource.
Key (string) --The key, or name, for the resource tag.
Value (string) --The value for the resource tag.
KmsKeyId (string) --The AWS Key Management Service (AWS KMS) key ID of the encryption key used to encrypt data in the cluster.
EnhancedVpcRouting (boolean) --An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.
If this option is true , enhanced VPC routing is enabled.
Default: false
IamRoles (list) --A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services.
(dict) --An AWS Identity and Access Management (IAM) role that can be used by the associated Amazon Redshift cluster to access other AWS services.
IamRoleArn (string) --The Amazon Resource Name (ARN) of the IAM role, for example, arn:aws:iam::123456789012:role/RedshiftCopyUnload .
ApplyStatus (string) --A value that describes the status of the IAM role\'s association with an Amazon Redshift cluster.
The following are possible statuses and descriptions.
in-sync : The role is available for use by the cluster.
adding : The role is in the process of being associated with the cluster.
removing : The role is in the process of being disassociated with the cluster.
PendingActions (list) --Cluster operations that are waiting to be started.
(string) --
MaintenanceTrackName (string) --The name of the maintenance track for the cluster.
ElasticResizeNumberOfNodeOptions (string) --The number of nodes that you can resize the cluster to with the elastic resize method.
DeferredMaintenanceWindows (list) --Describes a group of DeferredMaintenanceWindow objects.
(dict) --Describes a deferred maintenance window
DeferMaintenanceIdentifier (string) --A unique identifier for the maintenance window.
DeferMaintenanceStartTime (datetime) --A timestamp for the beginning of the time period when we defer maintenance.
DeferMaintenanceEndTime (datetime) --A timestamp for the end of the time period when we defer maintenance.
SnapshotScheduleIdentifier (string) --A unique identifier for the cluster snapshot schedule.
SnapshotScheduleState (string) --The current state of the cluster snapshot schedule.
ExpectedNextSnapshotScheduleTime (datetime) --The date and time when the next snapshot is expected to be taken for clusters with a valid snapshot schedule and backups enabled.
ExpectedNextSnapshotScheduleTimeStatus (string) --The status of next expected snapshot for clusters having a valid snapshot schedule and backups enabled. Possible values are the following:
OnTrack - The next snapshot is expected to be taken on time.
Pending - The next snapshot is pending to be taken.
NextMaintenanceWindowStartTime (datetime) --The date and time in UTC when system maintenance can begin.
ResizeInfo (dict) --Returns the following:
AllowCancelResize: a boolean value indicating if the resize operation can be cancelled.
ResizeType: Returns ClassicResize
ResizeType (string) --Returns the value ClassicResize .
AllowCancelResize (boolean) --A boolean value indicating if the resize operation can be cancelled.
Exceptions
Redshift.Client.exceptions.ClusterNotFoundFault
Redshift.Client.exceptions.InvalidClusterStateFault
Redshift.Client.exceptions.DependentServiceRequestThrottlingFault
:return: {
'Cluster': {
'ClusterIdentifier': 'string',
'NodeType': 'string',
'ClusterStatus': 'string',
'ClusterAvailabilityStatus': 'string',
'ModifyStatus': 'string',
'MasterUsername': 'string',
'DBName': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'ClusterCreateTime': datetime(2015, 1, 1),
'AutomatedSnapshotRetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'ClusterSecurityGroups': [
{
'ClusterSecurityGroupName': 'string',
'Status': 'string'
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'ClusterParameterGroups': [
{
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'ClusterParameterStatusList': [
{
'ParameterName': 'string',
'ParameterApplyStatus': 'string',
'ParameterApplyErrorDescription': 'string'
},
]
},
],
'ClusterSubnetGroupName': 'string',
'VpcId': 'string',
'AvailabilityZone': 'string',
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'MasterUserPassword': 'string',
'NodeType': 'string',
'NumberOfNodes': 123,
'ClusterType': 'string',
'ClusterVersion': 'string',
'AutomatedSnapshotRetentionPeriod': 123,
'ClusterIdentifier': 'string',
'PubliclyAccessible': True|False,
'EnhancedVpcRouting': True|False,
'MaintenanceTrackName': 'string',
'EncryptionType': 'string'
},
'ClusterVersion': 'string',
'AllowVersionUpgrade': True|False,
'NumberOfNodes': 123,
'PubliclyAccessible': True|False,
'Encrypted': True|False,
'RestoreStatus': {
'Status': 'string',
'CurrentRestoreRateInMegaBytesPerSecond': 123.0,
'SnapshotSizeInMegaBytes': 123,
'ProgressInMegaBytes': 123,
'ElapsedTimeInSeconds': 123,
'EstimatedTimeToCompletionInSeconds': 123
},
'DataTransferProgress': {
'Status': 'string',
'CurrentRateInMegaBytesPerSecond': 123.0,
'TotalDataInMegaBytes': 123,
'DataTransferredInMegaBytes': 123,
'EstimatedTimeToCompletionInSeconds': 123,
'ElapsedTimeInSeconds': 123
},
'HsmStatus': {
'HsmClientCertificateIdentifier': 'string',
'HsmConfigurationIdentifier': 'string',
'Status': 'string'
},
'ClusterSnapshotCopyStatus': {
'DestinationRegion': 'string',
'RetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'SnapshotCopyGrantName': 'string'
},
'ClusterPublicKey': 'string',
'ClusterNodes': [
{
'NodeRole': 'string',
'PrivateIPAddress': 'string',
'PublicIPAddress': 'string'
},
],
'ElasticIpStatus': {
'ElasticIp': 'string',
'Status': 'string'
},
'ClusterRevisionNumber': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'KmsKeyId': 'string',
'EnhancedVpcRouting': True|False,
'IamRoles': [
{
'IamRoleArn': 'string',
'ApplyStatus': 'string'
},
],
'PendingActions': [
'string',
],
'MaintenanceTrackName': 'string',
'ElasticResizeNumberOfNodeOptions': 'string',
'DeferredMaintenanceWindows': [
{
'DeferMaintenanceIdentifier': 'string',
'DeferMaintenanceStartTime': datetime(2015, 1, 1),
'DeferMaintenanceEndTime': datetime(2015, 1, 1)
},
],
'SnapshotScheduleIdentifier': 'string',
'SnapshotScheduleState': 'MODIFYING'|'ACTIVE'|'FAILED',
'ExpectedNextSnapshotScheduleTime': datetime(2015, 1, 1),
'ExpectedNextSnapshotScheduleTimeStatus': 'string',
'NextMaintenanceWindowStartTime': datetime(2015, 1, 1),
'ResizeInfo': {
'ResizeType': 'string',
'AllowCancelResize': True|False
}
}
}
:returns:
Available - The cluster is available for queries.
Unavailable - The cluster is not available for queries.
Maintenance - The cluster is intermittently available for queries due to maintenance activities.
Modifying - The cluster is intermittently available for queries due to changes that modify the cluster.
Failed - The cluster failed and is not available for queries.
"""
pass
| 36.524366
| 1,540
| 0.680358
| 93,130
| 849,922
| 6.204166
| 0.018952
| 0.025355
| 0.017072
| 0.006639
| 0.9171
| 0.900979
| 0.890644
| 0.879203
| 0.87116
| 0.86131
| 0
| 0.009511
| 0.245619
| 849,922
| 23,269
| 1,541
| 36.525936
| 0.891652
| 0.98248
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.515625
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 10
|
378c6607bc682f384adb52333c42385b4f876e08
| 36,755
|
py
|
Python
|
sdk/python/pulumi_civo/kubernetes_cluster.py
|
rawkode/pulumi-civo
|
ee8a904a6fdf917a2e794e6783069da1b9d1d888
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_civo/kubernetes_cluster.py
|
rawkode/pulumi-civo
|
ee8a904a6fdf917a2e794e6783069da1b9d1d888
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_civo/kubernetes_cluster.py
|
rawkode/pulumi-civo
|
ee8a904a6fdf917a2e794e6783069da1b9d1d888
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['KubernetesClusterArgs', 'KubernetesCluster']
@pulumi.input_type
class KubernetesClusterArgs:
    def __init__(__self__, *,
                 firewall_id: pulumi.Input[str],
                 applications: Optional[pulumi.Input[str]] = None,
                 kubernetes_version: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 network_id: Optional[pulumi.Input[str]] = None,
                 num_target_nodes: Optional[pulumi.Input[int]] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[str]] = None,
                 target_nodes_size: Optional[pulumi.Input[str]] = None):
        """
        Arguments used to construct a KubernetesCluster resource.

        :param pulumi.Input[str] firewall_id: The existing firewall ID to use for this cluster
        :param pulumi.Input[str] applications: Comma separated list of applications to install. Spaces within application names are fine, but shouldn't be either side of the comma. Application names are case-sensitive; the available applications can be listed with the Civo CLI: 'civo kubernetes applications ls'. If you want to remove a default installed application, prefix it with a '-', e.g. -Traefik. For application that supports plans, you can use 'app*name:app*plan' format e.g. 'Linkerd:Linkerd & Jaeger' or 'MariaDB:5GB'.
        :param pulumi.Input[str] kubernetes_version: The version of k3s to install (optional, the default is currently the latest available)
        :param pulumi.Input[str] name: Name for your cluster, must be unique within your account
        :param pulumi.Input[str] network_id: The network for the cluster, if not declare we use the default one
        :param pulumi.Input[int] num_target_nodes: The number of instances to create (optional, the default at the time of writing is 3)
        :param pulumi.Input[str] region: The region for the cluster, if not declare we use the region in declared in the provider
        :param pulumi.Input[str] tags: Space separated list of tags, to be used freely as required
        :param pulumi.Input[str] target_nodes_size: The size of each node (optional, the default is currently g3.k3s.medium)
        """
        # firewall_id is the only required argument; it is always set.
        pulumi.set(__self__, "firewall_id", firewall_id)
        # Optional arguments are only set when the caller provided a value,
        # in the same order the generated code assigns them.
        for attr_name, attr_value in (
            ("applications", applications),
            ("kubernetes_version", kubernetes_version),
            ("name", name),
            ("network_id", network_id),
            ("num_target_nodes", num_target_nodes),
            ("region", region),
            ("tags", tags),
            ("target_nodes_size", target_nodes_size),
        ):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="firewallId")
    def firewall_id(self) -> pulumi.Input[str]:
        """
        The existing firewall ID to use for this cluster
        """
        stored = pulumi.get(self, "firewall_id")
        return stored

    @firewall_id.setter
    def firewall_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "firewall_id", value)

    @property
    @pulumi.getter
    def applications(self) -> Optional[pulumi.Input[str]]:
        """
        Comma separated list of applications to install. Spaces within application names are fine, but shouldn't be either side of the comma. Application names are case-sensitive; the available applications can be listed with the Civo CLI: 'civo kubernetes applications ls'. If you want to remove a default installed application, prefix it with a '-', e.g. -Traefik. For application that supports plans, you can use 'app*name:app*plan' format e.g. 'Linkerd:Linkerd & Jaeger' or 'MariaDB:5GB'.
        """
        stored = pulumi.get(self, "applications")
        return stored

    @applications.setter
    def applications(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "applications", value)

    @property
    @pulumi.getter(name="kubernetesVersion")
    def kubernetes_version(self) -> Optional[pulumi.Input[str]]:
        """
        The version of k3s to install (optional, the default is currently the latest available)
        """
        stored = pulumi.get(self, "kubernetes_version")
        return stored

    @kubernetes_version.setter
    def kubernetes_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kubernetes_version", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name for your cluster, must be unique within your account
        """
        stored = pulumi.get(self, "name")
        return stored

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="networkId")
    def network_id(self) -> Optional[pulumi.Input[str]]:
        """
        The network for the cluster, if not declare we use the default one
        """
        stored = pulumi.get(self, "network_id")
        return stored

    @network_id.setter
    def network_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "network_id", value)

    @property
    @pulumi.getter(name="numTargetNodes")
    def num_target_nodes(self) -> Optional[pulumi.Input[int]]:
        """
        The number of instances to create (optional, the default at the time of writing is 3)
        """
        stored = pulumi.get(self, "num_target_nodes")
        return stored

    @num_target_nodes.setter
    def num_target_nodes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "num_target_nodes", value)

    @property
    @pulumi.getter
    def region(self) -> Optional[pulumi.Input[str]]:
        """
        The region for the cluster, if not declare we use the region in declared in the provider
        """
        stored = pulumi.get(self, "region")
        return stored

    @region.setter
    def region(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "region", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[str]]:
        """
        Space separated list of tags, to be used freely as required
        """
        stored = pulumi.get(self, "tags")
        return stored

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter(name="targetNodesSize")
    def target_nodes_size(self) -> Optional[pulumi.Input[str]]:
        """
        The size of each node (optional, the default is currently g3.k3s.medium)
        """
        stored = pulumi.get(self, "target_nodes_size")
        return stored

    @target_nodes_size.setter
    def target_nodes_size(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target_nodes_size", value)
@pulumi.input_type
class _KubernetesClusterState:
def __init__(__self__, *,
             api_endpoint: Optional[pulumi.Input[str]] = None,
             applications: Optional[pulumi.Input[str]] = None,
             created_at: Optional[pulumi.Input[str]] = None,
             dns_entry: Optional[pulumi.Input[str]] = None,
             firewall_id: Optional[pulumi.Input[str]] = None,
             installed_applications: Optional[pulumi.Input[Sequence[pulumi.Input['KubernetesClusterInstalledApplicationArgs']]]] = None,
             instances: Optional[pulumi.Input[Sequence[pulumi.Input['KubernetesClusterInstanceArgs']]]] = None,
             kubeconfig: Optional[pulumi.Input[str]] = None,
             kubernetes_version: Optional[pulumi.Input[str]] = None,
             master_ip: Optional[pulumi.Input[str]] = None,
             name: Optional[pulumi.Input[str]] = None,
             network_id: Optional[pulumi.Input[str]] = None,
             num_target_nodes: Optional[pulumi.Input[int]] = None,
             pools: Optional[pulumi.Input[Sequence[pulumi.Input['KubernetesClusterPoolArgs']]]] = None,
             ready: Optional[pulumi.Input[bool]] = None,
             region: Optional[pulumi.Input[str]] = None,
             status: Optional[pulumi.Input[str]] = None,
             tags: Optional[pulumi.Input[str]] = None,
             target_nodes_size: Optional[pulumi.Input[str]] = None):
    """
    Input properties used for looking up and filtering KubernetesCluster resources.

    :param pulumi.Input[str] api_endpoint: The API server endpoint of the cluster
    :param pulumi.Input[str] applications: Comma separated list of applications to install. Spaces within application names are fine, but shouldn't be either side of the comma. Application names are case-sensitive; the available applications can be listed with the Civo CLI: 'civo kubernetes applications ls'. If you want to remove a default installed application, prefix it with a '-', e.g. -Traefik. For application that supports plans, you can use 'app*name:app*plan' format e.g. 'Linkerd:Linkerd & Jaeger' or 'MariaDB:5GB'.
    :param pulumi.Input[str] created_at: The timestamp when the cluster was created
    :param pulumi.Input[str] dns_entry: The DNS name of the cluster
    :param pulumi.Input[str] firewall_id: The existing firewall ID to use for this cluster
    :param pulumi.Input[str] kubeconfig: The kubeconfig of the cluster
    :param pulumi.Input[str] kubernetes_version: The version of k3s to install (optional, the default is currently the latest available)
    :param pulumi.Input[str] master_ip: The IP address of the master node
    :param pulumi.Input[str] name: Name for your cluster, must be unique within your account
    :param pulumi.Input[str] network_id: The network for the cluster, if not declare we use the default one
    :param pulumi.Input[int] num_target_nodes: The number of instances to create (optional, the default at the time of writing is 3)
    :param pulumi.Input[bool] ready: When cluster is ready, this will return `true`
    :param pulumi.Input[str] region: The region for the cluster, if not declare we use the region in declared in the provider
    :param pulumi.Input[str] status: Status of the cluster
    :param pulumi.Input[str] tags: Space separated list of tags, to be used freely as required
    :param pulumi.Input[str] target_nodes_size: The size of each node (optional, the default is currently g3.k3s.medium)
    """
    # Every state property is optional; only set those the caller supplied,
    # preserving the assignment order of the generated code.
    for attr_name, attr_value in (
        ("api_endpoint", api_endpoint),
        ("applications", applications),
        ("created_at", created_at),
        ("dns_entry", dns_entry),
        ("firewall_id", firewall_id),
        ("installed_applications", installed_applications),
        ("instances", instances),
        ("kubeconfig", kubeconfig),
        ("kubernetes_version", kubernetes_version),
        ("master_ip", master_ip),
        ("name", name),
        ("network_id", network_id),
        ("num_target_nodes", num_target_nodes),
        ("pools", pools),
        ("ready", ready),
        ("region", region),
        ("status", status),
        ("tags", tags),
        ("target_nodes_size", target_nodes_size),
    ):
        if attr_value is not None:
            pulumi.set(__self__, attr_name, attr_value)
@property
@pulumi.getter(name="apiEndpoint")
def api_endpoint(self) -> Optional[pulumi.Input[str]]:
    """
    The API server endpoint of the cluster
    """
    stored = pulumi.get(self, "api_endpoint")
    return stored

@api_endpoint.setter
def api_endpoint(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "api_endpoint", value)
@property
@pulumi.getter
def applications(self) -> Optional[pulumi.Input[str]]:
    """
    Comma separated list of applications to install. Spaces within application names are fine, but shouldn't be either side of the comma. Application names are case-sensitive; the available applications can be listed with the Civo CLI: 'civo kubernetes applications ls'. If you want to remove a default installed application, prefix it with a '-', e.g. -Traefik. For application that supports plans, you can use 'app*name:app*plan' format e.g. 'Linkerd:Linkerd & Jaeger' or 'MariaDB:5GB'.
    """
    stored = pulumi.get(self, "applications")
    return stored

@applications.setter
def applications(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "applications", value)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> Optional[pulumi.Input[str]]:
    """
    The timestamp when the cluster was created
    """
    stored = pulumi.get(self, "created_at")
    return stored

@created_at.setter
def created_at(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "created_at", value)
@property
@pulumi.getter(name="dnsEntry")
def dns_entry(self) -> Optional[pulumi.Input[str]]:
    """
    The DNS name of the cluster
    """
    stored = pulumi.get(self, "dns_entry")
    return stored

@dns_entry.setter
def dns_entry(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "dns_entry", value)
@property
@pulumi.getter(name="firewallId")
def firewall_id(self) -> Optional[pulumi.Input[str]]:
    """
    The existing firewall ID to use for this cluster
    """
    stored = pulumi.get(self, "firewall_id")
    return stored

@firewall_id.setter
def firewall_id(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "firewall_id", value)
@property
@pulumi.getter(name="installedApplications")
def installed_applications(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['KubernetesClusterInstalledApplicationArgs']]]]:
    stored = pulumi.get(self, "installed_applications")
    return stored

@installed_applications.setter
def installed_applications(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['KubernetesClusterInstalledApplicationArgs']]]]):
    pulumi.set(self, "installed_applications", value)
@property
@pulumi.getter
def instances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['KubernetesClusterInstanceArgs']]]]:
    stored = pulumi.get(self, "instances")
    return stored

@instances.setter
def instances(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['KubernetesClusterInstanceArgs']]]]):
    pulumi.set(self, "instances", value)
@property
@pulumi.getter
def kubeconfig(self) -> Optional[pulumi.Input[str]]:
    """
    The kubeconfig of the cluster
    """
    stored = pulumi.get(self, "kubeconfig")
    return stored

@kubeconfig.setter
def kubeconfig(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "kubeconfig", value)
@property
@pulumi.getter(name="kubernetesVersion")
def kubernetes_version(self) -> Optional[pulumi.Input[str]]:
    """
    The version of k3s to install (optional, the default is currently the latest available)
    """
    stored = pulumi.get(self, "kubernetes_version")
    return stored

@kubernetes_version.setter
def kubernetes_version(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "kubernetes_version", value)
@property
@pulumi.getter(name="masterIp")
def master_ip(self) -> Optional[pulumi.Input[str]]:
    """
    The IP address of the master node
    """
    stored = pulumi.get(self, "master_ip")
    return stored

@master_ip.setter
def master_ip(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "master_ip", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
    """
    Name for your cluster, must be unique within your account
    """
    stored = pulumi.get(self, "name")
    return stored

@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "name", value)
@property
@pulumi.getter(name="networkId")
def network_id(self) -> Optional[pulumi.Input[str]]:
    """
    The network for the cluster, if not declare we use the default one
    """
    stored = pulumi.get(self, "network_id")
    return stored

@network_id.setter
def network_id(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "network_id", value)
@property
@pulumi.getter(name="numTargetNodes")
def num_target_nodes(self) -> Optional[pulumi.Input[int]]:
    """
    The number of instances to create (optional, the default at the time of writing is 3)
    """
    stored = pulumi.get(self, "num_target_nodes")
    return stored

@num_target_nodes.setter
def num_target_nodes(self, value: Optional[pulumi.Input[int]]):
    pulumi.set(self, "num_target_nodes", value)
@property
@pulumi.getter
def pools(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['KubernetesClusterPoolArgs']]]]:
    # NOTE(review): no generated docstring — presumably the node pools of the
    # cluster; confirm against the Civo provider schema.
    return pulumi.get(self, "pools")

@pools.setter
def pools(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['KubernetesClusterPoolArgs']]]]):
    # Store the value in pulumi's internal property bag.
    pulumi.set(self, "pools", value)
@property
@pulumi.getter
def ready(self) -> Optional[pulumi.Input[bool]]:
    """
    When cluster is ready, this will return `true`
    """
    return pulumi.get(self, "ready")

@ready.setter
def ready(self, value: Optional[pulumi.Input[bool]]):
    # Store the value in pulumi's internal property bag.
    pulumi.set(self, "ready", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
    """
    The region for the cluster, if not declare we use the region in declared in the provider
    """
    return pulumi.get(self, "region")

@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
    # Store the value in pulumi's internal property bag.
    pulumi.set(self, "region", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
    """
    Status of the cluster
    """
    return pulumi.get(self, "status")

@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
    # Store the value in pulumi's internal property bag.
    pulumi.set(self, "status", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[str]]:
    """
    Space separated list of tags, to be used freely as required
    """
    return pulumi.get(self, "tags")

@tags.setter
def tags(self, value: Optional[pulumi.Input[str]]):
    # Store the value in pulumi's internal property bag.
    pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="targetNodesSize")
def target_nodes_size(self) -> Optional[pulumi.Input[str]]:
    """
    The size of each node (optional, the default is currently g3.k3s.medium)
    """
    return pulumi.get(self, "target_nodes_size")

@target_nodes_size.setter
def target_nodes_size(self, value: Optional[pulumi.Input[str]]):
    # Store the value in pulumi's internal property bag.
    pulumi.set(self, "target_nodes_size", value)
class KubernetesCluster(pulumi.CustomResource):
    # Pulumi-generated resource class for a Civo Kubernetes cluster.
    # The two @overload __init__ stubs document the supported call shapes;
    # the real __init__ below dispatches between them.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 applications: Optional[pulumi.Input[str]] = None,
                 firewall_id: Optional[pulumi.Input[str]] = None,
                 kubernetes_version: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 network_id: Optional[pulumi.Input[str]] = None,
                 num_target_nodes: Optional[pulumi.Input[int]] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[str]] = None,
                 target_nodes_size: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Provides a Civo Kubernetes cluster resource. This can be used to create, delete, and modify clusters.
        ## Import
        # using ID
        ```sh
        $ pulumi import civo:index/kubernetesCluster:KubernetesCluster my-cluster 1b8b2100-0e9f-4e8f-ad78-9eb578c2a0af
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] applications: Comma separated list of applications to install. Spaces within application names are fine, but shouldn't be either side of the comma. Application names are case-sensitive; the available applications can be listed with the Civo CLI: 'civo kubernetes applications ls'. If you want to remove a default installed application, prefix it with a '-', e.g. -Traefik. For application that supports plans, you can use 'app*name:app*plan' format e.g. 'Linkerd:Linkerd & Jaeger' or 'MariaDB:5GB'.
        :param pulumi.Input[str] firewall_id: The existing firewall ID to use for this cluster
        :param pulumi.Input[str] kubernetes_version: The version of k3s to install (optional, the default is currently the latest available)
        :param pulumi.Input[str] name: Name for your cluster, must be unique within your account
        :param pulumi.Input[str] network_id: The network for the cluster, if not declare we use the default one
        :param pulumi.Input[int] num_target_nodes: The number of instances to create (optional, the default at the time of writing is 3)
        :param pulumi.Input[str] region: The region for the cluster, if not declare we use the region in declared in the provider
        :param pulumi.Input[str] tags: Space separated list of tags, to be used freely as required
        :param pulumi.Input[str] target_nodes_size: The size of each node (optional, the default is currently g3.k3s.medium)
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: KubernetesClusterArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a Civo Kubernetes cluster resource. This can be used to create, delete, and modify clusters.
        ## Import
        # using ID
        ```sh
        $ pulumi import civo:index/kubernetesCluster:KubernetesCluster my-cluster 1b8b2100-0e9f-4e8f-ad78-9eb578c2a0af
        ```
        :param str resource_name: The name of the resource.
        :param KubernetesClusterArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch: accept either a bundled KubernetesClusterArgs object or
        # plain keyword arguments, then forward to _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(KubernetesClusterArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       applications: Optional[pulumi.Input[str]] = None,
                       firewall_id: Optional[pulumi.Input[str]] = None,
                       kubernetes_version: Optional[pulumi.Input[str]] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       network_id: Optional[pulumi.Input[str]] = None,
                       num_target_nodes: Optional[pulumi.Input[int]] = None,
                       region: Optional[pulumi.Input[str]] = None,
                       tags: Optional[pulumi.Input[str]] = None,
                       target_nodes_size: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        # Shared constructor body for both overloads; builds the property bag
        # and registers the resource with the Pulumi engine.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # No opts.id: creating a new resource, so populate inputs.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = KubernetesClusterArgs.__new__(KubernetesClusterArgs)
            __props__.__dict__["applications"] = applications
            # firewall_id is the only required input (unless looking up by URN).
            if firewall_id is None and not opts.urn:
                raise TypeError("Missing required property 'firewall_id'")
            __props__.__dict__["firewall_id"] = firewall_id
            __props__.__dict__["kubernetes_version"] = kubernetes_version
            __props__.__dict__["name"] = name
            __props__.__dict__["network_id"] = network_id
            __props__.__dict__["num_target_nodes"] = num_target_nodes
            __props__.__dict__["region"] = region
            __props__.__dict__["tags"] = tags
            __props__.__dict__["target_nodes_size"] = target_nodes_size
            # Output-only properties start as None; the engine fills them in.
            __props__.__dict__["api_endpoint"] = None
            __props__.__dict__["created_at"] = None
            __props__.__dict__["dns_entry"] = None
            __props__.__dict__["installed_applications"] = None
            __props__.__dict__["instances"] = None
            __props__.__dict__["kubeconfig"] = None
            __props__.__dict__["master_ip"] = None
            __props__.__dict__["pools"] = None
            __props__.__dict__["ready"] = None
            __props__.__dict__["status"] = None
        super(KubernetesCluster, __self__).__init__(
            'civo:index/kubernetesCluster:KubernetesCluster',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            api_endpoint: Optional[pulumi.Input[str]] = None,
            applications: Optional[pulumi.Input[str]] = None,
            created_at: Optional[pulumi.Input[str]] = None,
            dns_entry: Optional[pulumi.Input[str]] = None,
            firewall_id: Optional[pulumi.Input[str]] = None,
            installed_applications: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['KubernetesClusterInstalledApplicationArgs']]]]] = None,
            instances: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['KubernetesClusterInstanceArgs']]]]] = None,
            kubeconfig: Optional[pulumi.Input[str]] = None,
            kubernetes_version: Optional[pulumi.Input[str]] = None,
            master_ip: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            network_id: Optional[pulumi.Input[str]] = None,
            num_target_nodes: Optional[pulumi.Input[int]] = None,
            pools: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['KubernetesClusterPoolArgs']]]]] = None,
            ready: Optional[pulumi.Input[bool]] = None,
            region: Optional[pulumi.Input[str]] = None,
            status: Optional[pulumi.Input[str]] = None,
            tags: Optional[pulumi.Input[str]] = None,
            target_nodes_size: Optional[pulumi.Input[str]] = None) -> 'KubernetesCluster':
        """
        Get an existing KubernetesCluster resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] api_endpoint: The API server endpoint of the cluster
        :param pulumi.Input[str] applications: Comma separated list of applications to install. Spaces within application names are fine, but shouldn't be either side of the comma. Application names are case-sensitive; the available applications can be listed with the Civo CLI: 'civo kubernetes applications ls'. If you want to remove a default installed application, prefix it with a '-', e.g. -Traefik. For application that supports plans, you can use 'app*name:app*plan' format e.g. 'Linkerd:Linkerd & Jaeger' or 'MariaDB:5GB'.
        :param pulumi.Input[str] created_at: The timestamp when the cluster was created
        :param pulumi.Input[str] dns_entry: The DNS name of the cluster
        :param pulumi.Input[str] firewall_id: The existing firewall ID to use for this cluster
        :param pulumi.Input[str] kubeconfig: The kubeconfig of the cluster
        :param pulumi.Input[str] kubernetes_version: The version of k3s to install (optional, the default is currently the latest available)
        :param pulumi.Input[str] master_ip: The IP address of the master node
        :param pulumi.Input[str] name: Name for your cluster, must be unique within your account
        :param pulumi.Input[str] network_id: The network for the cluster, if not declare we use the default one
        :param pulumi.Input[int] num_target_nodes: The number of instances to create (optional, the default at the time of writing is 3)
        :param pulumi.Input[bool] ready: When cluster is ready, this will return `true`
        :param pulumi.Input[str] region: The region for the cluster, if not declare we use the region in declared in the provider
        :param pulumi.Input[str] status: Status of the cluster
        :param pulumi.Input[str] tags: Space separated list of tags, to be used freely as required
        :param pulumi.Input[str] target_nodes_size: The size of each node (optional, the default is currently g3.k3s.medium)
        """
        # Attach the provider id to the options, then build a state bag from
        # whatever state values the caller supplied.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _KubernetesClusterState.__new__(_KubernetesClusterState)
        __props__.__dict__["api_endpoint"] = api_endpoint
        __props__.__dict__["applications"] = applications
        __props__.__dict__["created_at"] = created_at
        __props__.__dict__["dns_entry"] = dns_entry
        __props__.__dict__["firewall_id"] = firewall_id
        __props__.__dict__["installed_applications"] = installed_applications
        __props__.__dict__["instances"] = instances
        __props__.__dict__["kubeconfig"] = kubeconfig
        __props__.__dict__["kubernetes_version"] = kubernetes_version
        __props__.__dict__["master_ip"] = master_ip
        __props__.__dict__["name"] = name
        __props__.__dict__["network_id"] = network_id
        __props__.__dict__["num_target_nodes"] = num_target_nodes
        __props__.__dict__["pools"] = pools
        __props__.__dict__["ready"] = ready
        __props__.__dict__["region"] = region
        __props__.__dict__["status"] = status
        __props__.__dict__["tags"] = tags
        __props__.__dict__["target_nodes_size"] = target_nodes_size
        return KubernetesCluster(resource_name, opts=opts, __props__=__props__)
    # Read-only output properties resolved by the Pulumi engine.
    @property
    @pulumi.getter(name="apiEndpoint")
    def api_endpoint(self) -> pulumi.Output[str]:
        """
        The API server endpoint of the cluster
        """
        return pulumi.get(self, "api_endpoint")
    @property
    @pulumi.getter
    def applications(self) -> pulumi.Output[Optional[str]]:
        """
        Comma separated list of applications to install. Spaces within application names are fine, but shouldn't be either side of the comma. Application names are case-sensitive; the available applications can be listed with the Civo CLI: 'civo kubernetes applications ls'. If you want to remove a default installed application, prefix it with a '-', e.g. -Traefik. For application that supports plans, you can use 'app*name:app*plan' format e.g. 'Linkerd:Linkerd & Jaeger' or 'MariaDB:5GB'.
        """
        return pulumi.get(self, "applications")
    @property
    @pulumi.getter(name="createdAt")
    def created_at(self) -> pulumi.Output[str]:
        """
        The timestamp when the cluster was created
        """
        return pulumi.get(self, "created_at")
    @property
    @pulumi.getter(name="dnsEntry")
    def dns_entry(self) -> pulumi.Output[str]:
        """
        The DNS name of the cluster
        """
        return pulumi.get(self, "dns_entry")
    @property
    @pulumi.getter(name="firewallId")
    def firewall_id(self) -> pulumi.Output[str]:
        """
        The existing firewall ID to use for this cluster
        """
        return pulumi.get(self, "firewall_id")
    @property
    @pulumi.getter(name="installedApplications")
    def installed_applications(self) -> pulumi.Output[Sequence['outputs.KubernetesClusterInstalledApplication']]:
        return pulumi.get(self, "installed_applications")
    @property
    @pulumi.getter
    def instances(self) -> pulumi.Output[Sequence['outputs.KubernetesClusterInstance']]:
        return pulumi.get(self, "instances")
    @property
    @pulumi.getter
    def kubeconfig(self) -> pulumi.Output[str]:
        """
        The kubeconfig of the cluster
        """
        return pulumi.get(self, "kubeconfig")
    @property
    @pulumi.getter(name="kubernetesVersion")
    def kubernetes_version(self) -> pulumi.Output[str]:
        """
        The version of k3s to install (optional, the default is currently the latest available)
        """
        return pulumi.get(self, "kubernetes_version")
    @property
    @pulumi.getter(name="masterIp")
    def master_ip(self) -> pulumi.Output[str]:
        """
        The IP address of the master node
        """
        return pulumi.get(self, "master_ip")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Name for your cluster, must be unique within your account
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="networkId")
    def network_id(self) -> pulumi.Output[str]:
        """
        The network for the cluster, if not declare we use the default one
        """
        return pulumi.get(self, "network_id")
    @property
    @pulumi.getter(name="numTargetNodes")
    def num_target_nodes(self) -> pulumi.Output[int]:
        """
        The number of instances to create (optional, the default at the time of writing is 3)
        """
        return pulumi.get(self, "num_target_nodes")
    @property
    @pulumi.getter
    def pools(self) -> pulumi.Output[Sequence['outputs.KubernetesClusterPool']]:
        return pulumi.get(self, "pools")
    @property
    @pulumi.getter
    def ready(self) -> pulumi.Output[bool]:
        """
        When cluster is ready, this will return `true`
        """
        return pulumi.get(self, "ready")
    @property
    @pulumi.getter
    def region(self) -> pulumi.Output[str]:
        """
        The region for the cluster, if not declare we use the region in declared in the provider
        """
        return pulumi.get(self, "region")
    @property
    @pulumi.getter
    def status(self) -> pulumi.Output[str]:
        """
        Status of the cluster
        """
        return pulumi.get(self, "status")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[str]]:
        """
        Space separated list of tags, to be used freely as required
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter(name="targetNodesSize")
    def target_nodes_size(self) -> pulumi.Output[str]:
        """
        The size of each node (optional, the default is currently g3.k3s.medium)
        """
        return pulumi.get(self, "target_nodes_size")
| 46.001252
| 531
| 0.656101
| 4,493
| 36,755
| 5.175161
| 0.05542
| 0.088466
| 0.085498
| 0.087992
| 0.87343
| 0.839842
| 0.800404
| 0.781309
| 0.761225
| 0.715508
| 0
| 0.002579
| 0.240512
| 36,755
| 798
| 532
| 46.058897
| 0.830378
| 0.318759
| 0
| 0.684318
| 1
| 0
| 0.109157
| 0.031078
| 0
| 0
| 0
| 0
| 0
| 1
| 0.167006
| false
| 0.002037
| 0.014257
| 0.01222
| 0.285132
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
379e0be25075ea18732e7608cc842a10d5a818ad
| 3,998
|
py
|
Python
|
tests/test_key_existence_validator_visitor.py
|
cyberlis/dictquery
|
47096fe1c66cc48d59cedf1f7f919785a1664c16
|
[
"MIT"
] | 65
|
2018-03-24T08:30:07.000Z
|
2022-02-14T15:13:57.000Z
|
tests/test_key_existence_validator_visitor.py
|
cyberlis/dictquery
|
47096fe1c66cc48d59cedf1f7f919785a1664c16
|
[
"MIT"
] | 1
|
2021-01-28T14:36:21.000Z
|
2021-06-29T19:13:33.000Z
|
tests/test_key_existence_validator_visitor.py
|
cyberlis/dictquery
|
47096fe1c66cc48d59cedf1f7f919785a1664c16
|
[
"MIT"
] | 3
|
2018-04-10T18:16:06.000Z
|
2021-02-03T14:46:45.000Z
|
# -*- coding: utf-8 -*-
from datetime import datetime
import unittest
from dictquery.exceptions import DQValidationError
from dictquery.parsers import DataQueryParser
from dictquery.visitors import KeyExistenceValidatorVisitor
class TestKeyExistenceValidatorVisitor(unittest.TestCase):
    """Tests for KeyExistenceValidatorVisitor.

    The visitor walks a parsed dictquery AST; evaluate() raises
    DQValidationError when the query never references a dict key and
    returns a truthy result when it does. The original file repeated the
    same parse/visit/assert triple ~20 times; the logic is factored into
    two helpers so each test reads as a list of queries and expectations.
    """

    def _visitor(self, query):
        # Build a fresh parser per call so tests share no state.
        return KeyExistenceValidatorVisitor(DataQueryParser().parse(query))

    def _assert_invalid(self, query):
        # A query with no key reference must fail validation.
        with self.assertRaises(DQValidationError):
            self._visitor(query).evaluate()

    def _assert_valid(self, query):
        # A query referencing at least one key must validate truthily.
        self.assertTrue(self._visitor(query).evaluate())

    def test_visit_values(self):
        # Bare literals (int, string, NONE, bool, NOW, regex) are invalid.
        for query in ('12', '"hello"', 'NONE', 'True', 'NOW', r'/\d+/'):
            self._assert_invalid(query)

    def test_visit_and(self):
        # AND is valid only when both operands reference keys.
        self._assert_invalid('12 AND 45')
        self._assert_invalid('hello AND 45')
        self._assert_invalid('hello == 3 AND 45')
        self._assert_valid('hello == 3 AND world > 3')

    def test_visit_or(self):
        # OR follows the same rule as AND.
        self._assert_invalid('12 OR 45')
        self._assert_invalid('hello OR 45')
        self._assert_invalid('hello == 3 OR 45')
        self._assert_valid('hello == 3 OR world > 3')

    def test_visit_not(self):
        # NOT of a literal is invalid; NOT of a key is valid.
        self._assert_invalid('NOT 3')
        self._assert_invalid('NOT False')
        self._assert_valid('NOT hello')

    def test_visit_binary_ops(self):
        # A comparison is valid when either side is a key.
        self._assert_invalid('12 == 45')
        self._assert_valid('hello == 3')
        self._assert_valid('3 == hello')
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| 33.596639
| 59
| 0.649575
| 348
| 3,998
| 7.408046
| 0.140805
| 0.069822
| 0.108611
| 0.244375
| 0.873933
| 0.863848
| 0.84872
| 0.785105
| 0.729247
| 0.694337
| 0
| 0.01071
| 0.252626
| 3,998
| 118
| 60
| 33.881356
| 0.852075
| 0.005253
| 0
| 0.645161
| 0
| 0
| 0.051321
| 0
| 0
| 0
| 0
| 0
| 0.215054
| 1
| 0.053763
| false
| 0
| 0.053763
| 0
| 0.11828
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
37b878a3af165e43a69e1f6e4c2595069dc4d1d9
| 5,922
|
py
|
Python
|
data-generation/get-best-seller-info-alternative.py
|
tanykim/best-bookshelf
|
48f2a6d1a4bc11f3aae9c29b22b84425fcbf381d
|
[
"MIT"
] | 19
|
2017-07-16T15:29:32.000Z
|
2022-03-22T19:58:52.000Z
|
data-generation/get-best-seller-info-alternative.py
|
tanykim/best-bookshelf
|
48f2a6d1a4bc11f3aae9c29b22b84425fcbf381d
|
[
"MIT"
] | null | null | null |
data-generation/get-best-seller-info-alternative.py
|
tanykim/best-bookshelf
|
48f2a6d1a4bc11f3aae9c29b22b84425fcbf381d
|
[
"MIT"
] | 8
|
2017-12-04T08:05:19.000Z
|
2020-10-12T04:21:41.000Z
|
import itertools
import json
from datetime import datetime
####
# This file is only for when NYTimes API is fucked up, saying invalid authorization
####
def get_best_seller_info(t):
    """Summarize a NYTimes best-seller API record.

    :param t: dict with a 'ranks_history' list; each entry carries
        'bestsellers_date' (YYYY-MM-DD), 'rank', 'display_name' and
        'weeks_on_list'.
    :return: dict(best_seller=..., genre=...) where best_seller maps each
        list display name to its entries (date/rank/weeks, in input order)
        and genre is 'Fiction' or 'Nonfiction' when detectable.

    Bug fix: the original ran itertools.groupby over the *unsorted* history.
    groupby only groups consecutive items, and the API interleaves list
    names, so ``best_seller[key] = ...`` repeatedly overwrote earlier runs
    of the same list, silently dropping most entries. Entries are now
    accumulated per list name instead.
    """
    genre = ''
    best_seller = {}
    for record in t['ranks_history']:
        key = record['display_name']
        entry = dict(
            date=datetime.strptime(record['bestsellers_date'], '%Y-%m-%d').strftime('%b %d, %Y'),
            rank=record['rank'],
            weeks=record['weeks_on_list'],
        )
        # setdefault keeps a plain dict result while appending across
        # non-consecutive runs of the same list name.
        best_seller.setdefault(key, []).append(entry)
        # 'Nonfiction' must be tested first: a plain 'Fiction' substring
        # check would not distinguish the two capitalized names otherwise.
        if 'Nonfiction' in key:
            genre = 'Nonfiction'
        elif 'Fiction' in key:
            genre = 'Fiction'
    return dict(best_seller=best_seller, genre=genre)
# manually get the data from Chrom URL, and copy the matching result element
t = {"title":"GRANT","description":"A biography of the Union general of the Civil War and two-term president of the United States.","contributor":"by Ron Chernow","author":"Ron Chernow","contributor_note":"","price":0,"age_group":"","publisher":"Penguin Press","isbns":[{"isbn10":"159420487X","isbn13":"9781594204876"},{"isbn10":"052552195X","isbn13":"9780525521952"}],"ranks_history":[{"primary_isbn10":"159420487X","primary_isbn13":"9781594204876","rank":3,"list_name":"Hardcover Nonfiction","display_name":"Hardcover Nonfiction","published_date":"2017-12-31","bestsellers_date":"2017-12-16","weeks_on_list":10,"ranks_last_week":0,"asterisk":0,"dagger":0},{"primary_isbn10":"159420487X","primary_isbn13":"9781594204876","rank":4,"list_name":"Combined Print and E-Book Nonfiction","display_name":"Combined Print & E-Book Nonfiction","published_date":"2017-12-31","bestsellers_date":"2017-12-16","weeks_on_list":10,"ranks_last_week":0,"asterisk":0,"dagger":0},{"primary_isbn10":"159420487X","primary_isbn13":"9781594204876","rank":2,"list_name":"Hardcover Nonfiction","display_name":"Hardcover Nonfiction","published_date":"2017-12-24","bestsellers_date":"2017-12-09","weeks_on_list":9,"ranks_last_week":0,"asterisk":0,"dagger":0},{"primary_isbn10":"159420487X","primary_isbn13":"9781594204876","rank":2,"list_name":"Combined Print and E-Book Nonfiction","display_name":"Combined Print & E-Book Nonfiction","published_date":"2017-12-24","bestsellers_date":"2017-12-09","weeks_on_list":9,"ranks_last_week":0,"asterisk":0,"dagger":0},{"primary_isbn10":"159420487X","primary_isbn13":"9781594204876","rank":3,"list_name":"Combined Print and E-Book Nonfiction","display_name":"Combined Print & E-Book Nonfiction","published_date":"2017-12-17","bestsellers_date":"2017-12-02","weeks_on_list":8,"ranks_last_week":0,"asterisk":0,"dagger":0},{"primary_isbn10":"159420487X","primary_isbn13":"9781594204876","rank":2,"list_name":"Hardcover Nonfiction","display_name":"Hardcover 
Nonfiction","published_date":"2017-12-17","bestsellers_date":"2017-12-02","weeks_on_list":8,"ranks_last_week":0,"asterisk":0,"dagger":0},{"primary_isbn10":"159420487X","primary_isbn13":"9781594204876","rank":4,"list_name":"Hardcover Nonfiction","display_name":"Hardcover Nonfiction","published_date":"2017-12-10","bestsellers_date":"2017-11-25","weeks_on_list":7,"ranks_last_week":0,"asterisk":0,"dagger":0},{"primary_isbn10":"159420487X","primary_isbn13":"9781594204876","rank":6,"list_name":"Combined Print and E-Book Nonfiction","display_name":"Combined Print & E-Book Nonfiction","published_date":"2017-12-10","bestsellers_date":"2017-11-25","weeks_on_list":7,"ranks_last_week":0,"asterisk":0,"dagger":0},{"primary_isbn10":"159420487X","primary_isbn13":"9781594204876","rank":6,"list_name":"Hardcover Nonfiction","display_name":"Hardcover Nonfiction","published_date":"2017-12-03","bestsellers_date":"2017-11-18","weeks_on_list":6,"ranks_last_week":0,"asterisk":0,"dagger":0},{"primary_isbn10":"159420487X","primary_isbn13":"9781594204876","rank":7,"list_name":"Combined Print and E-Book Nonfiction","display_name":"Combined Print & E-Book Nonfiction","published_date":"2017-12-03","bestsellers_date":"2017-11-18","weeks_on_list":6,"ranks_last_week":0,"asterisk":0,"dagger":0},{"primary_isbn10":"159420487X","primary_isbn13":"9781594204876","rank":4,"list_name":"Hardcover Nonfiction","display_name":"Hardcover Nonfiction","published_date":"2017-11-26","bestsellers_date":"2017-11-11","weeks_on_list":5,"ranks_last_week":0,"asterisk":0,"dagger":0},{"primary_isbn10":"159420487X","primary_isbn13":"9781594204876","rank":5,"list_name":"Combined Print and E-Book Nonfiction","display_name":"Combined Print & E-Book Nonfiction","published_date":"2017-11-26","bestsellers_date":"2017-11-11","weeks_on_list":5,"ranks_last_week":0,"asterisk":0,"dagger":0},{"primary_isbn10":"159420487X","primary_isbn13":"9781594204876","rank":5,"list_name":"Hardcover Nonfiction","display_name":"Hardcover 
Nonfiction","published_date":"2017-11-19","bestsellers_date":"2017-11-04","weeks_on_list":4,"ranks_last_week":0,"asterisk":0,"dagger":0},{"primary_isbn10":"159420487X","primary_isbn13":"9781594204876","rank":5,"list_name":"Combined Print and E-Book Nonfiction","display_name":"Combined Print & E-Book Nonfiction","published_date":"2017-11-19","bestsellers_date":"2017-11-04","weeks_on_list":4,"ranks_last_week":0,"asterisk":0,"dagger":0},{"primary_isbn10":"159420487X","primary_isbn13":"9781594204876","rank":5,"list_name":"Combined Print and E-Book Nonfiction","display_name":"Combined Print & E-Book Nonfiction","published_date":"2017-11-12","bestsellers_date":"2017-10-28","weeks_on_list":3,"ranks_last_week":0,"asterisk":0,"dagger":0}],"reviews":[{"book_review_link":"https://www.nytimes.com/2017/10/12/books/review/ron-chernow-ulysses-s-grant-biography-bill-clinton.html","first_chapter_link":"","sunday_review_link":"","article_chapter_link":""}]};
# Transform the pasted API response above and emit the summary on stdout.
data = get_best_seller_info(t)
# ensure_ascii=True keeps the JSON output 7-bit safe for downstream tooling.
json_data = json.dumps(data, ensure_ascii=True)
print (json_data)
| 185.0625
| 4,908
| 0.737589
| 853
| 5,922
| 4.910903
| 0.181712
| 0.057293
| 0.042015
| 0.107424
| 0.756028
| 0.747434
| 0.73693
| 0.73693
| 0.729768
| 0.729768
| 0
| 0.136947
| 0.053023
| 5,922
| 31
| 4,909
| 191.032258
| 0.610021
| 0.026342
| 0
| 0
| 0
| 0.043478
| 0.688966
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.130435
| 0
| 0.217391
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
80d5029266b11dcc7e24d0745f92fddf4df98c86
| 285
|
py
|
Python
|
platform/core/polyaxon/conf/options/auth_bitbucket.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
platform/core/polyaxon/conf/options/auth_bitbucket.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
platform/core/polyaxon/conf/options/auth_bitbucket.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
import conf
from options.registry import auth_bitbucket
# Register every Bitbucket-auth option with the conf subscription registry.
_BITBUCKET_OPTIONS = (
    auth_bitbucket.AuthBitbucketEnabled,
    auth_bitbucket.AuthBitbucketVerificationSchedule,
    auth_bitbucket.AuthBitbucketClientId,
    auth_bitbucket.AuthBitbucketClientSecret,
)
for _option in _BITBUCKET_OPTIONS:
    conf.subscribe(_option)
| 31.666667
| 64
| 0.898246
| 28
| 285
| 8.964286
| 0.428571
| 0.258964
| 0.270916
| 0.414343
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042105
| 285
| 8
| 65
| 35.625
| 0.919414
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
ff199c568dda2966b0aa68ed2fc14b6248ca9304
| 102
|
py
|
Python
|
src_Python/EtabsAPIaface0/a01comtypes/ex001.py
|
fjmucho/APIdeEtabsYPython
|
a5c7f7fe1861c4ac3c9370ef06e291f94c6fd523
|
[
"MIT"
] | null | null | null |
src_Python/EtabsAPIaface0/a01comtypes/ex001.py
|
fjmucho/APIdeEtabsYPython
|
a5c7f7fe1861c4ac3c9370ef06e291f94c6fd523
|
[
"MIT"
] | null | null | null |
src_Python/EtabsAPIaface0/a01comtypes/ex001.py
|
fjmucho/APIdeEtabsYPython
|
a5c7f7fe1861c4ac3c9370ef06e291f94c6fd523
|
[
"MIT"
] | null | null | null |
from comtypes.client import CreateObject
from comtypes.client import ShowEvents
help(ShowEvents)
| 20.4
| 41
| 0.823529
| 12
| 102
| 7
| 0.583333
| 0.285714
| 0.428571
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137255
| 102
| 4
| 42
| 25.5
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
209b03b12ddc257cfd16ad41dca552bb4f95e254
| 212
|
py
|
Python
|
config.py
|
safir72347/insta-bot
|
de22bd7cbaa2ca705664b6f94336d5a5e3d1e6a3
|
[
"MIT"
] | null | null | null |
config.py
|
safir72347/insta-bot
|
de22bd7cbaa2ca705664b6f94336d5a5e3d1e6a3
|
[
"MIT"
] | null | null | null |
config.py
|
safir72347/insta-bot
|
de22bd7cbaa2ca705664b6f94336d5a5e3d1e6a3
|
[
"MIT"
] | null | null | null |
class login_details:
    """Container for Instagram login credentials.

    The class name stays lowercase for backward compatibility with existing
    callers (PEP 8 would prefer ``LoginDetails``). Generalized so credentials
    can be supplied at construction time; the no-argument form behaves
    exactly as before (both fields default to the empty string).
    """

    def __init__(self, username="", password=""):
        # Defaults preserve the original zero-argument construction.
        self.username = username
        self.password = password

    def get_username(self):
        """Return the stored username ('' if never set)."""
        return self.username

    def get_password(self):
        """Return the stored password ('' if never set)."""
        return self.password
| 26.5
| 28
| 0.617925
| 24
| 212
| 5.166667
| 0.416667
| 0.193548
| 0.225806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.287736
| 212
| 8
| 29
| 26.5
| 0.821192
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.375
| false
| 0.375
| 0
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 7
|
20dc0a4855cee295781e8fdef86a7bbb6f6424e3
| 79,518
|
py
|
Python
|
gpMgmt/bin/gppylib/test/unit/test_unit_gprestore_filter.py
|
shahin/gpdb
|
3909ad6b2d9bc06ed4659d2c9223fc12b9409a33
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
gpMgmt/bin/gppylib/test/unit/test_unit_gprestore_filter.py
|
shahin/gpdb
|
3909ad6b2d9bc06ed4659d2c9223fc12b9409a33
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
gpMgmt/bin/gppylib/test/unit/test_unit_gprestore_filter.py
|
shahin/gpdb
|
3909ad6b2d9bc06ed4659d2c9223fc12b9409a33
|
[
"PostgreSQL",
"Apache-2.0"
] | 1
|
2018-12-04T09:13:57.000Z
|
2018-12-04T09:13:57.000Z
|
#!/usr/bin/env python
# coding: utf-8
import os, sys
import unittest2 as unittest
from gppylib import gplog
from mock import patch
from gppylib.mainUtils import ExceptionNoStackTraceNeeded
from gprestore_filter import get_table_schema_set, extract_schema, extract_table, \
process_data, get_table_info, process_schema, check_valid_schema, check_valid_relname, \
check_dropped_table, get_table_from_alter_table
logger = gplog.get_unittest_logger()
class GpRestoreFilterTestCase(unittest.TestCase):
def test_get_table_schema_set00(self):
fname = os.path.join(os.getcwd(), 'test1')
with open(fname, 'w') as fd:
fd.write('public.ao1\n')
fd.write(' pepper.ao2 \n')
(sc, tb) = get_table_schema_set(fname)
self.assertEquals(sc, set(['public', ' pepper']))
self.assertEquals(tb, set([('public','ao1'), (' pepper','ao2 ')]))
os.remove(fname)
def test_get_table_schema_set01(self):
fname = os.path.join(os.getcwd(), 'test1')
with open(fname, 'w') as fd:
fd.write('publicao1\n')
fd.write(' pepper.ao2 \n')
with self.assertRaisesRegexp(Exception, "need more than 1 value to unpack"):
get_table_schema_set(fname)
os.remove(fname)
def test_get_table_schema_set02(self):
fname = os.path.join(os.getcwd(), 'test1')
with open(fname, 'w') as fd:
fd.write('')
(sc, tb) = get_table_schema_set(fname)
self.assertEquals(sc, set())
self.assertEquals(tb, set())
os.remove(fname)
def test_extract_schema00(self):
line = 'SET search_path = pepper, pg_catalog;'
schema = extract_schema(line)
self.assertEquals(schema, 'pepper')
def test_extract_schema01(self):
line = 'SET search_path = pepper pg_catalog;'
with self.assertRaisesRegexp(Exception, "Failed to extract schema name"):
schema = extract_schema(line)
def test_extract_table00(self):
line = 'COPY ao_table (column1, column2, column3) FROM stdin;'
table = extract_table(line)
self.assertEqual(table, 'ao_table')
def test_extract_table01(self):
line = 'COPYao_table(column1column2column3)FROMstdin;'
with self.assertRaisesRegexp(Exception, "Failed to extract table name"):
table = extract_table(line)
def test_get_table_from_alter_table_with_schemaname(self):
line = 'ALTER TABLE schema1.table1 OWNER TO gpadmin;'
alter_expr = "ALTER TABLE"
res = get_table_from_alter_table(line, alter_expr)
self.assertEqual(res, 'table1')
def test_get_table_from_alter_table_without_schemaname(self):
line = 'ALTER TABLE table1 OWNER TO gpadmin;'
alter_expr = "ALTER TABLE"
res = get_table_from_alter_table(line, alter_expr)
self.assertEqual(res, 'table1')
def test_get_table_from_alter_table_with_specialchar(self):
line = 'ALTER TABLE Tab#$_1 OWNER TO gpadmin;'
alter_expr = "ALTER TABLE"
res = get_table_from_alter_table(line, alter_expr)
self.assertEqual(res, 'Tab#$_1')
def test_get_table_from_alter_table_with_specialchar_and_schema(self):
line = 'ALTER TABLE "Foo#$1"."Tab#$_1" OWNER TO gpadmin;'
alter_expr = "ALTER TABLE"
res = get_table_from_alter_table(line, alter_expr)
self.assertEqual(res, '"Tab#$_1"')
def test_get_table_from_alter_table_with_specialchar(self):
line = 'ALTER TABLE "T a""b#$_1" OWNER TO gpadmin;'
alter_expr = "ALTER TABLE"
res = get_table_from_alter_table(line, alter_expr)
self.assertEqual(res, '"T a""b#$_1"')
def test_get_table_from_alter_table_with_specialchar_and_double_quoted_schema(self):
line = 'ALTER TABLE "schema1".table1 OWNER TO gpadmin;'
alter_expr = "ALTER TABLE"
res = get_table_from_alter_table(line, alter_expr)
self.assertEqual(res, 'table1')
def test_process_data00(self):
test_case_buf = """
--
-- Greenplum Database database dump
--
SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET search_path = pepper, pg_catalog;
SET default_with_oids = false;
--
-- Data for Name: ao_table; Type: TABLE DATA; Schema: pepper; Owner: dcddev
--
COPY ao_table (column1, column2, column3) FROM stdin;
3 backup 2010-01-04
7 backup 2010-01-08
11 backup 2010-01-12
15 backup 2010-01-16
19 backup 2010-01-20
23 backup 2010-01-24
\.
--
-- Greenplum Database database dump complete
--
"""
expected_out = """SET search_path = pepper, pg_catalog;
COPY ao_table (column1, column2, column3) FROM stdin;
3 backup 2010-01-04
7 backup 2010-01-08
11 backup 2010-01-12
15 backup 2010-01-16
19 backup 2010-01-20
23 backup 2010-01-24
\.
"""
in_name = os.path.join(os.getcwd(), 'infile')
out_name = os.path.join(os.getcwd(), 'outfile')
with open(in_name, 'w') as fd:
fd.write(test_case_buf)
dump_schemas = set(['pepper'])
dump_tables = set([('pepper', 'ao_table')])
with open(out_name, 'w') as fdout:
with open(in_name, 'r') as fdin:
process_data(dump_schemas, dump_tables, fdin, fdout, None)
with open(out_name, 'r') as fd:
results = fd.read()
self.assertEquals(results, expected_out)
os.remove(in_name)
os.remove(out_name)
def test_process_data01(self):
test_case_buf = """
--
-- Greenplum Database database dump
--
SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET search_path = public, pg_catalog;
SET default_with_oids = false;
--
-- Data for Name: ao_index_table; Type: TABLE DATA; Schema: public; Owner: dcddev
--
COPY ao_index_table (column1, column2, column3) FROM stdin;
3 backup 2010-01-04
7 backup 2010-01-08
11 backup 2010-01-12
15 backup 2010-01-16
1091 restore 2012-12-27
\.
--
-- Data for Name: ao_part_table; Type: TABLE DATA; Schema: public; Owner: dcddev
--
COPY ao_part_table (column1, column2, column3) FROM stdin;
\.
--
-- Data for Name: ao_part_table_1_prt_p1; Type: TABLE DATA; Schema: public; Owner: dcddev
--
COPY ao_part_table_1_prt_p1 (column1, column2, column3) FROM stdin;
\.
--
-- Data for Name: ao_part_table_1_prt_p1_2_prt_1; Type: TABLE DATA; Schema: public; Owner: dcddev
--
COPY ao_part_table_1_prt_p1_2_prt_1 (column1, column2, column3) FROM stdin;
2 backup 2010-01-03
6 backup 2010-01-07
10 backup 2010-01-11
14 backup 2010-01-15
18 backup 2010-01-19
22 backup 2010-01-23
26 backup 2010-01-27
30 backup 2010-01-31
34 backup 2010-02-04
361 backup 2010-12-28
\.
--
-- Data for Name: ao_part_table_1_prt_p1_2_prt_2; Type: TABLE DATA; Schema: public; Owner: dcddev
--
COPY ao_part_table_1_prt_p1_2_prt_2 (column1, column2, column3) FROM stdin;
365 backup 2011-01-01
369 backup 2011-01-05
719 backup 2011-12-21
723 backup 2011-12-25
727 backup 2011-12-29
\.
--
-- Data for Name: ao_part_table_comp; Type: TABLE DATA; Schema: public; Owner: dcddev
--
COPY ao_part_table_comp (column1, column2, column3) FROM stdin;
\.
--
-- Data for Name: ao_part_table_comp_1_prt_p1; Type: TABLE DATA; Schema: public; Owner: dcddev
--
COPY ao_part_table_comp_1_prt_p1 (column1, column2, column3) FROM stdin;
\.
--
-- Data for Name: ao_part_table_comp_1_prt_p1_2_prt_1; Type: TABLE DATA; Schema: public; Owner: dcddev
--
COPY ao_part_table_comp_1_prt_p1_2_prt_1 (column1, column2, column3) FROM stdin;
1 backup 2010-01-02
5 backup 2010-01-06
9 backup 2010-01-10
13 backup 2010-01-14
17 backup 2010-01-18
1063 restore 2012-11-29
1067 restore 2012-12-03
1071 restore 2012-12-07
1075 restore 2012-12-11
1079 restore 2012-12-15
1083 restore 2012-12-19
1087 restore 2012-12-23
1091 restore 2012-12-27
\.
--
-- Greenplum Database database dump complete
--
"""
expected_out = """SET search_path = public, pg_catalog;
COPY ao_part_table_1_prt_p1_2_prt_1 (column1, column2, column3) FROM stdin;
2 backup 2010-01-03
6 backup 2010-01-07
10 backup 2010-01-11
14 backup 2010-01-15
18 backup 2010-01-19
22 backup 2010-01-23
26 backup 2010-01-27
30 backup 2010-01-31
34 backup 2010-02-04
361 backup 2010-12-28
\.
COPY ao_part_table_comp_1_prt_p1_2_prt_1 (column1, column2, column3) FROM stdin;
1 backup 2010-01-02
5 backup 2010-01-06
9 backup 2010-01-10
13 backup 2010-01-14
17 backup 2010-01-18
1063 restore 2012-11-29
1067 restore 2012-12-03
1071 restore 2012-12-07
1075 restore 2012-12-11
1079 restore 2012-12-15
1083 restore 2012-12-19
1087 restore 2012-12-23
1091 restore 2012-12-27
\.
"""
in_name = os.path.join(os.getcwd(), 'infile')
out_name = os.path.join(os.getcwd(), 'outfile')
with open(in_name, 'w') as fd:
fd.write(test_case_buf)
dump_schemas = set(['public'])
dump_tables = set([('public', 'ao_part_table_comp_1_prt_p1_2_prt_1'), ('public', 'ao_part_table_1_prt_p1_2_prt_1')])
with open(out_name, 'w') as fdout:
with open(in_name, 'r') as fdin:
process_data(dump_schemas, dump_tables, fdin, fdout, None)
with open(out_name, 'r') as fd:
results = fd.read()
self.assertEquals(results, expected_out)
os.remove(in_name)
os.remove(out_name)
def test_process_data03(self):
test_case_buf = """
COPY ao_table (column1, column2, column3) FROM stdin;
3 backup 2010-01-04
7 backup 2010-01-08
11 backup 2010-01-12
15 backup 2010-01-16
19 backup 2010-01-20
23 backup 2010-01-24
\.
"""
expected_out = ''
in_name = os.path.join(os.getcwd(), 'infile')
out_name = os.path.join(os.getcwd(), 'outfile')
with open(in_name, 'w') as fd:
fd.write(test_case_buf)
dump_schemas = set(['public'])
dump_tables = set([('public', 'ao_table')])
with open(out_name, 'w') as fdout:
with open(in_name, 'r') as fdin:
process_data(dump_schemas, dump_tables, fdin, fdout, None)
with open(out_name, 'r') as fd:
results = fd.read()
self.assertEquals(results, expected_out)
os.remove(in_name)
os.remove(out_name)
def test_process_data04(self):
test_case_buf = """
--
-- Greenplum Database database dump
--
SET search_path = pepper, pg_catalog;
--
-- Data for Name: ao_table; Type: TABLE DATA; Schema: pepper; Owner: dcddev
--
COPY ao_table (column1, column2, column3) FROM stdin;
3 backup 2010-01-04
7 backup 2010-01-08
11 backup 2010-01-12
15 backup 2010-01-16
19 backup 2010-01-20
23 backup 2010-01-24
\.
--
-- Greenplum Database database dump complete
--
"""
expected_out = """SET search_path = pepper, pg_catalog;
"""
in_name = os.path.join(os.getcwd(), 'infile')
out_name = os.path.join(os.getcwd(), 'outfile')
with open(in_name, 'w') as fd:
fd.write(test_case_buf)
dump_schemas = set(['pepper'])
dump_tables = set([('pepper', 'ao_table')])
with open(out_name, 'w') as fdout:
with open(in_name, 'r') as fdin:
process_data(dump_schemas, dump_tables, fdin, fdout)
with open(out_name, 'r') as fd:
results = fd.read()
self.assertEquals(results, expected_out)
os.remove(in_name)
os.remove(out_name)
def test_process_data04(self):
test_case_buf = """
--
-- Greenplum Database database dump
--
SET search_path = pepper, pg_catalog;
--
-- Data for Name: ao_table; Type: TABLE DATA; Schema: pepper; Owner: dcddev
--
COPY ao_table (column1, column2, column3) FROM stdin;
3 backup 2010-01-04
7 backup 2010-01-08
11 backup 2010-01-12
15 backup 2010-01-16
19 backup 2010-01-20
23 backup 2010-01-24
\.
--
-- Greenplum Database database dump complete
--
"""
expected_out = """SET search_path = pepper, pg_catalog;
"""
in_name = os.path.join(os.getcwd(), 'infile')
out_name = os.path.join(os.getcwd(), 'outfile')
with open(in_name, 'w') as fd:
fd.write(test_case_buf)
dump_schemas = set(['pepper'])
dump_tables = set([('pepper', 'ao_table')])
with open(out_name, 'w') as fdout:
with open(in_name, 'r') as fdin:
process_data(dump_schemas, dump_tables, fdin, fdout, None)
with open(out_name, 'r') as fd:
results = fd.read()
self.assertEquals(results, expected_out)
os.remove(in_name)
os.remove(out_name)
def test_process_data_multi_byte_char(self):
test_case_buf = """SET search_path = public, pg_catalog;
--
-- Data for Name: 测试; Type: TABLE DATA; Schema: public; Owner: dcddev
--
COPY "测试" (column1, column2, column3) FROM stdin;
3 backup 2010-01-04
7 backup 2010-01-08
11 backup 2010-01-12
15 backup 2010-01-16
19 backup 2010-01-20
23 backup 2010-01-24
\.
"""
expected_out = """SET search_path = public, pg_catalog;
COPY "测试" (column1, column2, column3) FROM stdin;
3 backup 2010-01-04
7 backup 2010-01-08
11 backup 2010-01-12
15 backup 2010-01-16
19 backup 2010-01-20
23 backup 2010-01-24
\.
"""
in_name = os.path.join(os.getcwd(), 'infile')
out_name = os.path.join(os.getcwd(), 'outfile')
with open(in_name, 'w') as fd:
fd.write(test_case_buf)
dump_schemas = set(['public'])
dump_tables = set([('public', '测试')])
with open(out_name, 'w') as fdout:
with open(in_name, 'r') as fdin:
process_data(dump_schemas, dump_tables, fdin, fdout, None)
with open(out_name, 'r') as fd:
results = fd.read()
self.assertEquals(results, expected_out)
os.remove(in_name)
os.remove(out_name)
def test_get_table_info00(self):
line = ''
(name, type, schema) = get_table_info(line, '-- Name: ')
self.assertEquals(name, None)
self.assertEquals(type, None)
self.assertEquals(schema, None)
def test_get_table_info01(self):
line = '-- Name: public; Type: ACL; Schema: -; Owner: root'
comment_expr = '-- Name: '
(name, type, schema) = get_table_info(line, comment_expr)
self.assertEquals(name, 'public')
self.assertEquals(type, 'ACL')
self.assertEquals(schema, '-')
def test_process_schema00(self):
test_case_buf = """--
-- Greenplum Database database dump
--
SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
--
-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: dcddev
--
COMMENT ON SCHEMA public IS 'Standard public schema';
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
-- Name: ao_part_table; Type: TABLE; Schema: public; Owner: dcddev; Tablespace:
--
--
-- Name: heap_table1; Type: TABLE; Schema: public; Owner: dcddev; Tablespace:
--
CREATE TABLE heap_table1 (
column1 integer,
column2 character varying(20),
column3 date
) DISTRIBUTED BY (column1);"""
dump_schemas = ['public']
dump_tables = [('public', 'heap_table1')]
infile = '/tmp/test_schema.in'
outfile = '/tmp/test_schema.out'
with open(infile, 'w') as fd:
fd.write(test_case_buf)
with open(infile, 'r') as fdin:
with open(outfile, 'w') as fdout:
process_schema(dump_schemas, dump_tables, fdin, fdout, None)
expected_out = """SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
--
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
-- Name: heap_table1; Type: TABLE; Schema: public; Owner: dcddev; Tablespace:
--
CREATE TABLE heap_table1 (
column1 integer,
column2 character varying(20),
column3 date
) DISTRIBUTED BY (column1);"""
with open(outfile, 'r') as fd:
results = fd.read()
self.assertEquals(results, expected_out)
os.remove(infile)
os.remove(outfile)
def test_process_schema_matching_table(self):
test_case_buf = """--
-- Greenplum Database database dump
--
SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: dcddev
--
COMMENT ON SCHEMA public IS 'Standard public schema';
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
-- Name: ao_part_table; Type: TABLE; Schema: public; Owner: dcddev; Tablespace:
--
--
-- Name: heap_table; Type: TABLE; Schema: public; Owner: dcddev; Tablespace:
--
CREATE TABLE heap_table (
column1 integer,
column2 character varying(20),
column3 date
) DISTRIBUTED BY (column1);"""
dump_schemas = ['public']
dump_tables = [('public', 'heap_table')]
infile = '/tmp/test_schema.in'
outfile = '/tmp/test_schema.out'
with open(infile, 'w') as fd:
fd.write(test_case_buf)
with open(infile, 'r') as fdin:
with open(outfile, 'w') as fdout:
process_schema(dump_schemas, dump_tables, fdin, fdout, None)
expected_out = """SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
-- Name: heap_table; Type: TABLE; Schema: public; Owner: dcddev; Tablespace:
--
CREATE TABLE heap_table (
column1 integer,
column2 character varying(20),
column3 date
) DISTRIBUTED BY (column1);"""
with open(outfile, 'r') as fd:
results = fd.read()
self.assertEquals(results, expected_out)
os.remove(infile)
os.remove(outfile)
def test_process_schema_mismatched_table(self):
test_case_buf = """--
-- Greenplum Database database dump
--
SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: dcddev
--
COMMENT ON SCHEMA public IS 'Standard public schema';
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
-- Name: ao_part_table; Type: TABLE; Schema: public; Owner: dcddev; Tablespace:
--
--
-- Name: heap_table1; Type: TABLE; Schema: public; Owner: dcddev; Tablespace:
--
CREATE TABLE heap_table1 (
column1 integer,
column2 character varying(20),
column3 date
) DISTRIBUTED BY (column1);"""
dump_schemas = ['public']
dump_tables = [('pepper', 'heap_table1')]
infile = '/tmp/test_schema.in'
outfile = '/tmp/test_schema.out'
with open(infile, 'w') as fd:
fd.write(test_case_buf)
with open(infile, 'r') as fdin:
with open(outfile, 'w') as fdout:
process_schema(dump_schemas, dump_tables, fdin, fdout, None)
expected_out = """SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
"""
with open(outfile, 'r') as fd:
results = fd.read()
self.assertEquals(results, expected_out)
os.remove(infile)
os.remove(outfile)
def test_process_schema_mismatched_schema(self):
test_case_buf = """--
-- Greenplum Database database dump
--
SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: dcddev
--
COMMENT ON SCHEMA public IS 'Standard public schema';
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
-- Name: ao_part_table; Type: TABLE; Schema: public; Owner: dcddev; Tablespace:
--
--
-- Name: heap_table1; Type: TABLE; Schema: public; Owner: dcddev; Tablespace:
--
CREATE TABLE heap_table1 (
column1 integer,
column2 character varying(20),
column3 date
) DISTRIBUTED BY (column1);"""
dump_schemas = ['public']
dump_tables = [('public', 'heap_table1'), ('pepper','ao_part_table')]
infile = '/tmp/test_schema.in'
outfile = '/tmp/test_schema.out'
with open(infile, 'w') as fd:
fd.write(test_case_buf)
with open(infile, 'r') as fdin:
with open(outfile, 'w') as fdout:
process_schema(dump_schemas, dump_tables, fdin, fdout, None)
expected_out = """SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
-- Name: heap_table1; Type: TABLE; Schema: public; Owner: dcddev; Tablespace:
--
CREATE TABLE heap_table1 (
column1 integer,
column2 character varying(20),
column3 date
) DISTRIBUTED BY (column1);"""
with open(outfile, 'r') as fd:
results = fd.read()
self.assertEquals(results, expected_out)
os.remove(infile)
os.remove(outfile)
def test_process_schema_missing_schema(self):
test_case_buf = """--
-- Greenplum Database database dump
--
SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: dcddev
--
COMMENT ON SCHEMA public IS 'Standard public schema';
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
-- Name: ao_part_table; Type: TABLE; Schema: public; Owner: dcddev; Tablespace:
--
--
-- Name: heap_table1; Type: TABLE; Schema: public; Owner: dcddev; Tablespace:
--
CREATE TABLE heap_table1 (
column1 integer,
column2 character varying(20),
column3 date
) DISTRIBUTED BY (column1);"""
dump_schemas = ['public']
dump_tables = [('public', 'heap_table1'), ('pepper','ao_part_table')]
infile = '/tmp/test_schema.in'
outfile = '/tmp/test_schema.out'
with open(infile, 'w') as fd:
fd.write(test_case_buf)
with open(infile, 'r') as fdin:
with open(outfile, 'w') as fdout:
process_schema(dump_schemas, dump_tables, fdin, fdout, None)
expected_out = """SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
-- Name: heap_table1; Type: TABLE; Schema: public; Owner: dcddev; Tablespace:
--
CREATE TABLE heap_table1 (
column1 integer,
column2 character varying(20),
column3 date
) DISTRIBUTED BY (column1);"""
with open(outfile, 'r') as fd:
results = fd.read()
self.assertEquals(results, expected_out)
os.remove(infile)
os.remove(outfile)
def test_process_schema_matching_constraint(self):
test_case_buf = """--
-- Greenplum Database database dump
--
SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: dcddev
--
COMMENT ON SCHEMA public IS 'Standard public schema';
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
-- Name: ao_part_table_constraint; Type: CONSTRAINT; Schema: public; Owner: dcddev; Tablespace:
--
ALTER TABLE ONLY public.ao_part_table
ADD CONSTRAINT constraint_name PRIMARY KEY (name);
--
-- Name: heap_table1; Type: TABLE; Schema: public; Owner: dcddev; Tablespace:
--
CREATE TABLE heap_table1 (
column1 integer,
column2 character varying(20),
column3 date
) DISTRIBUTED BY (column1);"""
dump_schemas = ['public']
dump_tables = [('public', 'heap_table1'), ('public','ao_part_table')]
infile = '/tmp/test_schema.in'
outfile = '/tmp/test_schema.out'
with open(infile, 'w') as fd:
fd.write(test_case_buf)
with open(infile, 'r') as fdin:
with open(outfile, 'w') as fdout:
process_schema(dump_schemas, dump_tables, fdin, fdout, None)
expected_out = """SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
-- Name: ao_part_table_constraint; Type: CONSTRAINT; Schema: public; Owner: dcddev; Tablespace:
ALTER TABLE ONLY public.ao_part_table
ADD CONSTRAINT constraint_name PRIMARY KEY (name);
--
-- Name: heap_table1; Type: TABLE; Schema: public; Owner: dcddev; Tablespace:
--
CREATE TABLE heap_table1 (
column1 integer,
column2 character varying(20),
column3 date
) DISTRIBUTED BY (column1);"""
with open(outfile, 'r') as fd:
results = fd.read()
self.assertEquals(results, expected_out)
os.remove(infile)
os.remove(outfile)
def test_process_schema_mismatched_constraint(self):
test_case_buf = """--
-- Greenplum Database database dump
--
SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: dcddev
--
COMMENT ON SCHEMA public IS 'Standard public schema';
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
-- Name: ao_part_table_constraint; Type: CONSTRAINT; Schema: public; Owner: dcddev; Tablespace:
--
ALTER TABLE ONLY public.ao_part_table
ADD CONSTRAINT constraint_name PRIMARY KEY (name);
--
-- Name: heap_table1; Type: TABLE; Schema: public; Owner: dcddev; Tablespace:
--
CREATE TABLE heap_table1 (
column1 integer,
column2 character varying(20),
column3 date
) DISTRIBUTED BY (column1);"""
dump_schemas = ['public']
dump_tables = [('public', 'heap_table1')]
infile = '/tmp/test_schema.in'
outfile = '/tmp/test_schema.out'
with open(infile, 'w') as fd:
fd.write(test_case_buf)
with open(infile, 'r') as fdin:
with open(outfile, 'w') as fdout:
process_schema(dump_schemas, dump_tables, fdin, fdout, None)
expected_out = """SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
-- Name: heap_table1; Type: TABLE; Schema: public; Owner: dcddev; Tablespace:
--
CREATE TABLE heap_table1 (
column1 integer,
column2 character varying(20),
column3 date
) DISTRIBUTED BY (column1);"""
with open(outfile, 'r') as fd:
results = fd.read()
self.assertEquals(results, expected_out)
os.remove(infile)
os.remove(outfile)
def test_process_schema_data(self):
test_case_buf = """--
-- Greenplum Database database dump
--
SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: dcddev
--
COMMENT ON SCHEMA public IS 'Standard public schema';
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
-- Name: ao_part_table_constraint; Type: CONSTRAINT; Schema: public; Owner: dcddev; Tablespace:
--
ALTER TABLE ONLY public.ao_part_table
ADD CONSTRAINT constraint_name PRIMARY KEY (name);
--
-- Data: ao_part_table; Type: TABLE DATA; Schema: public; Owner: dcddev; Tablespace:
--
COPY ao_part_table from stdin;
1
2
3
4
5
6
\.
"""
dump_schemas = ['public']
dump_tables = [('public', 'ao_part_table')]
infile = '/tmp/test_schema.in'
outfile = '/tmp/test_schema.out'
with open(infile, 'w') as fd:
fd.write(test_case_buf)
with open(infile, 'r') as fdin:
with open(outfile, 'w') as fdout:
process_schema(dump_schemas, dump_tables, fdin, fdout, None)
expected_out = """SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
-- Name: ao_part_table_constraint; Type: CONSTRAINT; Schema: public; Owner: dcddev; Tablespace:
ALTER TABLE ONLY public.ao_part_table
ADD CONSTRAINT constraint_name PRIMARY KEY (name);
--
-- Data: ao_part_table; Type: TABLE DATA; Schema: public; Owner: dcddev; Tablespace:
--
COPY ao_part_table from stdin;
1
2
3
4
5
6
\.
"""
with open(outfile, 'r') as fd:
results = fd.read()
self.assertEquals(results, expected_out)
os.remove(infile)
os.remove(outfile)
def test_process_schema_function(self):
test_case_buf = """--
-- Greenplum Database database dump
--
SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: dcddev
--
COMMENT ON SCHEMA public IS 'Standard public schema';
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
-- Name: ao_part_table_constraint; Type: CONSTRAINT; Schema: public; Owner: dcddev; Tablespace:
--
ALTER TABLE ONLY public.ao_part_table
ADD CONSTRAINT constraint_name PRIMARY KEY (name);
--
-- Data: ao_part_table; Type: TABLE DATA; Schema: public; Owner: dcddev; Tablespace:
--
COPY ao_part_table from stdin;
1
2
3
4
5
6
\.
"""
dump_schemas = ['public']
dump_tables = [('public', 'ao_part_table')]
infile = '/tmp/test_schema.in'
outfile = '/tmp/test_schema.out'
with open(infile, 'w') as fd:
fd.write(test_case_buf)
with open(infile, 'r') as fdin:
with open(outfile, 'w') as fdout:
process_schema(dump_schemas, dump_tables, fdin, fdout, None)
expected_out = """SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
-- Name: ao_part_table_constraint; Type: CONSTRAINT; Schema: public; Owner: dcddev; Tablespace:
ALTER TABLE ONLY public.ao_part_table
ADD CONSTRAINT constraint_name PRIMARY KEY (name);
--
-- Data: ao_part_table; Type: TABLE DATA; Schema: public; Owner: dcddev; Tablespace:
--
COPY ao_part_table from stdin;
1
2
3
4
5
6
\.
"""
with open(outfile, 'r') as fd:
results = fd.read()
self.assertEquals(results, expected_out)
os.remove(infile)
os.remove(outfile)
def test_process_schema_function_external_table(self):
test_case_buf = """--
-- Greenplum Database database dump
--
SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: dcddev
--
COMMENT ON SCHEMA public IS 'Standard public schema';
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
-- Name: ao_part_table_constraint; Type: CONSTRAINT; Schema: public; Owner: dcddev; Tablespace:
--
ALTER TABLE ONLY public.ao_part_table
ADD CONSTRAINT constraint_name PRIMARY KEY (name);
--
-- Name: ao_part_table; Type: EXTERNAL TABLE; Schema: public; Owner: dcddev; Tablespace:
--
CREATE TABLE ao_part_table (
column1 integer,
column2 character varying(20),
column3 date
) DISTRIBUTED BY (column1); with (appendonly=true)"""
dump_schemas = ['public']
dump_tables = [('public', 'ao_part_table')]
infile = '/tmp/test_schema.in'
outfile = '/tmp/test_schema.out'
with open(infile, 'w') as fd:
fd.write(test_case_buf)
with open(infile, 'r') as fdin:
with open(outfile, 'w') as fdout:
process_schema(dump_schemas, dump_tables, fdin, fdout, None)
expected_out = """SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
-- Name: ao_part_table_constraint; Type: CONSTRAINT; Schema: public; Owner: dcddev; Tablespace:
ALTER TABLE ONLY public.ao_part_table
ADD CONSTRAINT constraint_name PRIMARY KEY (name);
--
-- Name: ao_part_table; Type: EXTERNAL TABLE; Schema: public; Owner: dcddev; Tablespace:
--
CREATE TABLE ao_part_table (
column1 integer,
column2 character varying(20),
column3 date
) DISTRIBUTED BY (column1); with (appendonly=true)"""
with open(outfile, 'r') as fd:
results = fd.read()
self.assertEquals(results, expected_out)
os.remove(infile)
os.remove(outfile)
def test_process_schema_matching_view_function_seq(self):
    """Schema-level restore of 's1' keeps every object type in that schema.

    Feeds a dump containing tables, views, a function and a sequence in both
    'public' and 's1' through process_schema with
    schema_level_restore_list=['s1'], and verifies only the s1 objects
    (CREATE SCHEMA, TABLE, VIEW, FUNCTION, EXTERNAL TABLE, SEQUENCE) survive.
    """
    # Simulated pg_dump output; content is matched line-for-line by
    # process_schema, so it is kept verbatim.
    test_case_buf = """--
-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: gpadmin
--
COMMENT ON SCHEMA public IS 'Standard public schema';
--
-- Name: s1; Type: SCHEMA; Schema: -; Owner: gpadmin
--
CREATE SCHEMA s1;
ALTER SCHEMA s1 OWNER TO gpadmin;
SET search_path = public, pg_catalog;
--
-- Name: table1; Type: TABLE; Schema: public; Owner: gpadmin; Tablespace:
--
CREATE TABLE table1 (
i integer,
j text
) DISTRIBUTED BY (i);
;
ALTER TABLE public.table1 OWNER TO gpadmin;
--
-- Name: view1; Type: VIEW; Schema: public; Owner: gpadmin
--
CREATE VIEW view1 AS SELECT * FROM table1;
ALTER TABLE public.view1 OWNER TO gpadmin;
SET search_path = s1, pg_catalog;
--
-- Name: t1; Type: TABLE; Schema: s1; Owner: gpadmin; Tablespace:
--
CREATE TABLE t1 (
c1 integer,
c2 text,
c3 date
) DISTRIBUTED BY (c1);
;
ALTER TABLE s1.t1 OWNER TO gpadmin;
--
-- Name: v1; Type: VIEW; Schema: s1; Owner: gpadmin
--
CREATE VIEW v1 AS
SELECT t1.c1, t1.c2 FROM t1;
ALTER TABLE s1.v1 OWNER TO gpadmin;
--
-- Name: user_defined_function; Type: FUNCTION; Schema: s1; Owner: gpadmin; Tablespace:
--
CREATE FUNCTION user_defined_function as $$
print 'Hello, World'
$$ LANGUAGE as plpgsql;
--
-- Name: ao_part_table; Type: EXTERNAL TABLE; Schema: s1; Owner: gpadmin; Tablespace:
--
CREATE TABLE ao_part_table (
column1 integer,
column2 character varying(20),
column3 date
) DISTRIBUTED BY (column1); with (appendonly=true)
--
-- Name: id_seq; Type: SEQUENCE; Schema: s1; Owner: gpadmin
--
CREATE SEQUENCE id_seq
START WITH 1
INCREMENT BY 1
NO MAXVALUE
NO MINVALUE
CACHE 1;
ALTER TABLE s1.id_seq OWNER TO gpadmin;"""
    in_name = '/tmp/infile'
    out_name = '/tmp/outfile'
    with open(in_name, 'w') as fd:
        fd.write(test_case_buf)
    # Fix: the original assigned this list and then ignored it, passing a
    # fresh literal to process_schema; pass the variable instead.
    schema_level_restore_list = ['s1']
    try:
        with open(out_name, 'w') as fdout:
            with open(in_name, 'r') as fdin:
                process_schema(None, None, fdin, fdout,
                               schema_level_restore_list=schema_level_restore_list)
        with open(out_name, 'r') as fd:
            results = fd.read()
        expected_out = """-- Name: s1; Type: SCHEMA; Schema: -; Owner: gpadmin
--
CREATE SCHEMA s1;
ALTER SCHEMA s1 OWNER TO gpadmin;
SET search_path = s1, pg_catalog;
--
-- Name: t1; Type: TABLE; Schema: s1; Owner: gpadmin; Tablespace:
--
CREATE TABLE t1 (
c1 integer,
c2 text,
c3 date
) DISTRIBUTED BY (c1);
;
ALTER TABLE s1.t1 OWNER TO gpadmin;
--
-- Name: v1; Type: VIEW; Schema: s1; Owner: gpadmin
--
CREATE VIEW v1 AS
SELECT t1.c1, t1.c2 FROM t1;
ALTER TABLE s1.v1 OWNER TO gpadmin;
--
-- Name: user_defined_function; Type: FUNCTION; Schema: s1; Owner: gpadmin; Tablespace:
--
CREATE FUNCTION user_defined_function as $$
print 'Hello, World'
$$ LANGUAGE as plpgsql;
--
-- Name: ao_part_table; Type: EXTERNAL TABLE; Schema: s1; Owner: gpadmin; Tablespace:
--
CREATE TABLE ao_part_table (
column1 integer,
column2 character varying(20),
column3 date
) DISTRIBUTED BY (column1); with (appendonly=true)
--
-- Name: id_seq; Type: SEQUENCE; Schema: s1; Owner: gpadmin
--
CREATE SEQUENCE id_seq
START WITH 1
INCREMENT BY 1
NO MAXVALUE
NO MINVALUE
CACHE 1;
ALTER TABLE s1.id_seq OWNER TO gpadmin;"""
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(results, expected_out)
    finally:
        # Remove the temp files even when the assertion fails.
        os.remove(in_name)
        os.remove(out_name)
def test_process_schema_function_empty_fitler_list(self):
    """With empty schema/table filter lists, only global SET lines pass through.

    NOTE(review): 'fitler' in the method name is a typo, kept so the test id
    stays stable for existing runners.
    """
    # Simulated pg_dump output; matched line-for-line by process_schema.
    test_case_buf = """--
-- Greenplum Database database dump
--
SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: dcddev
--
COMMENT ON SCHEMA public IS 'Standard public schema';
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
-- Name: ao_part_table_constraint; Type: CONSTRAINT; Schema: public; Owner: dcddev; Tablespace:
--
ALTER TABLE ONLY public.ao_part_table
ADD CONSTRAINT constraint_name PRIMARY KEY (name);
--
-- Name: ao_part_table_view; Type: VIEW; Schema: public; Owner: dcddev; Tablespace:
--
CREATE VIEW ao_part_table_view as SELECT * FROM ao_part_table;
--
-- Name: user_defined_function; Type: FUNCTION; Schema: public; Owner: dcddev; Tablespace:
--
CREATE FUNCTION user_defined_function as $$
print 'Hello, World'
$$ LANGUAGE as plpgsql;
--
-- Name: ao_part_table; Type: EXTERNAL TABLE; Schema: public; Owner: dcddev; Tablespace:
--
CREATE TABLE ao_part_table (
column1 integer,
column2 character varying(20),
column3 date
) DISTRIBUTED BY (column1); with (appendonly=true)"""
    dump_schemas = []
    dump_tables = []
    infile = '/tmp/test_schema.in'
    outfile = '/tmp/test_schema.out'
    with open(infile, 'w') as fd:
        fd.write(test_case_buf)
    try:
        with open(infile, 'r') as fdin:
            with open(outfile, 'w') as fdout:
                process_schema(dump_schemas, dump_tables, fdin, fdout, None)
        expected_out = """SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
SET default_tablespace = '';
--
"""
        with open(outfile, 'r') as fd:
            results = fd.read()
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(results, expected_out)
    finally:
        # Remove the temp files even when the assertion fails.
        os.remove(infile)
        os.remove(outfile)
def test_process_schema_function_non_matching_filter(self):
    """A filter naming a schema/table absent from the dump keeps only SET lines."""
    # Simulated pg_dump output; matched line-for-line by process_schema.
    test_case_buf = """--
-- Greenplum Database database dump
--
SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: dcddev
--
COMMENT ON SCHEMA public IS 'Standard public schema';
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
-- Name: ao_part_table_constraint; Type: CONSTRAINT; Schema: public; Owner: dcddev; Tablespace:
--
ALTER TABLE ONLY public.ao_part_table
ADD CONSTRAINT constraint_name PRIMARY KEY (name);
--
-- Name: ao_part_table; Type: EXTERNAL TABLE; Schema: public; Owner: dcddev; Tablespace:
--
CREATE TABLE ao_part_table (
column1 integer,
column2 character varying(20),
column3 date
) DISTRIBUTED BY (column1); with (appendonly=true)"""
    dump_schemas = ['no_match_schema']
    dump_tables = [('no_match_schema', 'no_match_table')]
    infile = '/tmp/test_schema.in'
    outfile = '/tmp/test_schema.out'
    with open(infile, 'w') as fd:
        fd.write(test_case_buf)
    try:
        with open(infile, 'r') as fdin:
            with open(outfile, 'w') as fdout:
                process_schema(dump_schemas, dump_tables, fdin, fdout, None)
        expected_out = """SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
SET default_tablespace = '';
--
"""
        with open(outfile, 'r') as fd:
            results = fd.read()
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(results, expected_out)
    finally:
        # Remove the temp files even when the assertion fails.
        os.remove(infile)
        os.remove(outfile)
def test_process_schema_function_non_matching_constraint(self):
    """A constraint belonging to a non-selected schema is filtered out.

    Only the 'public' table section and the global SET lines survive; the
    some_schema constraint block is dropped.
    """
    # Simulated pg_dump output; matched line-for-line by process_schema.
    test_case_buf = """--
-- Greenplum Database database dump
--
SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: dcddev
--
COMMENT ON SCHEMA public IS 'Standard public schema';
SET search_path = some_schema, pg_catalog;
--
-- Name: ao_part_table_constraint; Type: CONSTRAINT; Schema: some_schema; Owner: dcddev; Tablespace:
--
ALTER TABLE ONLY public.ao_part_table
ADD CONSTRAINT constraint_name PRIMARY KEY (name);
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
-- Name: ao_part_table; Type: EXTERNAL TABLE; Schema: public; Owner: dcddev; Tablespace:
--
CREATE TABLE ao_part_table (
column1 integer,
column2 character varying(20),
column3 date
) DISTRIBUTED BY (column1); with (appendonly=true)"""
    dump_schemas = ['public']
    dump_tables = [('public', 'ao_part_table')]
    infile = '/tmp/test_schema.in'
    outfile = '/tmp/test_schema.out'
    with open(infile, 'w') as fd:
        fd.write(test_case_buf)
    try:
        with open(infile, 'r') as fdin:
            with open(outfile, 'w') as fdout:
                process_schema(dump_schemas, dump_tables, fdin, fdout, None)
        expected_out = """SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
-- Name: ao_part_table; Type: EXTERNAL TABLE; Schema: public; Owner: dcddev; Tablespace:
--
CREATE TABLE ao_part_table (
column1 integer,
column2 character varying(20),
column3 date
) DISTRIBUTED BY (column1); with (appendonly=true)"""
        with open(outfile, 'r') as fd:
            results = fd.read()
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(results, expected_out)
    finally:
        # Remove the temp files even when the assertion fails.
        os.remove(infile)
        os.remove(outfile)
def test_check_valid_schema01(self):
    """A schema name present in the dump set is reported valid."""
    dump_schemas = set(['schema1', 'schema2'])
    name = 'schema1'
    output = check_valid_schema(name, dump_schemas)
    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(output, True)
def test_check_valid_schema02(self):
    """A schema name absent from the dump set is reported invalid."""
    dump_schemas = set(['schema1', 'schema2'])
    name = 'schema3'
    output = check_valid_schema(name, dump_schemas)
    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(output, False)
def test_check_valid_schema03(self):
    """An empty schema name is reported invalid."""
    dump_schemas = set(['schema1', 'schema2'])
    name = ''
    output = check_valid_schema(name, dump_schemas)
    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(output, False)
def test_check_valid_schema04(self):
    """Any name is invalid against an empty dump-schema set."""
    dump_schemas = set()
    name = 'schema1'
    output = check_valid_schema(name, dump_schemas)
    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(output, False)
def test_check_valid_relname01(self):
    """A (schema, relation) pair present in the dump list is valid."""
    dump_tables = [('public', 'ao_part_table')]
    name = 'ao_part_table'
    schema = 'public'
    output = check_valid_relname(schema, name, dump_tables)
    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(output, True)
def test_check_valid_relname02(self):
    """A matching relation name under the wrong schema is invalid."""
    dump_tables = [('public', 'ao_part_table')]
    name = 'ao_part_table'
    schema = 'pepper'
    output = check_valid_relname(schema, name, dump_tables)
    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(output, False)
def test_check_valid_relname03(self):
    """A non-listed relation name under a matching schema is invalid."""
    dump_tables = [('public', 'ao_part_table')]
    name = 'co_part_table'
    schema = 'public'
    output = check_valid_relname(schema, name, dump_tables)
    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(output, False)
def test_process_schema_function_drop_table(self):
    """Only DROP TABLE statements for filtered tables are retained.

    DROPs for non-selected tables, the procedural language and the schema
    itself are removed, while the selected table's DROP and CREATE survive.
    """
    # Simulated pg_dump output; matched line-for-line by process_schema.
    test_case_buf = """--
-- Greenplum Database database dump
--
SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
DROP TABLE public.heap_table;
DROP TABLE public.ao_part_table;
DROP PROCEDURAL LANGUAGE plpgsql;
DROP SCHEMA public;
SET default_with_oids = false;
--
-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: dcddev
--
COMMENT ON SCHEMA public IS 'Standard public schema';
SET search_path = some_schema, pg_catalog;
--
-- Name: ao_part_table_constraint; Type: CONSTRAINT; Schema: some_schema; Owner: dcddev; Tablespace:
--
ALTER TABLE ONLY some_schema.ao_part_table
ADD CONSTRAINT constraint_name PRIMARY KEY (name);
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
-- Name: ao_part_table; Type: TABLE; Schema: public; Owner: dcddev; Tablespace:
--
CREATE TABLE ao_part_table (
column1 integer,
column2 character varying(20),
column3 date
) DISTRIBUTED BY (column1); with (appendonly=true)"""
    dump_schemas = ['public']
    dump_tables = [('public', 'ao_part_table')]
    infile = '/tmp/test_schema.in'
    outfile = '/tmp/test_schema.out'
    with open(infile, 'w') as fd:
        fd.write(test_case_buf)
    try:
        with open(infile, 'r') as fdin:
            with open(outfile, 'w') as fdout:
                process_schema(dump_schemas, dump_tables, fdin, fdout, None)
        expected_out = """SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
DROP TABLE public.ao_part_table;
SET default_with_oids = false;
--
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
-- Name: ao_part_table; Type: TABLE; Schema: public; Owner: dcddev; Tablespace:
--
CREATE TABLE ao_part_table (
column1 integer,
column2 character varying(20),
column3 date
) DISTRIBUTED BY (column1); with (appendonly=true)"""
        with open(outfile, 'r') as fd:
            results = fd.read()
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(results, expected_out)
    finally:
        # Fix: the original never removed its temp files (sibling tests do);
        # clean up even when the assertion fails.
        os.remove(infile)
        os.remove(outfile)
def test_process_schema_user_function_having_drop_table(self):
    """A DROP TABLE inside a user function body must not leak into the output.

    The foofunc() body contains 'DROP TABLE IF EXISTS footab;' — the function
    lives in a non-selected schema, so the whole block (including that inner
    DROP) is filtered out.
    """
    # Simulated pg_dump output; matched line-for-line by process_schema.
    test_case_buf = """--
-- Greenplum Database database dump
--
SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
DROP TABLE public.heap_table;
DROP TABLE public.ao_part_table;
DROP PROCEDURAL LANGUAGE plpgsql;
DROP SCHEMA public;
SET default_with_oids = false;
--
-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: dcddev
--
COMMENT ON SCHEMA public IS 'Standard public schema';
SET search_path = some_schema, pg_catalog;
--
-- Name: ao_part_table_constraint; Type: CONSTRAINT; Schema: some_schema; Owner: dcddev; Tablespace:
--
ALTER TABLE ONLY some_schema.ao_part_table
ADD CONSTRAINT constraint_name PRIMARY KEY (name);
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
-- Name: ao_part_table; Type: TABLE; Schema: public; Owner: dcddev; Tablespace:
--
CREATE TABLE ao_part_table (
column1 integer,
column2 character varying(20),
column3 date
) DISTRIBUTED BY (column1); with (appendonly=true)
SET search_path = foo, pg_catalog;
---
--- Name: foofunc(); Type: FUNCTION; Schema: foo; Owner: foo
---
CREATE OR REPLACE FUNCTION foofunc()
RETURNS TEXT AS $$
DECLARE ver TEXT;
BEGIN
DROP TABLE IF EXISTS footab;
SELECT version() INTO ver;
RETURN ver;
END;
$$ LANGUAGE plpgsql;"""
    dump_schemas = ['public']
    dump_tables = [('public', 'ao_part_table')]
    infile = '/tmp/test_schema.in'
    outfile = '/tmp/test_schema.out'
    with open(infile, 'w') as fd:
        fd.write(test_case_buf)
    try:
        with open(infile, 'r') as fdin:
            with open(outfile, 'w') as fdout:
                process_schema(dump_schemas, dump_tables, fdin, fdout, None)
        expected_out = """SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
DROP TABLE public.ao_part_table;
SET default_with_oids = false;
--
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
-- Name: ao_part_table; Type: TABLE; Schema: public; Owner: dcddev; Tablespace:
--
CREATE TABLE ao_part_table (
column1 integer,
column2 character varying(20),
column3 date
) DISTRIBUTED BY (column1); with (appendonly=true)"""
        with open(outfile, 'r') as fd:
            # .strip() tolerates trailing blank output left by the filtered
            # function block (as in the original test).
            results = fd.read().strip()
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(results, expected_out)
    finally:
        # Fix: the original never removed its temp files (sibling tests do);
        # clean up even when the assertion fails.
        os.remove(infile)
        os.remove(outfile)
def test_process_schema_function_drop_external_table(self):
    """DROP EXTERNAL TABLE for a filtered table is retained; other DROPs removed."""
    # Simulated pg_dump output; matched line-for-line by process_schema.
    test_case_buf = """--
-- Greenplum Database database dump
--
SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
DROP TABLE public.heap_table;
DROP EXTERNAL TABLE public.ao_part_table;
DROP PROCEDURAL LANGUAGE plpgsql;
DROP SCHEMA public;
SET default_with_oids = false;
--
-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: dcddev
--
COMMENT ON SCHEMA public IS 'Standard public schema';
SET search_path = some_schema, pg_catalog;
--
-- Name: ao_part_table_constraint; Type: CONSTRAINT; Schema: some_schema; Owner: dcddev; Tablespace:
--
ALTER TABLE ONLY some_schema.ao_part_table
ADD CONSTRAINT constraint_name PRIMARY KEY (name);
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
-- Name: ao_part_table; Type: EXTERNAL TABLE; Schema: public; Owner: dcddev; Tablespace:
--
CREATE TABLE ao_part_table (
column1 integer,
column2 character varying(20),
column3 date
) DISTRIBUTED BY (column1); with (appendonly=true)"""
    dump_schemas = ['public']
    dump_tables = [('public', 'ao_part_table')]
    infile = '/tmp/test_schema.in'
    outfile = '/tmp/test_schema.out'
    with open(infile, 'w') as fd:
        fd.write(test_case_buf)
    try:
        with open(infile, 'r') as fdin:
            with open(outfile, 'w') as fdout:
                process_schema(dump_schemas, dump_tables, fdin, fdout, None)
        expected_out = """SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
DROP EXTERNAL TABLE public.ao_part_table;
SET default_with_oids = false;
--
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
-- Name: ao_part_table; Type: EXTERNAL TABLE; Schema: public; Owner: dcddev; Tablespace:
--
CREATE TABLE ao_part_table (
column1 integer,
column2 character varying(20),
column3 date
) DISTRIBUTED BY (column1); with (appendonly=true)"""
        with open(outfile, 'r') as fd:
            results = fd.read()
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(results, expected_out)
    finally:
        # Fix: the original never removed its temp files (sibling tests do);
        # clean up even when the assertion fails.
        os.remove(infile)
        os.remove(outfile)
def test_process_schema_single_table(self):
    """Filtering one table per selected schema keeps DDL, COPY data and ACLs.

    Five schemas exist in the dump; only user_schema_a.user_table and
    user_schema_e.test_table are selected. Everything for the other schemas
    — CREATE SCHEMA, tables, COPY blocks and schema-level ACLs — must drop
    out, while the table-level ACL for the kept table survives.
    """
    # Simulated pg_dump output; matched line-for-line by process_schema.
    test_case_buf = """--
-- Greenplum Database database dump
--
SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: dcddev
--
COMMENT ON SCHEMA public IS 'Standard public schema';
--
-- Name: user_schema_a; Type: SCHEMA; Schema: -; Owner: user_role_a
--
CREATE SCHEMA user_schema_a;
ALTER SCHEMA user_schema_a OWNER TO user_role_a;
--
-- Name: user_schema_b; Type: SCHEMA; Schema: -; Owner: user_role_a
--
CREATE SCHEMA user_schema_b;
ALTER SCHEMA user_schema_b OWNER TO user_role_a;
--
-- Name: user_schema_c; Type: SCHEMA; Schema: -; Owner: user_role_a
--
CREATE SCHEMA user_schema_c;
ALTER SCHEMA user_schema_c OWNER TO user_role_a;
--
-- Name: user_schema_d; Type: SCHEMA; Schema: -; Owner: user_role_a
--
CREATE SCHEMA user_schema_d;
ALTER SCHEMA user_schema_d OWNER TO user_role_a;
--
-- Name: user_schema_e; Type: SCHEMA; Schema: -; Owner: user_role_a
--
CREATE SCHEMA user_schema_e;
ALTER SCHEMA user_schema_e OWNER TO user_role_a;
--
-- Name: plpgsql; Type: PROCEDURAL LANGUAGE; Schema: -; Owner: dcddev
--
CREATE PROCEDURAL LANGUAGE plpgsql;
ALTER FUNCTION plpgsql_call_handler() OWNER TO dcddev;
ALTER FUNCTION plpgsql_validator(oid) OWNER TO dcddev;
SET search_path = user_schema_a, pg_catalog;
SET default_tablespace = '';
--
-- Name: user_table; Type: TABLE; Schema: user_schema_a; Owner: user_role_b; Tablespace:
--
CREATE TABLE user_table (
a character(1) NOT NULL,
b character(60)
) DISTRIBUTED BY (a);
ALTER TABLE user_schema_a.user_table OWNER TO user_role_b;
SET search_path = user_schema_b, pg_catalog;
--
-- Name: test_table; Type: TABLE; Schema: user_schema_b; Owner: dcddev; Tablespace:
--
CREATE TABLE test_table (
a integer
) DISTRIBUTED BY (a);
ALTER TABLE user_schema_b.test_table OWNER TO dcddev;
SET search_path = user_schema_c, pg_catalog;
--
-- Name: test_table; Type: TABLE; Schema: user_schema_c; Owner: user_role_b; Tablespace:
--
CREATE TABLE test_table (
a integer
) DISTRIBUTED BY (a);
ALTER TABLE user_schema_c.test_table OWNER TO user_role_b;
SET search_path = user_schema_d, pg_catalog;
--
-- Name: test_table; Type: TABLE; Schema: user_schema_d; Owner: dcddev; Tablespace:
--
CREATE TABLE test_table (
a integer
) DISTRIBUTED BY (a);
ALTER TABLE user_schema_d.test_table OWNER TO dcddev;
SET search_path = user_schema_e, pg_catalog;
--
-- Name: test_table; Type: TABLE; Schema: user_schema_e; Owner: dcddev; Tablespace:
--
CREATE TABLE test_table (
a integer
) DISTRIBUTED BY (a);
ALTER TABLE user_schema_e.test_table OWNER TO dcddev;
SET search_path = user_schema_a, pg_catalog;
--
-- Data for Name: user_table; Type: TABLE DATA; Schema: user_schema_a; Owner: user_role_b
--
COPY user_table (a, b) FROM stdin;
\.
SET search_path = user_schema_b, pg_catalog;
--
-- Data for Name: test_table; Type: TABLE DATA; Schema: user_schema_b; Owner: dcddev
--
COPY test_table (a) FROM stdin;
\.
SET search_path = user_schema_c, pg_catalog;
--
-- Data for Name: test_table; Type: TABLE DATA; Schema: user_schema_c; Owner: dcddev
--
COPY test_table (a) FROM stdin;
\.
SET search_path = user_schema_d, pg_catalog;
--
-- Data for Name: test_table; Type: TABLE DATA; Schema: user_schema_d; Owner: dcddev
--
COPY test_table (a) FROM stdin;
\.
SET search_path = user_schema_e, pg_catalog;
--
-- Data for Name: test_table; Type: TABLE DATA; Schema: user_schema_e; Owner: dcddev
--
COPY test_table (a) FROM stdin;
\.
--
-- Name: public; Type: ACL; Schema: -; Owner: dcddev
--
REVOKE ALL ON SCHEMA public FROM PUBLIC;
REVOKE ALL ON SCHEMA public FROM dcddev;
GRANT ALL ON SCHEMA public TO dcddev;
GRANT ALL ON SCHEMA public TO PUBLIC;
--
-- Name: user_schema_b; Type: ACL; Schema: -; Owner: user_role_a
--
REVOKE ALL ON SCHEMA user_schema_b FROM PUBLIC;
REVOKE ALL ON SCHEMA user_schema_b FROM user_role_a;
GRANT ALL ON SCHEMA user_schema_b TO user_role_a;
--
-- Name: user_schema_a; Type: ACL; Schema: -; Owner: user_role_a
--
REVOKE ALL ON SCHEMA user_schema_a FROM PUBLIC;
REVOKE ALL ON SCHEMA user_schema_a FROM user_role_a;
GRANT ALL ON SCHEMA user_schema_a TO user_role_a;
SET search_path = user_schema_a, pg_catalog;
--
-- Name: user_table; Type: ACL; Schema: user_schema_a; Owner: user_role_b
--
REVOKE ALL ON TABLE user_table FROM PUBLIC;
REVOKE ALL ON TABLE user_table FROM user_role_b;
GRANT ALL ON TABLE user_table TO user_role_b;
--
-- Greenplum Database database dump complete
--
"""
    dump_schemas = ['user_schema_a', 'user_schema_e']
    dump_tables = [('user_schema_a', 'user_table'), ('user_schema_e', 'test_table')]
    infile = '/tmp/test_schema.in'
    outfile = '/tmp/test_schema.out'
    with open(infile, 'w') as fd:
        fd.write(test_case_buf)
    try:
        with open(infile, 'r') as fdin:
            with open(outfile, 'w') as fdout:
                process_schema(dump_schemas, dump_tables, fdin, fdout, None)
        expected_out = """SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
-- Name: user_schema_a; Type: SCHEMA; Schema: -; Owner: user_role_a
--
CREATE SCHEMA user_schema_a;
ALTER SCHEMA user_schema_a OWNER TO user_role_a;
--
-- Name: user_schema_e; Type: SCHEMA; Schema: -; Owner: user_role_a
--
CREATE SCHEMA user_schema_e;
ALTER SCHEMA user_schema_e OWNER TO user_role_a;
--
-- Name: plpgsql; Type: PROCEDURAL LANGUAGE; Schema: -; Owner: dcddev
--
CREATE PROCEDURAL LANGUAGE plpgsql;
ALTER FUNCTION plpgsql_call_handler() OWNER TO dcddev;
ALTER FUNCTION plpgsql_validator(oid) OWNER TO dcddev;
SET search_path = user_schema_a, pg_catalog;
SET default_tablespace = '';
--
-- Name: user_table; Type: TABLE; Schema: user_schema_a; Owner: user_role_b; Tablespace:
--
CREATE TABLE user_table (
a character(1) NOT NULL,
b character(60)
) DISTRIBUTED BY (a);
ALTER TABLE user_schema_a.user_table OWNER TO user_role_b;
SET search_path = user_schema_e, pg_catalog;
--
-- Name: test_table; Type: TABLE; Schema: user_schema_e; Owner: dcddev; Tablespace:
--
CREATE TABLE test_table (
a integer
) DISTRIBUTED BY (a);
ALTER TABLE user_schema_e.test_table OWNER TO dcddev;
SET search_path = user_schema_a, pg_catalog;
--
-- Data for Name: user_table; Type: TABLE DATA; Schema: user_schema_a; Owner: user_role_b
--
COPY user_table (a, b) FROM stdin;
\.
SET search_path = user_schema_e, pg_catalog;
--
-- Data for Name: test_table; Type: TABLE DATA; Schema: user_schema_e; Owner: dcddev
--
COPY test_table (a) FROM stdin;
\.
--
SET search_path = user_schema_a, pg_catalog;
--
-- Name: user_table; Type: ACL; Schema: user_schema_a; Owner: user_role_b
--
REVOKE ALL ON TABLE user_table FROM PUBLIC;
REVOKE ALL ON TABLE user_table FROM user_role_b;
GRANT ALL ON TABLE user_table TO user_role_b;
--
-- Greenplum Database database dump complete
--
"""
        with open(outfile, 'r') as fd:
            results = fd.read()
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(results, expected_out)
    finally:
        # Remove the temp files even when the assertion fails.
        os.remove(infile)
        os.remove(outfile)
def test_check_dropped_table00(self):
    """A DROP statement for a (schema, table) pair in the dump list is recognized."""
    line = 'DROP TABLE public.ao_part_table;'
    self.assertTrue(
        check_dropped_table(line, [('public', 'ao_part_table')], None, 'DROP TABLE '))
def test_check_dropped_table01(self):
    """A DROP whose schema differs from the dump list is not recognized."""
    line = 'DROP TABLE public.ao_part_table;'
    self.assertFalse(
        check_dropped_table(line, [('pepper', 'ao_part_table')], None, 'DROP TABLE '))
def test_check_dropped_table02(self):
    """A DROP whose table name differs from the dump list is not recognized."""
    line = 'DROP TABLE public.ao_part_table;'
    self.assertFalse(
        check_dropped_table(line, [('public', 'ao_table')], None, 'DROP TABLE '))
def test_process_schema_foreign_table(self):
    """A FOREIGN TABLE section is filtered out even when its name matches.

    Only the SET lines and a trailing comment marker are expected to remain.
    """
    # Simulated pg_dump output; matched line-for-line by process_schema.
    test_case_buf = """--
-- Greenplum Database database dump
--
SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
-- Name: ao_part_table; Type: FOREIGN TABLE; Schema: public; Owner: dcddev; Tablespace:
--
CREATE FOREIGN TABLE ao_part_table (
column1 integer,
column2 character varying(20),
column3 date
) DISTRIBUTED BY (column1); with (appendonly=true)"""
    dump_schemas = ['public']
    dump_tables = [('public', 'ao_part_table')]
    infile = '/tmp/test_schema.in'
    outfile = '/tmp/test_schema.out'
    with open(infile, 'w') as fd:
        fd.write(test_case_buf)
    try:
        with open(infile, 'r') as fdin:
            with open(outfile, 'w') as fdout:
                process_schema(dump_schemas, dump_tables, fdin, fdout, None)
        expected_out = """SET search_path = public, pg_catalog;
SET default_tablespace = '';
--
"""
        with open(outfile, 'r') as fd:
            results = fd.read()
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(results, expected_out)
    finally:
        # Fix: the original never removed its temp files (sibling tests do);
        # clean up even when the assertion fails.
        os.remove(infile)
        os.remove(outfile)
def test_process_schema_with_privileges(self):
    """Schema filtering keeps ACL statements only for the selected objects.

    user_schema_a/user_table is selected; its DDL, COPY data and table ACL
    survive while the public and user_schema_b sections (including their
    REVOKE/GRANT blocks) are removed.
    """
    # Simulated pg_dump output; matched line-for-line by process_schema.
    test_case_buf = """--
-- Greenplum Database database dump
--
SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: dcddev
--
COMMENT ON SCHEMA public IS 'Standard public schema';
--
-- Name: user_schema_a; Type: SCHEMA; Schema: -; Owner: user_role_a
--
CREATE SCHEMA user_schema_a;
ALTER SCHEMA user_schema_a OWNER TO user_role_a;
--
-- Name: user_schema_b; Type: SCHEMA; Schema: -; Owner: user_role_a
--
CREATE SCHEMA user_schema_b;
ALTER SCHEMA user_schema_b OWNER TO user_role_a;
--
-- Name: plpgsql; Type: PROCEDURAL LANGUAGE; Schema: -; Owner: dcddev
--
CREATE PROCEDURAL LANGUAGE plpgsql;
ALTER FUNCTION plpgsql_call_handler() OWNER TO dcddev;
ALTER FUNCTION plpgsql_validator(oid) OWNER TO dcddev;
SET search_path = user_schema_a, pg_catalog;
SET default_tablespace = '';
--
-- Name: user_table; Type: TABLE; Schema: user_schema_a; Owner: user_role_b; Tablespace:
--
CREATE TABLE user_table (
a character(1) NOT NULL,
b character(60)
) DISTRIBUTED BY (a);
ALTER TABLE user_schema_a.user_table OWNER TO user_role_b;
SET search_path = user_schema_b, pg_catalog;
--
-- Name: test_table; Type: TABLE; Schema: user_schema_b; Owner: dcddev; Tablespace:
--
CREATE TABLE test_table (
a integer
) DISTRIBUTED BY (a);
ALTER TABLE user_schema_b.test_table OWNER TO dcddev;
SET search_path = user_schema_a, pg_catalog;
--
-- Data for Name: user_table; Type: TABLE DATA; Schema: user_schema_a; Owner: user_role_b
--
COPY user_table (a, b) FROM stdin;
\.
SET search_path = user_schema_b, pg_catalog;
--
-- Data for Name: test_table; Type: TABLE DATA; Schema: user_schema_b; Owner: dcddev
--
COPY test_table (a) FROM stdin;
\.
--
-- Name: public; Type: ACL; Schema: -; Owner: dcddev
--
REVOKE ALL ON SCHEMA public FROM PUBLIC;
REVOKE ALL ON SCHEMA public FROM dcddev;
GRANT ALL ON SCHEMA public TO dcddev;
GRANT ALL ON SCHEMA public TO PUBLIC;
--
-- Name: user_schema_b; Type: ACL; Schema: -; Owner: user_role_a
--
REVOKE ALL ON SCHEMA user_schema_b FROM PUBLIC;
REVOKE ALL ON SCHEMA user_schema_b FROM user_role_a;
GRANT ALL ON SCHEMA user_schema_b TO user_role_a;
--
-- Name: user_schema_a; Type: ACL; Schema: -; Owner: user_role_a
--
REVOKE ALL ON SCHEMA user_schema_a FROM PUBLIC;
REVOKE ALL ON SCHEMA user_schema_a FROM user_role_a;
GRANT ALL ON SCHEMA user_schema_a TO user_role_a;
SET search_path = user_schema_a, pg_catalog;
--
-- Name: user_table; Type: ACL; Schema: user_schema_a; Owner: user_role_b
--
REVOKE ALL ON TABLE user_table FROM PUBLIC;
REVOKE ALL ON TABLE user_table FROM user_role_b;
GRANT ALL ON TABLE user_table TO user_role_b;
--
-- Greenplum Database database dump complete
--
"""
    dump_schemas = ['user_schema_a']
    dump_tables = [('user_schema_a', 'user_table')]
    infile = '/tmp/test_schema.in'
    outfile = '/tmp/test_schema.out'
    with open(infile, 'w') as fd:
        fd.write(test_case_buf)
    try:
        with open(infile, 'r') as fdin:
            with open(outfile, 'w') as fdout:
                process_schema(dump_schemas, dump_tables, fdin, fdout, None)
        expected_out = """SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
-- Name: user_schema_a; Type: SCHEMA; Schema: -; Owner: user_role_a
--
CREATE SCHEMA user_schema_a;
ALTER SCHEMA user_schema_a OWNER TO user_role_a;
--
-- Name: plpgsql; Type: PROCEDURAL LANGUAGE; Schema: -; Owner: dcddev
--
CREATE PROCEDURAL LANGUAGE plpgsql;
ALTER FUNCTION plpgsql_call_handler() OWNER TO dcddev;
ALTER FUNCTION plpgsql_validator(oid) OWNER TO dcddev;
SET search_path = user_schema_a, pg_catalog;
SET default_tablespace = '';
--
-- Name: user_table; Type: TABLE; Schema: user_schema_a; Owner: user_role_b; Tablespace:
--
CREATE TABLE user_table (
a character(1) NOT NULL,
b character(60)
) DISTRIBUTED BY (a);
ALTER TABLE user_schema_a.user_table OWNER TO user_role_b;
SET search_path = user_schema_a, pg_catalog;
--
-- Data for Name: user_table; Type: TABLE DATA; Schema: user_schema_a; Owner: user_role_b
--
COPY user_table (a, b) FROM stdin;
\.
SET search_path = user_schema_a, pg_catalog;
--
-- Name: user_table; Type: ACL; Schema: user_schema_a; Owner: user_role_b
--
REVOKE ALL ON TABLE user_table FROM PUBLIC;
REVOKE ALL ON TABLE user_table FROM user_role_b;
GRANT ALL ON TABLE user_table TO user_role_b;
--
-- Greenplum Database database dump complete
--
"""
        with open(outfile, 'r') as fd:
            results = fd.read()
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(results, expected_out)
    finally:
        # Fix: the original never removed its temp files (sibling tests do);
        # clean up even when the assertion fails.
        os.remove(infile)
        os.remove(outfile)
def test_special_char_schema_name_filter(self):
    """process_schema must keep only statements for the dumped schema/table
    when both names contain multi-byte (CJK) characters, dropping the
    unrelated `public` schema ACL block."""
    test_case_buf = """--
-- Greenplum Database database dump
--
SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
-- Name: 测试_schema; Type: SCHEMA; Schema: -; Owner: user_role_a
--
CREATE SCHEMA "测试_schema";
ALTER SCHEMA "测试_schema" OWNER TO user_role_a;
--
-- Name: plpgsql; Type: PROCEDURAL LANGUAGE; Schema: -; Owner: dcddev
--
CREATE PROCEDURAL LANGUAGE plpgsql;
ALTER FUNCTION plpgsql_call_handler() OWNER TO dcddev;
ALTER FUNCTION plpgsql_validator(oid) OWNER TO dcddev;
SET search_path = "测试_schema", pg_catalog;
--
-- Name: 测试; Type: TABLE; Schema: 测试_schema; Owner: user_role_b; Tablespace:
--
CREATE TABLE "测试" (
a character(1) NOT NULL,
b character(60)
) DISTRIBUTED BY (a);
ALTER TABLE "测试_schema"."测试" OWNER TO user_role_b;
SET search_path = "测试_schema", pg_catalog;
--
-- Data for Name: 测试; Type: TABLE DATA; Schema: 测试_schema; Owner: user_role_b
--
COPY "测试" (a, b) FROM stdin;
\.
--
-- Name: public; Type: ACL; Schema: -; Owner: dcddev
--
REVOKE ALL ON SCHEMA public FROM PUBLIC;
REVOKE ALL ON SCHEMA public FROM dcddev;
GRANT ALL ON SCHEMA public TO dcddev;
GRANT ALL ON SCHEMA public TO PUBLIC;
--
-- Name: 测试_schema; Type: ACL; Schema: -; Owner: user_role_a
--
REVOKE ALL ON SCHEMA "测试_schema" FROM PUBLIC;
REVOKE ALL ON SCHEMA "测试_schema" FROM user_role_a;
GRANT ALL ON SCHEMA "测试_schema" TO user_role_a;
SET search_path = "测试_schema", pg_catalog;
--
-- Name: 测试; Type: ACL; Schema: 测试_schema; Owner: user_role_b
--
REVOKE ALL ON TABLE "测试" FROM PUBLIC;
REVOKE ALL ON TABLE "测试" FROM user_role_b;
GRANT ALL ON TABLE "测试" TO user_role_b;
--
-- Greenplum Database database dump complete
--
"""
    dump_schemas = ['测试_schema']
    dump_tables = [('测试_schema', '测试')]
    infile = '/tmp/test_schema.in'
    outfile = '/tmp/test_schema.out'
    with open(infile, 'w') as fd:
        fd.write(test_case_buf)
    with open(infile, 'r') as fdin:
        with open(outfile, 'w') as fdout:
            process_schema(dump_schemas, dump_tables, fdin, fdout, None)
    expected_out = """SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
-- Name: 测试_schema; Type: SCHEMA; Schema: -; Owner: user_role_a
--
CREATE SCHEMA "测试_schema";
ALTER SCHEMA "测试_schema" OWNER TO user_role_a;
--
-- Name: plpgsql; Type: PROCEDURAL LANGUAGE; Schema: -; Owner: dcddev
--
CREATE PROCEDURAL LANGUAGE plpgsql;
ALTER FUNCTION plpgsql_call_handler() OWNER TO dcddev;
ALTER FUNCTION plpgsql_validator(oid) OWNER TO dcddev;
SET search_path = "测试_schema", pg_catalog;
--
-- Name: 测试; Type: TABLE; Schema: 测试_schema; Owner: user_role_b; Tablespace:
--
CREATE TABLE "测试" (
a character(1) NOT NULL,
b character(60)
) DISTRIBUTED BY (a);
ALTER TABLE "测试_schema"."测试" OWNER TO user_role_b;
SET search_path = "测试_schema", pg_catalog;
--
-- Data for Name: 测试; Type: TABLE DATA; Schema: 测试_schema; Owner: user_role_b
--
COPY "测试" (a, b) FROM stdin;
\.
--
SET search_path = "测试_schema", pg_catalog;
--
-- Name: 测试; Type: ACL; Schema: 测试_schema; Owner: user_role_b
--
REVOKE ALL ON TABLE "测试" FROM PUBLIC;
REVOKE ALL ON TABLE "测试" FROM user_role_b;
GRANT ALL ON TABLE "测试" TO user_role_b;
--
-- Greenplum Database database dump complete
--
"""
    with open(outfile, 'r') as fd:
        results = fd.read()
    # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
    self.assertEqual(results, expected_out)
def test_euro_char_schema_name_filter(self):
    """process_schema must keep only statements for the dumped schema/table
    when the names contain accented Latin (Western European) characters."""
    test_case_buf = """--
-- Greenplum Database database dump
--
SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
-- Name: Áá_schema; Type: SCHEMA; Schema: -; Owner: user_role_a
--
CREATE SCHEMA "Áá_schema";
ALTER SCHEMA "Áá_schema" OWNER TO user_role_a;
--
-- Name: plpgsql; Type: PROCEDURAL LANGUAGE; Schema: -; Owner: dcddev
--
CREATE PROCEDURAL LANGUAGE plpgsql;
ALTER FUNCTION plpgsql_call_handler() OWNER TO dcddev;
ALTER FUNCTION plpgsql_validator(oid) OWNER TO dcddev;
SET search_path = "Áá_schema", pg_catalog;
--
-- Name: Áá; Type: TABLE; Schema: Áá_schema; Owner: user_role_b; Tablespace:
--
CREATE TABLE "Áá" (
a character(1) NOT NULL,
b character(60)
) DISTRIBUTED BY (a);
ALTER TABLE "Áá_schema"."Áá" OWNER TO user_role_b;
SET search_path = "Áá_schema", pg_catalog;
--
-- Data for Name: Áá; Type: TABLE DATA; Schema: Áá_schema; Owner: user_role_b
--
COPY "Áá" (a, b) FROM stdin;
\.
--
-- Name: public; Type: ACL; Schema: -; Owner: dcddev
--
REVOKE ALL ON SCHEMA public FROM PUBLIC;
REVOKE ALL ON SCHEMA public FROM dcddev;
GRANT ALL ON SCHEMA public TO dcddev;
GRANT ALL ON SCHEMA public TO PUBLIC;
--
-- Name: Áá_schema; Type: ACL; Schema: -; Owner: user_role_a
--
REVOKE ALL ON SCHEMA "Áá_schema" FROM PUBLIC;
REVOKE ALL ON SCHEMA "Áá_schema" FROM user_role_a;
GRANT ALL ON SCHEMA "Áá_schema" TO user_role_a;
SET search_path = "Áá_schema", pg_catalog;
--
-- Name: Áá; Type: ACL; Schema: Áá_schema; Owner: user_role_b
--
REVOKE ALL ON TABLE "Áá" FROM PUBLIC;
REVOKE ALL ON TABLE "Áá" FROM user_role_b;
GRANT ALL ON TABLE "Áá" TO user_role_b;
--
-- Greenplum Database database dump complete
--
"""
    dump_schemas = ['Áá_schema']
    dump_tables = [('Áá_schema', 'Áá')]
    infile = '/tmp/test_schema.in'
    outfile = '/tmp/test_schema.out'
    with open(infile, 'w') as fd:
        fd.write(test_case_buf)
    with open(infile, 'r') as fdin:
        with open(outfile, 'w') as fdout:
            process_schema(dump_schemas, dump_tables, fdin, fdout, None)
    expected_out = """SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
-- Name: Áá_schema; Type: SCHEMA; Schema: -; Owner: user_role_a
--
CREATE SCHEMA "Áá_schema";
ALTER SCHEMA "Áá_schema" OWNER TO user_role_a;
--
-- Name: plpgsql; Type: PROCEDURAL LANGUAGE; Schema: -; Owner: dcddev
--
CREATE PROCEDURAL LANGUAGE plpgsql;
ALTER FUNCTION plpgsql_call_handler() OWNER TO dcddev;
ALTER FUNCTION plpgsql_validator(oid) OWNER TO dcddev;
SET search_path = "Áá_schema", pg_catalog;
--
-- Name: Áá; Type: TABLE; Schema: Áá_schema; Owner: user_role_b; Tablespace:
--
CREATE TABLE "Áá" (
a character(1) NOT NULL,
b character(60)
) DISTRIBUTED BY (a);
ALTER TABLE "Áá_schema"."Áá" OWNER TO user_role_b;
SET search_path = "Áá_schema", pg_catalog;
--
-- Data for Name: Áá; Type: TABLE DATA; Schema: Áá_schema; Owner: user_role_b
--
COPY "Áá" (a, b) FROM stdin;
\.
--
SET search_path = "Áá_schema", pg_catalog;
--
-- Name: Áá; Type: ACL; Schema: Áá_schema; Owner: user_role_b
--
REVOKE ALL ON TABLE "Áá" FROM PUBLIC;
REVOKE ALL ON TABLE "Áá" FROM user_role_b;
GRANT ALL ON TABLE "Áá" TO user_role_b;
--
-- Greenplum Database database dump complete
--
"""
    with open(outfile, 'r') as fd:
        results = fd.read()
    # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
    self.assertEqual(results, expected_out)
def test_cyrillic_char_schema_name_filter(self):
    """process_schema must keep only statements for the dumped schema/table
    when the names contain Cyrillic characters."""
    test_case_buf = """--
-- Greenplum Database database dump
--
SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
-- Name: Ж_schema; Type: SCHEMA; Schema: -; Owner: user_role_a
--
CREATE SCHEMA "Ж_schema";
ALTER SCHEMA "Ж_schema" OWNER TO user_role_a;
--
-- Name: plpgsql; Type: PROCEDURAL LANGUAGE; Schema: -; Owner: dcddev
--
CREATE PROCEDURAL LANGUAGE plpgsql;
ALTER FUNCTION plpgsql_call_handler() OWNER TO dcddev;
ALTER FUNCTION plpgsql_validator(oid) OWNER TO dcddev;
SET search_path = "Ж_schema", pg_catalog;
--
-- Name: Ж; Type: TABLE; Schema: Ж_schema; Owner: user_role_b; Tablespace:
--
CREATE TABLE "Ж" (
a character(1) NOT NULL,
b character(60)
) DISTRIBUTED BY (a);
ALTER TABLE "Ж_schema"."Ж" OWNER TO user_role_b;
SET search_path = "Ж_schema", pg_catalog;
--
-- Data for Name: Ж; Type: TABLE DATA; Schema: Ж_schema; Owner: user_role_b
--
COPY "Ж" (a, b) FROM stdin;
\.
--
-- Name: public; Type: ACL; Schema: -; Owner: dcddev
--
REVOKE ALL ON SCHEMA public FROM PUBLIC;
REVOKE ALL ON SCHEMA public FROM dcddev;
GRANT ALL ON SCHEMA public TO dcddev;
GRANT ALL ON SCHEMA public TO PUBLIC;
--
-- Name: Ж_schema; Type: ACL; Schema: -; Owner: user_role_a
--
REVOKE ALL ON SCHEMA "Ж_schema" FROM PUBLIC;
REVOKE ALL ON SCHEMA "Ж_schema" FROM user_role_a;
GRANT ALL ON SCHEMA "Ж_schema" TO user_role_a;
SET search_path = "Ж_schema", pg_catalog;
--
-- Name: Ж; Type: ACL; Schema: Ж_schema; Owner: user_role_b
--
REVOKE ALL ON TABLE "Ж" FROM PUBLIC;
REVOKE ALL ON TABLE "Ж" FROM user_role_b;
GRANT ALL ON TABLE "Ж" TO user_role_b;
--
-- Greenplum Database database dump complete
--
"""
    dump_schemas = ['Ж_schema']
    dump_tables = [('Ж_schema', 'Ж')]
    infile = '/tmp/test_schema.in'
    outfile = '/tmp/test_schema.out'
    with open(infile, 'w') as fd:
        fd.write(test_case_buf)
    with open(infile, 'r') as fdin:
        with open(outfile, 'w') as fdout:
            process_schema(dump_schemas, dump_tables, fdin, fdout, None)
    expected_out = """SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = off;
SET check_function_bodies = false;
SET client_min_messages = warning;
SET escape_string_warning = off;
SET default_with_oids = false;
--
-- Name: Ж_schema; Type: SCHEMA; Schema: -; Owner: user_role_a
--
CREATE SCHEMA "Ж_schema";
ALTER SCHEMA "Ж_schema" OWNER TO user_role_a;
--
-- Name: plpgsql; Type: PROCEDURAL LANGUAGE; Schema: -; Owner: dcddev
--
CREATE PROCEDURAL LANGUAGE plpgsql;
ALTER FUNCTION plpgsql_call_handler() OWNER TO dcddev;
ALTER FUNCTION plpgsql_validator(oid) OWNER TO dcddev;
SET search_path = "Ж_schema", pg_catalog;
--
-- Name: Ж; Type: TABLE; Schema: Ж_schema; Owner: user_role_b; Tablespace:
--
CREATE TABLE "Ж" (
a character(1) NOT NULL,
b character(60)
) DISTRIBUTED BY (a);
ALTER TABLE "Ж_schema"."Ж" OWNER TO user_role_b;
SET search_path = "Ж_schema", pg_catalog;
--
-- Data for Name: Ж; Type: TABLE DATA; Schema: Ж_schema; Owner: user_role_b
--
COPY "Ж" (a, b) FROM stdin;
\.
--
SET search_path = "Ж_schema", pg_catalog;
--
-- Name: Ж; Type: ACL; Schema: Ж_schema; Owner: user_role_b
--
REVOKE ALL ON TABLE "Ж" FROM PUBLIC;
REVOKE ALL ON TABLE "Ж" FROM user_role_b;
GRANT ALL ON TABLE "Ж" TO user_role_b;
--
-- Greenplum Database database dump complete
--
"""
    with open(outfile, 'r') as fd:
        results = fd.read()
    # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
    self.assertEqual(results, expected_out)
| 24.140255
| 124
| 0.687668
| 11,080
| 79,518
| 4.70009
| 0.030596
| 0.028112
| 0.025558
| 0.025174
| 0.957352
| 0.950535
| 0.945677
| 0.934021
| 0.92609
| 0.920003
| 0
| 0.025072
| 0.200973
| 79,518
| 3,293
| 125
| 24.147586
| 0.794561
| 0.000428
| 0
| 0.902685
| 0
| 0
| 0.697139
| 0.062303
| 0
| 0
| 0
| 0
| 0.025168
| 1
| 0.022651
| false
| 0
| 0.002517
| 0
| 0.025587
| 0.001258
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
20de27bcf4a199d857d9a6ab6ada66493ec5e01b
| 38
|
py
|
Python
|
src/lib/base64.py
|
DTenore/skulpt
|
098d20acfb088d6db85535132c324b7ac2f2d212
|
[
"MIT"
] | 2,671
|
2015-01-03T08:23:25.000Z
|
2022-03-31T06:15:48.000Z
|
src/lib/base64.py
|
wakeupmuyunhe/skulpt
|
a8fb11a80fb6d7c016bab5dfe3712517a350b347
|
[
"MIT"
] | 972
|
2015-01-05T08:11:00.000Z
|
2022-03-29T13:47:15.000Z
|
src/lib/base64.py
|
wakeupmuyunhe/skulpt
|
a8fb11a80fb6d7c016bab5dfe3712517a350b347
|
[
"MIT"
] | 845
|
2015-01-03T19:53:36.000Z
|
2022-03-29T18:34:22.000Z
|
# Stub module: report that base64 is unavailable in this environment.
import _sk_fail

_sk_fail._("base64")
| 19
| 37
| 0.763158
| 6
| 38
| 4
| 0.666667
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057143
| 0.078947
| 38
| 1
| 38
| 38
| 0.628571
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
45b66499d402102594662fb970c7fa0b1c71d822
| 111
|
py
|
Python
|
glx/shader_program/__init__.py
|
NeilGirdhar/glx
|
643abc73e05f94ea56a00deb927a3978f01184f2
|
[
"MIT"
] | 3
|
2018-04-18T02:42:36.000Z
|
2020-09-06T15:48:17.000Z
|
glx/shader_program/__init__.py
|
NeilGirdhar/glx
|
643abc73e05f94ea56a00deb927a3978f01184f2
|
[
"MIT"
] | 1
|
2020-07-12T22:36:45.000Z
|
2020-07-13T14:20:32.000Z
|
glx/shader_program/__init__.py
|
NeilGirdhar/glx
|
643abc73e05f94ea56a00deb927a3978f01184f2
|
[
"MIT"
] | 1
|
2021-04-27T14:53:34.000Z
|
2021-04-27T14:53:34.000Z
|
from .attribute import *
from .buffer_description import *
from .shader import *
from .shader_program import *
| 22.2
| 33
| 0.783784
| 14
| 111
| 6.071429
| 0.5
| 0.352941
| 0.376471
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144144
| 111
| 4
| 34
| 27.75
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b371b53a0a360f377abd8385a8d2d5099784c95c
| 4,913
|
py
|
Python
|
test/pyaz/synapse/trigger/__init__.py
|
bigdatamoore/py-az-cli
|
54383a4ee7cc77556f6183e74e992eec95b28e01
|
[
"MIT"
] | null | null | null |
test/pyaz/synapse/trigger/__init__.py
|
bigdatamoore/py-az-cli
|
54383a4ee7cc77556f6183e74e992eec95b28e01
|
[
"MIT"
] | 9
|
2021-09-24T16:37:24.000Z
|
2021-12-24T00:39:19.000Z
|
test/pyaz/synapse/trigger/__init__.py
|
bigdatamoore/py-az-cli
|
54383a4ee7cc77556f6183e74e992eec95b28e01
|
[
"MIT"
] | null | null | null |
import json, subprocess
from ... pyaz_utils import get_cli_name, get_params
def create(workspace_name, name, file, no_wait=None):
    """Run ``az synapse trigger create`` with the caller's arguments.

    Returns the CLI's JSON output parsed into Python objects; raises
    Exception carrying the captured stderr when the CLI emitted no stdout.
    """
    params = get_params(locals())
    command = "az synapse trigger create " + params
    print(command)
    # NOTE(review): shell=True with string-concatenated arguments is
    # injection-prone; prefer an argument list with shell=False.
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    # BUG FIX: print() calls placed after return/raise were unreachable; removed.
    raise Exception(stderr)
def set(workspace_name, name, file, no_wait=None):
    """Run ``az synapse trigger set`` with the caller's arguments.

    Returns the CLI's JSON output parsed into Python objects; raises
    Exception carrying the captured stderr when the CLI emitted no stdout.
    (Name shadows the builtin ``set``; kept for interface compatibility.)
    """
    params = get_params(locals())
    command = "az synapse trigger set " + params
    print(command)
    # NOTE(review): shell=True with string-concatenated arguments is injection-prone.
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    # BUG FIX: print() calls placed after return/raise were unreachable; removed.
    raise Exception(stderr)
def list(workspace_name):
    """Run ``az synapse trigger list`` for the given workspace.

    Returns the CLI's JSON output parsed into Python objects; raises
    Exception carrying the captured stderr when the CLI emitted no stdout.
    (Name shadows the builtin ``list``; kept for interface compatibility.)
    """
    params = get_params(locals())
    command = "az synapse trigger list " + params
    print(command)
    # NOTE(review): shell=True with string-concatenated arguments is injection-prone.
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    # BUG FIX: print() calls placed after return/raise were unreachable; removed.
    raise Exception(stderr)
def show(workspace_name, name):
    """Run ``az synapse trigger show`` for the named trigger.

    Returns the CLI's JSON output parsed into Python objects; raises
    Exception carrying the captured stderr when the CLI emitted no stdout.
    """
    params = get_params(locals())
    command = "az synapse trigger show " + params
    print(command)
    # NOTE(review): shell=True with string-concatenated arguments is injection-prone.
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    # BUG FIX: print() calls placed after return/raise were unreachable; removed.
    raise Exception(stderr)
def delete(workspace_name, name, yes=None, no_wait=None):
    """Run ``az synapse trigger delete`` for the named trigger.

    Returns the CLI's JSON output parsed into Python objects; raises
    Exception carrying the captured stderr when the CLI emitted no stdout.
    """
    params = get_params(locals())
    command = "az synapse trigger delete " + params
    print(command)
    # NOTE(review): shell=True with string-concatenated arguments is injection-prone.
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    # BUG FIX: print() calls placed after return/raise were unreachable; removed.
    raise Exception(stderr)
def subscribe_to_event(workspace_name, name, no_wait=None):
    """Run ``az synapse trigger subscribe-to-event`` for the named trigger.

    Returns the CLI's JSON output parsed into Python objects; raises
    Exception carrying the captured stderr when the CLI emitted no stdout.
    """
    params = get_params(locals())
    command = "az synapse trigger subscribe-to-event " + params
    print(command)
    # NOTE(review): shell=True with string-concatenated arguments is injection-prone.
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    # BUG FIX: print() calls placed after return/raise were unreachable; removed.
    raise Exception(stderr)
def get_event_subscription_status(workspace_name, name):
    """Run ``az synapse trigger get-event-subscription-status`` for the trigger.

    Returns the CLI's JSON output parsed into Python objects; raises
    Exception carrying the captured stderr when the CLI emitted no stdout.
    """
    params = get_params(locals())
    command = "az synapse trigger get-event-subscription-status " + params
    print(command)
    # NOTE(review): shell=True with string-concatenated arguments is injection-prone.
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    # BUG FIX: print() calls placed after return/raise were unreachable; removed.
    raise Exception(stderr)
def unsubscribe_from_event(workspace_name, name, no_wait=None):
    """Run ``az synapse trigger unsubscribe-from-event`` for the named trigger.

    Returns the CLI's JSON output parsed into Python objects; raises
    Exception carrying the captured stderr when the CLI emitted no stdout.
    """
    params = get_params(locals())
    command = "az synapse trigger unsubscribe-from-event " + params
    print(command)
    # NOTE(review): shell=True with string-concatenated arguments is injection-prone.
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    # BUG FIX: print() calls placed after return/raise were unreachable; removed.
    raise Exception(stderr)
def start(workspace_name, name, no_wait=None):
    """Run ``az synapse trigger start`` for the named trigger.

    Returns the CLI's JSON output parsed into Python objects; raises
    Exception carrying the captured stderr when the CLI emitted no stdout.
    """
    params = get_params(locals())
    command = "az synapse trigger start " + params
    print(command)
    # NOTE(review): shell=True with string-concatenated arguments is injection-prone.
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    # BUG FIX: print() calls placed after return/raise were unreachable; removed.
    raise Exception(stderr)
def stop(workspace_name, name, no_wait=None):
    """Run ``az synapse trigger stop`` for the named trigger.

    Returns the CLI's JSON output parsed into Python objects; raises
    Exception carrying the captured stderr when the CLI emitted no stdout.
    """
    params = get_params(locals())
    command = "az synapse trigger stop " + params
    print(command)
    # NOTE(review): shell=True with string-concatenated arguments is injection-prone.
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    # BUG FIX: print() calls placed after return/raise were unreachable; removed.
    raise Exception(stderr)
| 34.118056
| 96
| 0.660493
| 600
| 4,913
| 5.345
| 0.086667
| 0.087309
| 0.062364
| 0.065482
| 0.918304
| 0.918304
| 0.918304
| 0.918304
| 0.918304
| 0.903336
| 0
| 0.005244
| 0.223692
| 4,913
| 143
| 97
| 34.356643
| 0.835606
| 0
| 0
| 0.833333
| 0
| 0
| 0.08162
| 0.010381
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075758
| false
| 0
| 0.015152
| 0
| 0.166667
| 0.227273
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2fa7f17e92a2676e11d6182df31ac6e83e65206c
| 13,418
|
py
|
Python
|
tests/app/parser/test_metadata_parser.py
|
qateam123/eq
|
704757952323647d659c49a71975c56406ff4047
|
[
"MIT"
] | null | null | null |
tests/app/parser/test_metadata_parser.py
|
qateam123/eq
|
704757952323647d659c49a71975c56406ff4047
|
[
"MIT"
] | 8
|
2020-03-24T15:24:18.000Z
|
2022-03-02T04:32:56.000Z
|
tests/app/parser/test_metadata_parser.py
|
qateam123/eq
|
704757952323647d659c49a71975c56406ff4047
|
[
"MIT"
] | null | null | null |
import unittest
from app.authentication.invalid_token_exception import InvalidTokenException
from app.parser.metadata_parser import parse_metadata, is_valid_metadata
from tests.app.framework.survey_runner_test_case import SurveyRunnerTestCase
class TestMetadataParser(SurveyRunnerTestCase):  # pylint: disable=too-many-public-methods
    """Tests for parse_metadata / is_valid_metadata.

    The missing-field tests build the reference JWT with one required key
    removed and check that is_valid_metadata reports exactly that field;
    the invalid-value tests override a field with bad data and check that
    parse_metadata raises InvalidTokenException.
    """

    # Reference token with every *required* metadata field present.
    # tx_id, trad_as and employment_date are optional and deliberately absent.
    _BASE_JWT = {
        "user_id": "1",
        "form_type": "a",
        "collection_exercise_sid": "test-sid",
        "eq_id": "2",
        "period_id": "3",
        "period_str": "2016-01-01",
        "ref_p_start_date": "2016-02-02",
        "ref_p_end_date": "2016-03-03",
        "ru_ref": "2016-04-04",
        "ru_name": "Apple",
        "return_by": "2016-07-07",
    }

    @classmethod
    def _make_jwt(cls, without=None, **overrides):
        """Copy the reference JWT, optionally dropping one key and/or
        overriding (or adding) field values."""
        jwt = dict(cls._BASE_JWT)
        if without is not None:
            del jwt[without]
        jwt.update(overrides)
        return jwt

    def _assert_missing(self, field):
        """is_valid_metadata must report *field* when it is absent."""
        valid, reported = is_valid_metadata(self._make_jwt(without=field))
        self.assertFalse(valid)
        self.assertEqual(field, reported)

    def _assert_parse_rejects(self, **overrides):
        """The token passes is_valid_metadata but parse_metadata raises."""
        jwt = self._make_jwt(**overrides)
        valid, _ = is_valid_metadata(jwt)
        self.assertTrue(valid)
        with self.assertRaises(InvalidTokenException) as ite:
            parse_metadata(jwt)
        self.assertIn("Incorrect data in token", ite.exception.value)

    def _assert_field_parsed(self, field):
        """The parsed metadata must echo *field* from the source token."""
        with self.application.test_request_context():
            self.assertEqual(self.jwt.get(field), self.metadata[field])

    def setUp(self):
        super().setUp()
        self.jwt = self._make_jwt(tx_id="4ec3aa9e-e8ac-4c8d-9793-6ed88b957c2f")
        with self.application.test_request_context():
            self.metadata = parse_metadata(self.jwt)

    def test_transaction_id(self):
        self._assert_field_parsed("tx_id")

    def test_form_type(self):
        self._assert_field_parsed("form_type")

    def test_collection_id(self):
        self._assert_field_parsed("collection_exercise_sid")

    def test_get_eq_id(self):
        self._assert_field_parsed("eq_id")

    def test_get_period_id(self):
        self._assert_field_parsed("period_id")

    def test_get_period_str(self):
        self._assert_field_parsed("period_str")

    def test_ref_p_start_date(self):
        self._assert_field_parsed("ref_p_start_date")

    def test_ref_p_end_date(self):
        self._assert_field_parsed("ref_p_end_date")

    def test_ru_ref(self):
        # BUG FIX: this test previously re-asserted ref_p_end_date (a
        # copy-paste of the test above); it now checks ru_ref as named.
        self._assert_field_parsed("ru_ref")

    def test_is_valid(self):
        with self.application.test_request_context():
            self.assertTrue(is_valid_metadata(self.jwt))

    def test_is_valid_fails_missing_user_id(self):
        self._assert_missing("user_id")

    def test_is_valid_fails_missing_form_type(self):
        self._assert_missing("form_type")

    def test_is_valid_fails_missing_collection_exercise_sid(self):
        self._assert_missing("collection_exercise_sid")

    def test_is_valid_fails_missing_eq_id(self):
        self._assert_missing("eq_id")

    def test_is_valid_fails_missing_period_id(self):
        self._assert_missing("period_id")

    def test_is_valid_fails_missing_period_str(self):
        self._assert_missing("period_str")

    def test_is_valid_fails_missing_ref_p_start_date(self):
        self._assert_missing("ref_p_start_date")

    def test_is_valid_fails_invalid_ref_p_start_date(self):
        # Month 13 does not exist.
        self._assert_parse_rejects(ref_p_start_date="2016-13-31")

    def test_is_valid_fails_invalid_ref_p_end_date(self):
        # April has no 31st day.
        self._assert_parse_rejects(ref_p_start_date="2016-12-31",
                                   ref_p_end_date="2016-04-31")

    def test_is_valid_fails_invalid_return_by(self):
        # September has no 31st day.
        self._assert_parse_rejects(ref_p_start_date="2016-12-31",
                                   ref_p_end_date="2016-03-31",
                                   return_by="2016-09-31")

    def test_is_valid_fails_missing_ref_p_end_date(self):
        self._assert_missing("ref_p_end_date")

    def test_is_valid_fails_missing_ru_ref(self):
        self._assert_missing("ru_ref")

    def test_is_valid_fails_missing_ru_name(self):
        self._assert_missing("ru_name")

    def test_is_valid_fails_missing_return_by(self):
        self._assert_missing("return_by")

    def test_is_valid_does_not_fail_missing_optional_value_in_token(self):
        # tx_id, trad_as and employment_date are optional and might not be in the token
        valid, _ = is_valid_metadata(self._make_jwt())
        self.assertTrue(valid)

    def test_invalid_tx_id(self):
        # Not a UUID at all.
        self._assert_parse_rejects(tx_id="12121")

    def test_malformed_tx_id(self):
        # One character short of a valid UUID.
        self._assert_parse_rejects(tx_id="83a3db82-bea7-403c-a411-6357ff70f2f")
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| 35.310526
| 111
| 0.539574
| 1,645
| 13,418
| 4.067477
| 0.071125
| 0.028695
| 0.026155
| 0.0411
| 0.85488
| 0.83291
| 0.829024
| 0.805709
| 0.771932
| 0.760275
| 0
| 0.084041
| 0.315397
| 13,418
| 379
| 112
| 35.403694
| 0.64435
| 0.010806
| 0
| 0.758721
| 0
| 0
| 0.273817
| 0.040021
| 0
| 0
| 0
| 0
| 0.139535
| 1
| 0.081395
| false
| 0
| 0.011628
| 0
| 0.09593
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2fd7e022f34994e2d2f9dd8ae5459291ade58bdf
| 5,302
|
py
|
Python
|
filebrowser/tests/base.py
|
klueska/django-filebrowser
|
71cd35e876529d526a5b31380609b93209c7a0e4
|
[
"BSD-3-Clause"
] | 6
|
2016-03-10T19:38:17.000Z
|
2021-02-23T09:34:59.000Z
|
filebrowser/tests/base.py
|
klueska/django-filebrowser
|
71cd35e876529d526a5b31380609b93209c7a0e4
|
[
"BSD-3-Clause"
] | 602
|
2015-01-05T16:30:08.000Z
|
2021-02-02T21:44:38.000Z
|
filebrowser/tests/base.py
|
klueska/django-filebrowser
|
71cd35e876529d526a5b31380609b93209c7a0e4
|
[
"BSD-3-Clause"
] | 18
|
2015-02-12T15:50:17.000Z
|
2021-04-27T16:40:36.000Z
|
# coding: utf-8
# PYTHON IMPORTS
import os
import ntpath
import posixpath
# DJANGO IMPORTS
from django.test import TestCase
from django.contrib.auth.models import User
from django.utils.encoding import filepath_to_uri
# FILEBROWSER IMPORTS
from filebrowser.settings import *
import filebrowser
from filebrowser.base import FileObject
from filebrowser.sites import site
class FileObjectPathTests(TestCase):
    """FileObject path decomposition under Windows and POSIX path semantics."""

    def setUp(self):
        """Remember the module state that the tests monkey-patch."""
        self.original_directory = site.directory
        self.original_media_url = filebrowser.base.MEDIA_URL
        self.original_path = filebrowser.base.os.path

    def tearDown(self):
        """Undo the monkey-patching performed by the tests."""
        filebrowser.base.MEDIA_URL = self.original_media_url
        filebrowser.base.os.path = self.original_path
        site.directory = self.original_directory

    def test_windows_paths(self):
        """Exercise Windows path handling via ntpath, independent of the host OS."""
        site.directory = 'uploads/'
        filebrowser.base.os.path = ntpath
        filebrowser.base.MEDIA_URL = '/media/'
        file_obj = FileObject('uploads\\testdir\\testfile.jpg', site=site)
        self.assertEqual(file_obj.path_relative_directory, 'testdir\\testfile.jpg')
        self.assertEqual(file_obj.directory, 'testdir\\testfile.jpg')
        self.assertEqual(file_obj.folder, 'testdir')

    def test_posix_paths(self):
        """Exercise POSIX path handling via posixpath, independent of the host OS."""
        site.directory = 'uploads/'
        filebrowser.base.os.path = posixpath
        filebrowser.base.MEDIA_URL = '/media/'
        file_obj = FileObject('uploads/testdir/testfile.jpg', site=site)
        self.assertEqual(file_obj.path_relative_directory, 'testdir/testfile.jpg')
        self.assertEqual(file_obj.directory, 'testdir/testfile.jpg')
        self.assertEqual(file_obj.folder, 'testdir')
class FileObjectUnicodeTests(TestCase):
    """FileObject path decomposition with non-ASCII and special characters."""

    def setUp(self):
        """Remember the module state that the tests monkey-patch."""
        self.original_directory = site.directory
        self.original_media_url = filebrowser.base.MEDIA_URL
        self.original_path = filebrowser.base.os.path

    def tearDown(self):
        """Undo the monkey-patching performed by the tests."""
        filebrowser.base.MEDIA_URL = self.original_media_url
        filebrowser.base.os.path = self.original_path
        site.directory = self.original_directory

    def test_windows_paths(self):
        """Exercise Windows path handling via ntpath, independent of the host OS."""
        site.directory = 'uploads/'
        filebrowser.base.os.path = ntpath
        filebrowser.base.MEDIA_URL = '/media/'
        file_obj = FileObject('uploads\\$%^&*\\測試文件.jpg', site=site)
        self.assertEqual(file_obj.path_relative_directory, '$%^&*\\測試文件.jpg')
        self.assertEqual(file_obj.directory, '$%^&*\\測試文件.jpg')
        self.assertEqual(file_obj.folder, '$%^&*')

    def test_posix_paths(self):
        """Exercise POSIX path handling via posixpath, independent of the host OS."""
        site.directory = 'uploads/'
        filebrowser.base.os.path = posixpath
        filebrowser.base.MEDIA_URL = '/media/'
        file_obj = FileObject('uploads/$%^&*/測試文件.jpg', site=site)
        self.assertEqual(file_obj.path_relative_directory, '$%^&*/測試文件.jpg')
        self.assertEqual(file_obj.directory, '$%^&*/測試文件.jpg')
        self.assertEqual(file_obj.folder, '$%^&*')
class FileObjectVersionTests(TestCase):
    # NOTE(review): despite its name, this class is a byte-for-byte copy of
    # FileObjectPathTests above and contains no version-related assertions.
    # Presumably version-specific tests were intended here — confirm upstream
    # before removing the duplication.
    def setUp(self):
        """
        Save original values/functions so they can be restored in tearDown
        """
        self.original_path = filebrowser.base.os.path
        self.original_directory = site.directory
        self.original_media_url = filebrowser.base.MEDIA_URL

    def test_windows_paths(self):
        """
        Use ntpath to test windows paths independently from current os
        """
        site.directory = 'uploads/'
        filebrowser.base.os.path = ntpath
        filebrowser.base.MEDIA_URL = '/media/'
        f = FileObject('uploads\\testdir\\testfile.jpg', site=site)
        self.assertEqual(f.path_relative_directory, 'testdir\\testfile.jpg')
        self.assertEqual(f.directory, 'testdir\\testfile.jpg')
        self.assertEqual(f.folder, r'testdir')

    def test_posix_paths(self):
        """
        Use posixpath to test posix paths independently from current os
        """
        filebrowser.base.os.path = posixpath
        site.directory = 'uploads/'
        filebrowser.base.MEDIA_URL = '/media/'
        f = FileObject('uploads/testdir/testfile.jpg', site=site)
        self.assertEqual(f.path_relative_directory, 'testdir/testfile.jpg')
        self.assertEqual(f.directory, 'testdir/testfile.jpg')
        self.assertEqual(f.folder, r'testdir')

    def tearDown(self):
        """
        Restore original values/functions
        """
        filebrowser.base.MEDIA_URL = self.original_media_url
        filebrowser.base.os.path = self.original_path
        site.directory = self.original_directory
| 33.1375
| 76
| 0.633346
| 583
| 5,302
| 5.653516
| 0.114923
| 0.113774
| 0.087379
| 0.083738
| 0.886226
| 0.886226
| 0.886226
| 0.875
| 0.875
| 0.875
| 0
| 0.000255
| 0.259336
| 5,302
| 159
| 77
| 33.345912
| 0.839063
| 0.141079
| 0
| 0.776471
| 0
| 0
| 0.11985
| 0.057584
| 0
| 0
| 0
| 0
| 0.211765
| 1
| 0.141176
| false
| 0
| 0.117647
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
64155cb4e678984691ed4b6a6e65eb60a44adc8d
| 3,190
|
py
|
Python
|
tests/cutoff/cutoff_cleanup.py
|
cmutel/Ocelot
|
20e9639570c43f84ae255750a6c402ebabe00981
|
[
"BSD-3-Clause"
] | 21
|
2016-06-01T14:10:07.000Z
|
2022-02-28T01:56:31.000Z
|
tests/cutoff/cutoff_cleanup.py
|
cmutel/Ocelot
|
20e9639570c43f84ae255750a6c402ebabe00981
|
[
"BSD-3-Clause"
] | 152
|
2016-05-16T21:33:22.000Z
|
2019-06-24T12:57:14.000Z
|
tests/cutoff/cutoff_cleanup.py
|
cmutel/Ocelot
|
20e9639570c43f84ae255750a6c402ebabe00981
|
[
"BSD-3-Clause"
] | 12
|
2016-09-05T15:35:59.000Z
|
2021-07-03T19:28:47.000Z
|
# -*- coding: utf-8 -*-
from ocelot.transformations.cutoff.cleanup import (
drop_rp_activity_links,
drop_zero_amount_activity_links,
remove_consequential_exchanges,
)
def test_remove_consequential_exchanges():
    """An exchange with a positive 'consequential' property is dropped;
    negative-consequential and inconsequential exchanges survive."""
    def exchange(name, prop_name, prop_amount):
        # One exchange with a single property, matching the fixture shape.
        return {
            'name': name,
            'amount': 1,
            'properties': [{'name': prop_name, 'amount': prop_amount}],
        }

    given = [{
        'name': 'a name',
        'exchanges': [
            exchange('exchange 1', 'consequential', 1),
            exchange('exchange 2', 'consequential', -1),
            exchange('exchange 3', 'inconsequential', 1),
        ],
    }]
    expected = [{
        'name': 'a name',
        'exchanges': [
            exchange('exchange 2', 'consequential', -1),
            exchange('exchange 3', 'inconsequential', 1),
        ],
    }]
    assert remove_consequential_exchanges(given) == expected
def test_drop_rp_activity_links_rp():
    """The 'activity link' key is stripped from reference product exchanges
    only; other exchange types keep theirs."""
    untouched = {
        'type': 'not reference product',
        'activity link': 'keep me',
    }
    given = [{
        'name': 'a name',
        'exchanges': [
            dict(untouched),
            {
                'type': 'reference product',
                'activity link': 'delete me',
                'name': 'something for logging',
            },
        ],
    }]
    expected = [{
        'name': 'a name',
        'exchanges': [
            dict(untouched),
            {
                'type': 'reference product',
                'name': 'something for logging',
            },
        ],
    }]
    assert drop_rp_activity_links(given) == expected
def test_drop_rp_activity_links_dropped_product():
    """'dropped product' exchanges also lose their 'activity link', just like
    reference products."""
    untouched = {
        'type': 'not reference product',
        'activity link': 'keep me',
    }
    given = [{
        'name': 'a name',
        'exchanges': [
            dict(untouched),
            {
                'type': 'dropped product',
                'activity link': 'delete me',
                'name': 'something for logging',
            },
        ],
    }]
    expected = [{
        'name': 'a name',
        'exchanges': [
            dict(untouched),
            {
                'type': 'dropped product',
                'name': 'something for logging',
            },
        ],
    }]
    assert drop_rp_activity_links(given) == expected
def test_drop_zero_amount_activity_links():
    """Only exchanges whose amount is zero lose their 'activity link'."""
    untouched = {
        'type': 'foo',
        'amount': 1,
        'activity link': 'keep me',
    }
    given = [{
        'name': 'a name',
        'exchanges': [
            dict(untouched),
            {
                'type': 'foo',
                'amount': 0,
                'activity link': 'delete me',
                'name': 'something for logging',
            },
        ],
    }]
    expected = [{
        'name': 'a name',
        'exchanges': [
            dict(untouched),
            {
                'type': 'foo',
                'amount': 0,
                'name': 'something for logging',
            },
        ],
    }]
    assert drop_zero_amount_activity_links(given) == expected
| 27.73913
| 68
| 0.462069
| 270
| 3,190
| 5.311111
| 0.166667
| 0.058577
| 0.050209
| 0.100418
| 0.864017
| 0.836123
| 0.740586
| 0.740586
| 0.702929
| 0.674338
| 0
| 0.01006
| 0.376803
| 3,190
| 114
| 69
| 27.982456
| 0.711268
| 0.006583
| 0
| 0.87963
| 0
| 0
| 0.312283
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 1
| 0.037037
| false
| 0
| 0.009259
| 0
| 0.046296
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
643bc6dc139ccf9e6c0dfbe0d0d67c8703890ed3
| 3,555
|
py
|
Python
|
tests/Unit/PointwiseFunctions/AnalyticData/GrMhd/RiemannProblem.py
|
nilsvu/spectre
|
1455b9a8d7e92db8ad600c66f54795c29c3052ee
|
[
"MIT"
] | 117
|
2017-04-08T22:52:48.000Z
|
2022-03-25T07:23:36.000Z
|
tests/Unit/PointwiseFunctions/AnalyticData/GrMhd/RiemannProblem.py
|
GitHimanshuc/spectre
|
4de4033ba36547113293fe4dbdd77591485a4aee
|
[
"MIT"
] | 3,177
|
2017-04-07T21:10:18.000Z
|
2022-03-31T23:55:59.000Z
|
tests/Unit/PointwiseFunctions/AnalyticData/GrMhd/RiemannProblem.py
|
geoffrey4444/spectre
|
9350d61830b360e2d5b273fdd176dcc841dbefb0
|
[
"MIT"
] | 85
|
2017-04-07T19:36:13.000Z
|
2022-03-01T10:21:00.000Z
|
# Distributed under the MIT License.
# See LICENSE.txt for details.
import numpy as np
def rest_mass_density(x, adiabatic_index, left_density, right_density,
                      left_pressure, right_pressure, left_velocity,
                      right_velocity, left_magnetic_field,
                      right_magnetic_field, lapse, shift):
    """Piecewise-constant density: left state for x[0] <= 0, right otherwise.

    Only ``x`` and the two density states are used; the remaining arguments
    exist so the pointwise functions in this module share one signature.
    """
    assert len(x) == 3
    if x[0] <= 0.0:
        return left_density
    return right_density
def spatial_velocity(x, adiabatic_index, left_density, right_density,
                     left_pressure, right_pressure, left_velocity,
                     right_velocity, left_magnetic_field, right_magnetic_field,
                     lapse, shift):
    """Piecewise-constant spatial velocity, as a numpy array.

    Returns the left-state velocity for x[0] <= 0 and the right-state velocity
    otherwise.  Only ``x`` and the two velocity states are used; the remaining
    arguments exist so the pointwise functions in this module share one
    signature.
    """
    # Guard the coordinate shape, consistent with rest_mass_density above
    # (this check was missing here but present in the sibling function).
    assert len(x) == 3
    return np.asarray(left_velocity if x[0] <= 0.0 else right_velocity)
def specific_internal_energy(x, adiabatic_index, left_density, right_density,
                             left_pressure, right_pressure, left_velocity,
                             right_velocity, left_magnetic_field,
                             right_magnetic_field, lapse, shift):
    """Ideal-gas internal energy: p / ((gamma - 1) * rho) for the local state."""
    state = (x, adiabatic_index, left_density, right_density, left_pressure,
             right_pressure, left_velocity, right_velocity,
             left_magnetic_field, right_magnetic_field, lapse, shift)
    pressure = compute_pressure(*state)
    density = rest_mass_density(*state)
    # Same operation order as the closed-form expression 1/(gamma-1) * p / rho.
    return 1.0 / (adiabatic_index - 1.0) * pressure / density
def compute_pressure(x, adiabatic_index, left_density, right_density,
                     left_pressure, right_pressure, left_velocity,
                     right_velocity, left_magnetic_field, right_magnetic_field,
                     lapse, shift):
    """Piecewise-constant pressure: left state for x[0] <= 0, right otherwise."""
    on_left = x[0] <= 0.0
    return left_pressure if on_left else right_pressure
def lorentz_factor(x, adiabatic_index, left_density, right_density,
                   left_pressure, right_pressure, left_velocity,
                   right_velocity, left_magnetic_field, right_magnetic_field,
                   lapse, shift):
    """Lorentz factor W = 1 / sqrt(1 - v.v) from the local spatial velocity."""
    velocity = spatial_velocity(x, adiabatic_index, left_density,
                                right_density, left_pressure, right_pressure,
                                left_velocity, right_velocity,
                                left_magnetic_field, right_magnetic_field,
                                lapse, shift)
    speed_squared = np.dot(velocity, velocity)
    return 1. / np.sqrt(1. - speed_squared)
def specific_enthalpy(x, adiabatic_index, left_density, right_density,
                      left_pressure, right_pressure, left_velocity,
                      right_velocity, left_magnetic_field,
                      right_magnetic_field, lapse, shift):
    """Specific enthalpy h = 1 + gamma * epsilon for the local state."""
    epsilon = specific_internal_energy(x, adiabatic_index, left_density,
                                       right_density, left_pressure,
                                       right_pressure, left_velocity,
                                       right_velocity, left_magnetic_field,
                                       right_magnetic_field, lapse, shift)
    return 1.0 + adiabatic_index * epsilon
def magnetic_field(x, adiabatic_index, left_density, right_density,
                   left_pressure, right_pressure, left_velocity,
                   right_velocity, left_magnetic_field, right_magnetic_field,
                   lapse, shift):
    """Piecewise-constant magnetic field, returned as a numpy array."""
    if x[0] <= 0.0:
        chosen = left_magnetic_field
    else:
        chosen = right_magnetic_field
    return np.asarray(chosen)
def divergence_cleaning_field(x, adiabatic_index, left_density, right_density,
                              left_pressure, right_pressure, left_velocity,
                              right_velocity, left_magnetic_field,
                              right_magnetic_field, lapse, shift):
    """The divergence-cleaning scalar is identically zero for this data.

    The arguments are accepted only so this function matches the shared
    signature of the other pointwise functions in this module.
    """
    return 0.0
| 46.776316
| 79
| 0.653727
| 406
| 3,555
| 5.327586
| 0.123153
| 0.162275
| 0.102173
| 0.105409
| 0.881646
| 0.881646
| 0.881646
| 0.853907
| 0.853907
| 0.853907
| 0
| 0.009016
| 0.282419
| 3,555
| 75
| 80
| 47.4
| 0.838887
| 0.017722
| 0
| 0.607143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017857
| 1
| 0.142857
| false
| 0
| 0.017857
| 0.107143
| 0.303571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 7
|
ff3d983f80a524f2c387f2f474ac3fd681b4c986
| 4,704
|
py
|
Python
|
lookuptable.py
|
cor2ni/3D_brain_plot
|
3eae836ae1bd73736a332d53fe3775801a667350
|
[
"MIT"
] | null | null | null |
lookuptable.py
|
cor2ni/3D_brain_plot
|
3eae836ae1bd73736a332d53fe3775801a667350
|
[
"MIT"
] | null | null | null |
lookuptable.py
|
cor2ni/3D_brain_plot
|
3eae836ae1bd73736a332d53fe3775801a667350
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Here is the code for LookupTables
import vtk
import colorsys
# LookupTable: dark red - orange - white - light green - dark blue
# The input variables are the minimal and maximal values of the scalar range.
def get_lut_1(min_val, max_val): #e.g. (0, 100)
    """Build a diverging VTK lookup table: dark red -> yellow ramp -> white
    middle -> green-to-blue ramp -> dark blue.

    min_val/max_val bound the scalar range (e.g. (0, 100)).  Returns a
    (vtkColorTransferFunction, vtkLookupTable) pair covering that range.
    """
    dif = max_val - min_val
    lut = vtk.vtkLookupTable()
    lut.SetNumberOfTableValues(max_val)
    lut.SetTableRange(min_val, max_val)
    lut.Build()
    colortransfer = vtk.vtkColorTransferFunction()
    # Outer 5% at each end is clamped to the darkest colour; the central
    # 48%..52% band is pure white; the rest are the two colour ramps.
    minscalar = int(min_val + 0.05*dif)
    maxscalar = int(max_val - 0.05*dif)
    limitmin = int(min_val + 0.48*dif)
    limitmax = int(max_val - 0.48*dif)
    step = 1
    for i in range(minscalar, limitmin, step):
        # Lower ramp: hue sweeps 0 (red) -> 43/255 (yellow), lightness 0.15 -> 1.
        hmin=((i-minscalar)/(limitmin-minscalar))*(43/255) #0 #red #43/255 #yellow
        smin=1
        l=(((i-minscalar)/(limitmin-minscalar))*0.85)+0.15
        r, g, b = colorsys.hls_to_rgb(h=hmin, s=smin, l=l)
        lut.SetTableValue(i, r, g, b, 1)
        colortransfer.AddRGBPoint(i, r, g, b)
    for i in range(limitmax, maxscalar, step):
        # Upper ramp: hue sweeps 85/255 (green) -> 170/255 (blue),
        # lightness 1 -> 0.15.
        blue=170/255
        green=85/255
        hmax=(((i-limitmax)/(maxscalar-limitmax))*(blue-green))+green #85/255 #green #170/255 #blue
        smax=1
        l=((1-(i-limitmax)/(maxscalar-limitmax))*0.85)+0.15
        r, g, b = colorsys.hls_to_rgb(h=hmax, s=smax, l=l)
        lut.SetTableValue(i, r, g, b, 1)
        colortransfer.AddRGBPoint(i, r, g, b)
    for i in range(limitmin, limitmax, step):
        # Central band: pure white.
        lut.SetTableValue(i, 1, 1, 1, 1)
        colortransfer.AddRGBPoint(i, 1, 1, 1)
    # NOTE(review): the two clamp loops below reuse smin/hmax/smax, which are
    # only bound inside the ramp loops above.  If the scalar range is so small
    # that a ramp loop never runs, this raises NameError; hmax also keeps the
    # value of the *last* ramp iteration (nearly, but not exactly, pure blue).
    for i in range(min_val, minscalar, step):
        r, g, b = colorsys.hls_to_rgb(h=0, s=smin, l=0.15)
        lut.SetTableValue(i, r, g, b, 1)
        colortransfer.AddRGBPoint(i, r, g, b)
    for i in range(maxscalar, max_val, step):
        r, g, b = colorsys.hls_to_rgb(h=hmax, s=smax, l=0.15)
        lut.SetTableValue(i, r, g, b, 1)
        colortransfer.AddRGBPoint(i, r, g, b)
    return colortransfer, lut
# LookupTable: dark red - light red - white - light green - dark green
def get_lut_2(min_val, max_val):
    """Build a diverging VTK lookup table: dark red -> light red -> white ->
    light blue -> dark blue.

    min_val/max_val bound the scalar range.  Returns a
    (vtkColorTransferFunction, vtkLookupTable) pair covering that range.
    """
    dif = max_val - min_val
    lut = vtk.vtkLookupTable()
    lut.SetNumberOfTableValues(max_val)
    lut.SetTableRange(min_val, max_val)
    lut.Build()
    colortransfer = vtk.vtkColorTransferFunction()
    # Outer 5% at each end is clamped to the darkest colour; the central
    # 48%..52% band is pure white; the rest are the two lightness ramps.
    minscalar = int(min_val + 0.05*dif)
    maxscalar = int(max_val - 0.05*dif)
    limitmin = int(min_val + 0.48*dif)
    limitmax = int(max_val - 0.48*dif)
    step = 1
    # Hue/saturation are constant in this table.  They are defined up front so
    # the clamp loops at the bottom work even when a ramp loop is empty: they
    # were previously first bound *inside* the ramp loops, which raised a
    # NameError for very small scalar ranges.  Behaviour is otherwise
    # unchanged (the loops re-assigned the same constants every iteration).
    hmin = 0          # red
    smin = 1
    hmax = 170/255    # blue (85/255 would give green)
    smax = 1
    for i in range(minscalar, limitmin, step):
        # Lower ramp: red with lightness 0.15 -> 1.0.
        l = (((i-minscalar)/(limitmin-minscalar))*0.85)+0.15
        r, g, b = colorsys.hls_to_rgb(h=hmin, s=smin, l=l)
        lut.SetTableValue(i, r, g, b, 1)
        colortransfer.AddRGBPoint(i, r, g, b)
    for i in range(limitmax, maxscalar, step):
        # Upper ramp: blue with lightness 1.0 -> 0.15.
        l = ((1-(i-limitmax)/(maxscalar-limitmax))*0.85)+0.15
        r, g, b = colorsys.hls_to_rgb(h=hmax, s=smax, l=l)
        lut.SetTableValue(i, r, g, b, 1)
        colortransfer.AddRGBPoint(i, r, g, b)
    for i in range(limitmin, limitmax, step):
        # Central band: pure white.
        lut.SetTableValue(i, 1, 1, 1, 1)
        colortransfer.AddRGBPoint(i, 1, 1, 1)
    for i in range(min_val, minscalar, step):
        # Low clamp: darkest red.
        r, g, b = colorsys.hls_to_rgb(h=hmin, s=smin, l=0.15)
        colortransfer.AddRGBPoint(i, r, g, b)
        lut.SetTableValue(i, r, g, b, 1)
    for i in range(maxscalar, max_val, step):
        # High clamp: darkest blue.
        r, g, b = colorsys.hls_to_rgb(h=hmax, s=smax, l=0.15)
        lut.SetTableValue(i, r, g, b, 1)
        colortransfer.AddRGBPoint(i, r, g, b)
    return colortransfer, lut
# LookupTable: dark red - light red - white
def get_lut_3(min_val, max_val):
    """Build a sequential VTK lookup table: dark red -> light red -> white.

    min_val/max_val bound the scalar range.  Returns a
    (vtkColorTransferFunction, vtkLookupTable) pair covering that range.
    """
    dif = max_val - min_val
    lut = vtk.vtkLookupTable()
    lut.SetNumberOfTableValues(max_val)
    lut.SetTableRange(min_val, max_val)
    lut.Build()
    colortransfer = vtk.vtkColorTransferFunction()
    # Outer 5% at each end is clamped (darkest red / pure white).
    minscalar = int(min_val + 0.05*dif)
    maxscalar = int(max_val - 0.05*dif)
    step = 1
    # Constant hue/saturation, hoisted out of the loops: they were previously
    # first bound inside the low-clamp loop, so an empty low-clamp loop (tiny
    # scalar range) made the ramp loop raise NameError.  Behaviour is
    # otherwise unchanged.
    hmin = 0  # red (85/255 would be green, 170/255 blue)
    smin = 1
    for i in range(min_val, minscalar, step):
        # Low clamp: darkest red.
        r, g, b = colorsys.hls_to_rgb(h=hmin, s=smin, l=0.10)
        colortransfer.AddRGBPoint(i, r, g, b)
        lut.SetTableValue(i, r, g, b, 1)
    for i in range(minscalar, maxscalar, step):
        # Ramp: red with lightness 0.10 -> 1.0.
        l = (((i-minscalar)/(maxscalar-minscalar))*0.90)+0.10
        r, g, b = colorsys.hls_to_rgb(h=hmin, s=smin, l=l)
        lut.SetTableValue(i, r, g, b, 1)
        colortransfer.AddRGBPoint(i, r, g, b)
    for i in range(maxscalar, max_val, step):
        # High clamp: pure white.
        lut.SetTableValue(i, 1, 1, 1, 1)
        colortransfer.AddRGBPoint(i, 1, 1, 1)
    return colortransfer, lut
| 38.876033
| 105
| 0.614158
| 745
| 4,704
| 3.793289
| 0.112752
| 0.021231
| 0.031847
| 0.028309
| 0.839349
| 0.836872
| 0.82661
| 0.825195
| 0.825195
| 0.814225
| 0
| 0.052854
| 0.243835
| 4,704
| 121
| 106
| 38.876033
| 0.741636
| 0.092474
| 0
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028571
| false
| 0
| 0.019048
| 0
| 0.07619
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ff6faca15b9857a127cbf161ed15556dabf3fa7f
| 32
|
py
|
Python
|
005.py
|
thirtiseven/projecteuler
|
57ad70beae39ff82191cf37e71acade0c63a62f5
|
[
"MIT"
] | null | null | null |
005.py
|
thirtiseven/projecteuler
|
57ad70beae39ff82191cf37e71acade0c63a62f5
|
[
"MIT"
] | null | null | null |
005.py
|
thirtiseven/projecteuler
|
57ad70beae39ff82191cf37e71acade0c63a62f5
|
[
"MIT"
] | 1
|
2019-12-27T23:30:51.000Z
|
2019-12-27T23:30:51.000Z
|
import math
from functools import reduce

# Project Euler 5: smallest positive number evenly divisible by all of
# 1..20, i.e. lcm(1, ..., 20).  The original hard-coded the prime-power
# expansion 2^4 * 3^2 * 5 * 7 * 11 * 13 * 17 * 19 = 232792560; computing the
# lcm directly generalizes to any upper bound and prints the same value.
print(reduce(lambda a, b: a * b // math.gcd(a, b), range(1, 21)))
| 32
| 32
| 0.625
| 12
| 32
| 1.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.46875
| 0
| 32
| 1
| 32
| 32
| 0.15625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
ff82f1667a3c265f21ee92cf0b2ec83ec7d1347e
| 246
|
py
|
Python
|
nmigen_boards/arrow_sockit.py
|
hansfbaier/amaranth-boards
|
a3e92db69e74cc18a42808f6f72068f05efe018e
|
[
"BSD-2-Clause"
] | 1
|
2022-01-22T20:23:07.000Z
|
2022-01-22T20:23:07.000Z
|
nmigen_boards/arrow_sockit.py
|
amaranth-community-unofficial/amaranth-boards
|
eacb18700d0ed97f525737ca80d923ebd5851505
|
[
"BSD-2-Clause"
] | null | null | null |
nmigen_boards/arrow_sockit.py
|
amaranth-community-unofficial/amaranth-boards
|
eacb18700d0ed97f525737ca80d923ebd5851505
|
[
"BSD-2-Clause"
] | null | null | null |
# Backwards-compatibility shim: the nmigen_boards package was renamed to
# amaranth_boards.  Re-export the new module's public API under the old
# import path and warn the importer (stacklevel=2 attributes the warning to
# the module doing the import, not to this shim).
from amaranth_boards.arrow_sockit import *
# Star-import does not bind __all__ itself, so copy it over explicitly to
# preserve the re-exported module's public-API declaration.
from amaranth_boards.arrow_sockit import __all__
import warnings
warnings.warn("instead of nmigen_boards.arrow_sockit, use amaranth_boards.arrow_sockit",
              DeprecationWarning, stacklevel=2)
| 35.142857
| 88
| 0.813008
| 31
| 246
| 6.064516
| 0.516129
| 0.234043
| 0.361702
| 0.398936
| 0.37234
| 0.37234
| 0
| 0
| 0
| 0
| 0
| 0.004673
| 0.130081
| 246
| 7
| 89
| 35.142857
| 0.873832
| 0
| 0
| 0
| 0
| 0
| 0.287449
| 0.222672
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
44238f6ac5c63e24391aa71487f3baf3720a1937
| 186
|
py
|
Python
|
exercises/isfile.py
|
DaniDiazTech/Check-if-a-file-exists-in-Python
|
fbed0ae4e053076568279c6ba8a4531a54458d9e
|
[
"MIT"
] | null | null | null |
exercises/isfile.py
|
DaniDiazTech/Check-if-a-file-exists-in-Python
|
fbed0ae4e053076568279c6ba8a4531a54458d9e
|
[
"MIT"
] | null | null | null |
exercises/isfile.py
|
DaniDiazTech/Check-if-a-file-exists-in-Python
|
fbed0ae4e053076568279c6ba8a4531a54458d9e
|
[
"MIT"
] | null | null | null |
import os

# Demonstrate os.path.isfile: it is True only for an existing *regular file*
# — an existing directory and a missing path both print False.
for candidate in ('testfile.txt',
                  'testdirectory',
                  'i-dont-even-exist',
                  'testdirectory/otherfile.txt'):
    print(os.path.isfile(candidate))
| 20.666667
| 52
| 0.747312
| 28
| 186
| 4.964286
| 0.464286
| 0.201439
| 0.316547
| 0.489209
| 0.431655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048387
| 186
| 9
| 52
| 20.666667
| 0.785311
| 0
| 0
| 0
| 0
| 0
| 0.368984
| 0.144385
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.2
| 0
| 0.2
| 0.8
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
447b8a423e97efe6d73ca0a0d5ba42e97914570b
| 113
|
py
|
Python
|
source/documentModel/representations/__init__.py
|
vilaras/PyINSECT
|
4cc20659c47bd5f9c394d175cb041b729f6eb132
|
[
"Apache-2.0"
] | 178
|
2016-09-21T19:51:28.000Z
|
2021-09-07T17:37:06.000Z
|
source/documentModel/representations/__init__.py
|
vilaras/PyINSECT
|
4cc20659c47bd5f9c394d175cb041b729f6eb132
|
[
"Apache-2.0"
] | 2
|
2021-05-27T19:47:25.000Z
|
2021-05-28T17:11:23.000Z
|
source/documentModel/representations/__init__.py
|
vilaras/PyINSECT
|
4cc20659c47bd5f9c394d175cb041b729f6eb132
|
[
"Apache-2.0"
] | 17
|
2016-10-21T02:11:13.000Z
|
2020-10-07T19:11:54.000Z
|
from DocumentNGramGaussNormGraph import *
from DocumentNGramSymWinGraph import *
from DocumentNGramGraph import *
| 37.666667
| 41
| 0.876106
| 9
| 113
| 11
| 0.555556
| 0.20202
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097345
| 113
| 3
| 42
| 37.666667
| 0.970588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
92713a2db2ae9da14a6bcac35118901a0e51ccaf
| 441
|
py
|
Python
|
tg-bot/tg_bot/commands.py
|
mrR2D2/rss-tg-bot
|
0400595e6f2ea7dce975cadbd50e3ac05400b9ec
|
[
"MIT"
] | null | null | null |
tg-bot/tg_bot/commands.py
|
mrR2D2/rss-tg-bot
|
0400595e6f2ea7dce975cadbd50e3ac05400b9ec
|
[
"MIT"
] | null | null | null |
tg-bot/tg_bot/commands.py
|
mrR2D2/rss-tg-bot
|
0400595e6f2ea7dce975cadbd50e3ac05400b9ec
|
[
"MIT"
] | null | null | null |
"""
Module with bot commands.
"""
import telegram.ext
def help(
    update: telegram.Update,
    context: telegram.ext.CallbackContext,
) -> None:
    """Bot command handler stub — presumably for the /help command; confirm
    against the dispatcher registration.  Note the name shadows the ``help``
    builtin, kept as-is because the name is the handler's public interface.
    """
    # TODO: implement
    pass
def add_feed(
    update: telegram.Update,
    context: telegram.ext.CallbackContext,
) -> None:
    """Bot command handler stub — presumably subscribes the chat to an RSS
    feed; confirm once implemented."""
    # TODO: implement
    pass
def del_feed(
    update: telegram.Update,
    context: telegram.ext.CallbackContext,
) -> None:
    """Bot command handler stub — presumably removes an RSS feed
    subscription; confirm once implemented."""
    # TODO: implement
    pass
| 14.7
| 42
| 0.655329
| 48
| 441
| 5.979167
| 0.395833
| 0.15331
| 0.209059
| 0.28223
| 0.8223
| 0.8223
| 0.8223
| 0.8223
| 0.8223
| 0.8223
| 0
| 0
| 0.231293
| 441
| 29
| 43
| 15.206897
| 0.846608
| 0.1678
| 0
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034483
| 0
| 1
| 0.1875
| false
| 0.1875
| 0.0625
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 11
|
929ed7252c60878bfdb25f83dc5eaaf65e8aaf6d
| 72,002
|
py
|
Python
|
apteco_api/api/shares_api.py
|
Apteco/apteco-api
|
7440c98ab10ea6d8a5997187f6fc739ce1c75d2b
|
[
"Apache-2.0"
] | 2
|
2020-05-21T14:24:16.000Z
|
2020-12-03T19:56:34.000Z
|
apteco_api/api/shares_api.py
|
Apteco/apteco-api
|
7440c98ab10ea6d8a5997187f6fc739ce1c75d2b
|
[
"Apache-2.0"
] | null | null | null |
apteco_api/api/shares_api.py
|
Apteco/apteco-api
|
7440c98ab10ea6d8a5997187f6fc739ce1c75d2b
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Apteco API
An API to allow access to Apteco Marketing Suite resources # noqa: E501
The version of the OpenAPI document: v2
Contact: support@apteco.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from apteco_api.api_client import ApiClient
from apteco_api.exceptions import (
ApiTypeError,
ApiValueError
)
class SharesApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
    def __init__(self, api_client=None):
        # Fall back to a default-constructed ApiClient so the API class is
        # usable stand-alone; callers may inject a pre-configured client.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
    def shares_create_share(self, data_view_name, **kwargs):  # noqa: E501
        """Creates a new share from the given details, sharing from the logged in user.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.shares_create_share(data_view_name, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str data_view_name: The name of the DataView to act on (required)
        :param CreateShareDetail share_detail: The details of the shareable item (collection, audience, etc) to share and who to share it with
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: ShareDetail
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Thin generated wrapper: ask the _with_http_info variant for just the
        # deserialized body instead of the (data, status, headers) tuple.
        kwargs['_return_http_data_only'] = True
        return self.shares_create_share_with_http_info(data_view_name, **kwargs)  # noqa: E501
    def shares_create_share_with_http_info(self, data_view_name, **kwargs):  # noqa: E501
        """Creates a new share from the given details, sharing from the logged in user.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.shares_create_share_with_http_info(data_view_name, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str data_view_name: The name of the DataView to act on (required)
        :param CreateShareDetail share_detail: The details of the shareable item (collection, audience, etc) to share and who to share it with
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(ShareDetail, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: auto-generated by OpenAPI Generator — do not hand-edit.
        # locals() is used purely as a plain dict snapshot of the call
        # arguments; it is mutated as a dict, not as the live local scope.
        local_var_params = locals()

        all_params = ['data_view_name', 'share_detail']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Reject unknown keyword arguments early with a clear error.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method shares_create_share" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'data_view_name' is set
        if ('data_view_name' not in local_var_params or
                local_var_params['data_view_name'] is None):
            raise ApiValueError("Missing the required parameter `data_view_name` when calling `shares_create_share`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'data_view_name' in local_var_params:
            path_params['dataViewName'] = local_var_params['data_view_name']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'share_detail' in local_var_params:
            body_params = local_var_params['share_detail']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'text/json', 'application/xml', 'text/xml'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/json', 'text/json', 'application/*+json', 'application/xml', 'text/xml', 'application/*+xml'])  # noqa: E501

        # Authentication setting
        auth_settings = ['faststats_auth']  # noqa: E501

        # Dispatch through the shared client, which handles (de)serialization,
        # auth and the sync/async switch.
        return self.api_client.call_api(
            '/{dataViewName}/Shares', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ShareDetail',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
    def shares_create_share_update(self, data_view_name, share_id, **kwargs):  # noqa: E501
        """Creates a new share update from the given details, sharing from the logged in user.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.shares_create_share_update(data_view_name, share_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str data_view_name: The name of the DataView to act on (required)
        :param int share_id: The id of the share to update (required)
        :param CreateShareUpdate share_update: The details of the share update, including who to add and remove from the share and notification settings
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: CreatedShareUpdateDetail
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Thin generated wrapper: ask the _with_http_info variant for just the
        # deserialized body instead of the (data, status, headers) tuple.
        kwargs['_return_http_data_only'] = True
        return self.shares_create_share_update_with_http_info(data_view_name, share_id, **kwargs)  # noqa: E501
    def shares_create_share_update_with_http_info(self, data_view_name, share_id, **kwargs):  # noqa: E501
        """Creates a new share update from the given details, sharing from the logged in user.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.shares_create_share_update_with_http_info(data_view_name, share_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str data_view_name: The name of the DataView to act on (required)
        :param int share_id: The id of the share to update (required)
        :param CreateShareUpdate share_update: The details of the share update, including who to add and remove from the share and notification settings
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(CreatedShareUpdateDetail, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: auto-generated by OpenAPI Generator — do not hand-edit.
        # locals() is used purely as a plain dict snapshot of the call
        # arguments; it is mutated as a dict, not as the live local scope.
        local_var_params = locals()

        all_params = ['data_view_name', 'share_id', 'share_update']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Reject unknown keyword arguments early with a clear error.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method shares_create_share_update" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'data_view_name' is set
        if ('data_view_name' not in local_var_params or
                local_var_params['data_view_name'] is None):
            raise ApiValueError("Missing the required parameter `data_view_name` when calling `shares_create_share_update`")  # noqa: E501
        # verify the required parameter 'share_id' is set
        if ('share_id' not in local_var_params or
                local_var_params['share_id'] is None):
            raise ApiValueError("Missing the required parameter `share_id` when calling `shares_create_share_update`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'data_view_name' in local_var_params:
            path_params['dataViewName'] = local_var_params['data_view_name']  # noqa: E501
        if 'share_id' in local_var_params:
            path_params['shareId'] = local_var_params['share_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'share_update' in local_var_params:
            body_params = local_var_params['share_update']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'text/json', 'application/xml', 'text/xml'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/json', 'text/json', 'application/*+json', 'application/xml', 'text/xml', 'application/*+xml'])  # noqa: E501

        # Authentication setting
        auth_settings = ['faststats_auth']  # noqa: E501

        # Dispatch through the shared client, which handles (de)serialization,
        # auth and the sync/async switch.
        return self.api_client.call_api(
            '/{dataViewName}/Shares/{shareId}/Updates', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='CreatedShareUpdateDetail',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def shares_delete_share(self, data_view_name, share_id, **kwargs):  # noqa: E501
    """Deletes the specified share  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to perform the HTTP
    request asynchronously and receive the request thread instead.

    >>> thread = api.shares_delete_share(data_view_name, share_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str data_view_name: The name of the DataView to act on (required)
    :param int share_id: The id of the share to delete (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing data-only return.
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.shares_delete_share_with_http_info(data_view_name, share_id, **call_kwargs)  # noqa: E501
def shares_delete_share_with_http_info(self, data_view_name, share_id, **kwargs):  # noqa: E501
    """Deletes the specified share  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to perform the HTTP
    request asynchronously and receive the request thread instead.

    >>> thread = api.shares_delete_share_with_http_info(data_view_name, share_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str data_view_name: The name of the DataView to act on (required)
    :param int share_id: The id of the share to delete (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Collect the call arguments explicitly (positional first, then the
    # validated keyword arguments).
    local_var_params = {
        'data_view_name': data_view_name,
        'share_id': share_id,
    }
    valid_params = [
        'data_view_name', 'share_id',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ]
    for key, val in six.iteritems(kwargs):
        if key not in valid_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method shares_delete_share" % key
            )
        local_var_params[key] = val

    # Both path parameters are mandatory and must not be None.
    if local_var_params.get('data_view_name') is None:
        raise ApiValueError("Missing the required parameter `data_view_name` when calling `shares_delete_share`")  # noqa: E501
    if local_var_params.get('share_id') is None:
        raise ApiValueError("Missing the required parameter `share_id` when calling `shares_delete_share`")  # noqa: E501

    path_params = {
        'dataViewName': local_var_params['data_view_name'],
        'shareId': local_var_params['share_id'],
    }

    return self.api_client.call_api(
        '/{dataViewName}/Shares/{shareId}', 'DELETE',
        path_params,
        [],                       # no query parameters
        {},                       # no header parameters
        body=None,
        post_params=[],
        files={},
        response_type=None,
        auth_settings=['faststats_auth'],
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
def shares_get_share(self, data_view_name, share_id, **kwargs):  # noqa: E501
    """Returns the details of a particular share  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to perform the HTTP
    request asynchronously and receive the request thread instead.

    >>> thread = api.shares_get_share(data_view_name, share_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str data_view_name: The name of the DataView to act on (required)
    :param int share_id: The id of the share to view (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: ShareDetail
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing data-only return.
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.shares_get_share_with_http_info(data_view_name, share_id, **call_kwargs)  # noqa: E501
def shares_get_share_with_http_info(self, data_view_name, share_id, **kwargs):  # noqa: E501
    """Returns the details of a particular share  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to perform the HTTP
    request asynchronously and receive the request thread instead.

    >>> thread = api.shares_get_share_with_http_info(data_view_name, share_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str data_view_name: The name of the DataView to act on (required)
    :param int share_id: The id of the share to view (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(ShareDetail, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Collect the call arguments explicitly (positional first, then the
    # validated keyword arguments).
    local_var_params = {
        'data_view_name': data_view_name,
        'share_id': share_id,
    }
    valid_params = [
        'data_view_name', 'share_id',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ]
    for key, val in six.iteritems(kwargs):
        if key not in valid_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method shares_get_share" % key
            )
        local_var_params[key] = val

    # Both path parameters are mandatory and must not be None.
    if local_var_params.get('data_view_name') is None:
        raise ApiValueError("Missing the required parameter `data_view_name` when calling `shares_get_share`")  # noqa: E501
    if local_var_params.get('share_id') is None:
        raise ApiValueError("Missing the required parameter `share_id` when calling `shares_get_share`")  # noqa: E501

    path_params = {
        'dataViewName': local_var_params['data_view_name'],
        'shareId': local_var_params['share_id'],
    }
    header_params = {
        # Negotiate the response representation.
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'text/json', 'application/xml', 'text/xml']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/{dataViewName}/Shares/{shareId}', 'GET',
        path_params,
        [],                       # no query parameters
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='ShareDetail',
        auth_settings=['faststats_auth'],
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
def shares_get_share_update(self, data_view_name, share_id, share_update_id, **kwargs):  # noqa: E501
    """Returns a specific update that is associated with a particular share  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to perform the HTTP
    request asynchronously and receive the request thread instead.

    >>> thread = api.shares_get_share_update(data_view_name, share_id, share_update_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str data_view_name: The name of the DataView to act on (required)
    :param int share_id: The id of the share the update is associated with (required)
    :param int share_update_id: The id of the share update to view (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: ShareUpdate
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing data-only return.
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.shares_get_share_update_with_http_info(data_view_name, share_id, share_update_id, **call_kwargs)  # noqa: E501
def shares_get_share_update_with_http_info(self, data_view_name, share_id, share_update_id, **kwargs):  # noqa: E501
    """Returns a specific update that is associated with a particular share  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to perform the HTTP
    request asynchronously and receive the request thread instead.

    >>> thread = api.shares_get_share_update_with_http_info(data_view_name, share_id, share_update_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str data_view_name: The name of the DataView to act on (required)
    :param int share_id: The id of the share the update is associated with (required)
    :param int share_update_id: The id of the share update to view (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(ShareUpdate, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Collect the call arguments explicitly (positional first, then the
    # validated keyword arguments).
    local_var_params = {
        'data_view_name': data_view_name,
        'share_id': share_id,
        'share_update_id': share_update_id,
    }
    valid_params = [
        'data_view_name', 'share_id', 'share_update_id',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ]
    for key, val in six.iteritems(kwargs):
        if key not in valid_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method shares_get_share_update" % key
            )
        local_var_params[key] = val

    # All three path parameters are mandatory and must not be None.
    if local_var_params.get('data_view_name') is None:
        raise ApiValueError("Missing the required parameter `data_view_name` when calling `shares_get_share_update`")  # noqa: E501
    if local_var_params.get('share_id') is None:
        raise ApiValueError("Missing the required parameter `share_id` when calling `shares_get_share_update`")  # noqa: E501
    if local_var_params.get('share_update_id') is None:
        raise ApiValueError("Missing the required parameter `share_update_id` when calling `shares_get_share_update`")  # noqa: E501

    path_params = {
        'dataViewName': local_var_params['data_view_name'],
        'shareId': local_var_params['share_id'],
        'shareUpdateId': local_var_params['share_update_id'],
    }
    header_params = {
        # Negotiate the response representation.
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'text/json', 'application/xml', 'text/xml']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/{dataViewName}/Shares/{shareId}/Updates/{shareUpdateId}', 'GET',
        path_params,
        [],                       # no query parameters
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='ShareUpdate',
        auth_settings=['faststats_auth'],
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
def shares_get_share_update_added_users(self, data_view_name, share_id, share_update_id, **kwargs):  # noqa: E501
    """Returns the list of the users added to a share as part of a specific update  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to perform the HTTP
    request asynchronously and receive the request thread instead.

    >>> thread = api.shares_get_share_update_added_users(data_view_name, share_id, share_update_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str data_view_name: The name of the DataView to act on (required)
    :param int share_id: The id of the share the update is associated with (required)
    :param int share_update_id: The id of the share update to view (required)
    :param str filter: Filter the list of items using a simple expression language.  The available list of fields are Firstname, Surname
    :param str order_by: Order the items by a given field (in ascending order unless the field is preceeded by a \"-\" character).  The available list of fields are Firstname, Surname
    :param int offset: The number of items to skip in the (potentially filtered) result set before returning subsequent items.
    :param int count: The maximum number of items to show from the (potentially filtered) result set.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: PagedResultsUserDisplayDetails
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing data-only return.
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.shares_get_share_update_added_users_with_http_info(data_view_name, share_id, share_update_id, **call_kwargs)  # noqa: E501
def shares_get_share_update_added_users_with_http_info(self, data_view_name, share_id, share_update_id, **kwargs):  # noqa: E501
    """Returns the list of the users added to a share as part of a specific update  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to perform the HTTP
    request asynchronously and receive the request thread instead.

    >>> thread = api.shares_get_share_update_added_users_with_http_info(data_view_name, share_id, share_update_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str data_view_name: The name of the DataView to act on (required)
    :param int share_id: The id of the share the update is associated with (required)
    :param int share_update_id: The id of the share update to view (required)
    :param str filter: Filter the list of items using a simple expression language.  The available list of fields are Firstname, Surname
    :param str order_by: Order the items by a given field (in ascending order unless the field is preceeded by a \"-\" character).  The available list of fields are Firstname, Surname
    :param int offset: The number of items to skip in the (potentially filtered) result set before returning subsequent items.
    :param int count: The maximum number of items to show from the (potentially filtered) result set.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(PagedResultsUserDisplayDetails, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Collect the call arguments explicitly (positional first, then the
    # validated keyword arguments).
    local_var_params = {
        'data_view_name': data_view_name,
        'share_id': share_id,
        'share_update_id': share_update_id,
    }
    valid_params = [
        'data_view_name', 'share_id', 'share_update_id',
        'filter', 'order_by', 'offset', 'count',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ]
    for key, val in six.iteritems(kwargs):
        if key not in valid_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method shares_get_share_update_added_users" % key
            )
        local_var_params[key] = val

    # All three path parameters are mandatory and must not be None.
    if local_var_params.get('data_view_name') is None:
        raise ApiValueError("Missing the required parameter `data_view_name` when calling `shares_get_share_update_added_users`")  # noqa: E501
    if local_var_params.get('share_id') is None:
        raise ApiValueError("Missing the required parameter `share_id` when calling `shares_get_share_update_added_users`")  # noqa: E501
    if local_var_params.get('share_update_id') is None:
        raise ApiValueError("Missing the required parameter `share_update_id` when calling `shares_get_share_update_added_users`")  # noqa: E501
    # Paging bounds: both values, when supplied, must be non-negative.
    if 'offset' in local_var_params and local_var_params['offset'] < 0:  # noqa: E501
        raise ApiValueError("Invalid value for parameter `offset` when calling `shares_get_share_update_added_users`, must be a value greater than or equal to `0`")  # noqa: E501
    if 'count' in local_var_params and local_var_params['count'] < 0:  # noqa: E501
        raise ApiValueError("Invalid value for parameter `count` when calling `shares_get_share_update_added_users`, must be a value greater than or equal to `0`")  # noqa: E501

    path_params = {
        'dataViewName': local_var_params['data_view_name'],
        'shareId': local_var_params['share_id'],
        'shareUpdateId': local_var_params['share_update_id'],
    }
    # Optional query parameters, mapped python_name -> wire name.
    query_params = []
    for py_name, wire_name in (('filter', 'filter'), ('order_by', 'orderBy'),
                               ('offset', 'offset'), ('count', 'count')):
        if py_name in local_var_params:
            query_params.append((wire_name, local_var_params[py_name]))
    header_params = {
        # Negotiate the response representation.
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'text/json', 'application/xml', 'text/xml']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/{dataViewName}/Shares/{shareId}/Updates/{shareUpdateId}/AddedUsers', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='PagedResultsUserDisplayDetails',
        auth_settings=['faststats_auth'],
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
def shares_get_share_update_removed_users(self, data_view_name, share_id, share_update_id, **kwargs):  # noqa: E501
    """Returns the list of the users removed from a share as part of a specific update  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to perform the HTTP
    request asynchronously and receive the request thread instead.

    >>> thread = api.shares_get_share_update_removed_users(data_view_name, share_id, share_update_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str data_view_name: The name of the DataView to act on (required)
    :param int share_id: The id of the share the update is associated with (required)
    :param int share_update_id: The id of the share update to view (required)
    :param str filter: Filter the list of items using a simple expression language.  The available list of fields are Firstname, Surname
    :param str order_by: Order the items by a given field (in ascending order unless the field is preceeded by a \"-\" character).  The available list of fields are Firstname, Surname
    :param int offset: The number of items to skip in the (potentially filtered) result set before returning subsequent items.
    :param int count: The maximum number of items to show from the (potentially filtered) result set.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: PagedResultsUserDisplayDetails
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing data-only return.
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.shares_get_share_update_removed_users_with_http_info(data_view_name, share_id, share_update_id, **call_kwargs)  # noqa: E501
def shares_get_share_update_removed_users_with_http_info(self, data_view_name, share_id, share_update_id, **kwargs):  # noqa: E501
    """Returns the list of the users removed from a share as part of a specific update  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to perform the HTTP
    request asynchronously and receive the request thread instead.

    >>> thread = api.shares_get_share_update_removed_users_with_http_info(data_view_name, share_id, share_update_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str data_view_name: The name of the DataView to act on (required)
    :param int share_id: The id of the share the update is associated with (required)
    :param int share_update_id: The id of the share update to view (required)
    :param str filter: Filter the list of items using a simple expression language.  The available list of fields are Firstname, Surname
    :param str order_by: Order the items by a given field (in ascending order unless the field is preceeded by a \"-\" character).  The available list of fields are Firstname, Surname
    :param int offset: The number of items to skip in the (potentially filtered) result set before returning subsequent items.
    :param int count: The maximum number of items to show from the (potentially filtered) result set.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(PagedResultsUserDisplayDetails, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Collect the call arguments explicitly (positional first, then the
    # validated keyword arguments).
    local_var_params = {
        'data_view_name': data_view_name,
        'share_id': share_id,
        'share_update_id': share_update_id,
    }
    valid_params = [
        'data_view_name', 'share_id', 'share_update_id',
        'filter', 'order_by', 'offset', 'count',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ]
    for key, val in six.iteritems(kwargs):
        if key not in valid_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method shares_get_share_update_removed_users" % key
            )
        local_var_params[key] = val

    # All three path parameters are mandatory and must not be None.
    if local_var_params.get('data_view_name') is None:
        raise ApiValueError("Missing the required parameter `data_view_name` when calling `shares_get_share_update_removed_users`")  # noqa: E501
    if local_var_params.get('share_id') is None:
        raise ApiValueError("Missing the required parameter `share_id` when calling `shares_get_share_update_removed_users`")  # noqa: E501
    if local_var_params.get('share_update_id') is None:
        raise ApiValueError("Missing the required parameter `share_update_id` when calling `shares_get_share_update_removed_users`")  # noqa: E501
    # Paging bounds: both values, when supplied, must be non-negative.
    if 'offset' in local_var_params and local_var_params['offset'] < 0:  # noqa: E501
        raise ApiValueError("Invalid value for parameter `offset` when calling `shares_get_share_update_removed_users`, must be a value greater than or equal to `0`")  # noqa: E501
    if 'count' in local_var_params and local_var_params['count'] < 0:  # noqa: E501
        raise ApiValueError("Invalid value for parameter `count` when calling `shares_get_share_update_removed_users`, must be a value greater than or equal to `0`")  # noqa: E501

    path_params = {
        'dataViewName': local_var_params['data_view_name'],
        'shareId': local_var_params['share_id'],
        'shareUpdateId': local_var_params['share_update_id'],
    }
    # Optional query parameters, mapped python_name -> wire name.
    query_params = []
    for py_name, wire_name in (('filter', 'filter'), ('order_by', 'orderBy'),
                               ('offset', 'offset'), ('count', 'count')):
        if py_name in local_var_params:
            query_params.append((wire_name, local_var_params[py_name]))
    header_params = {
        # Negotiate the response representation.
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'text/json', 'application/xml', 'text/xml']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/{dataViewName}/Shares/{shareId}/Updates/{shareUpdateId}/RemovedUsers', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='PagedResultsUserDisplayDetails',
        auth_settings=['faststats_auth'],
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
def shares_get_share_updates(self, data_view_name, share_id, **kwargs):  # noqa: E501
    """Returns the updates that are associated with a particular share  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to perform the HTTP
    request asynchronously and receive the request thread instead.

    >>> thread = api.shares_get_share_updates(data_view_name, share_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str data_view_name: The name of the DataView to act on (required)
    :param int share_id: The id of the share to view (required)
    :param str filter: Filter the list of items using a simple expression language.  The available list of fields are Timestamp, Notes
    :param str order_by: Order the items by a given field (in ascending order unless the field is preceeded by a \"-\" character).  The available list of fields are Timestamp, Notes
    :param int offset: The number of items to skip in the (potentially filtered) result set before returning subsequent items.
    :param int count: The maximum number of items to show from the (potentially filtered) result set.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: PagedResultsShareUpdate
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing data-only return.
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.shares_get_share_updates_with_http_info(data_view_name, share_id, **call_kwargs)  # noqa: E501
def shares_get_share_updates_with_http_info(self, data_view_name, share_id, **kwargs):  # noqa: E501
    """Returns the updates that are associated with a particular share  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to perform the request
    asynchronously, in which case the returned thread's ``get()`` yields
    the result.

    >>> thread = api.shares_get_share_updates_with_http_info(data_view_name, share_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str data_view_name: The name of the DataView to act on (required)
    :param int share_id: The id of the share to view (required)
    :param str filter: Filter the list of items using a simple expression language. The available list of fields are Timestamp, Notes
    :param str order_by: Order the items by a given field (in ascending order unless the field is preceeded by a \"-\" character). The available list of fields are Timestamp, Notes
    :param int offset: The number of items to skip in the (potentially filtered) result set before returning subsequent items.
    :param int count: The maximum number of items to show from the (potentially filtered) result set.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the raw urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: timeout for this request: a single number for a
                             total timeout, or a (connection, read) tuple.
    :return: tuple(PagedResultsShareUpdate, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Snapshot the named arguments, then fold in **kwargs after validating
    # each key against the accepted parameter names.
    params = locals()
    allowed = ['data_view_name', 'share_id', 'filter', 'order_by',
               'offset', 'count', 'async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout']  # noqa: E501
    for key, val in six.iteritems(params.pop('kwargs')):
        if key not in allowed:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method shares_get_share_updates" % key
            )
        params[key] = val
    # Required parameters must be present and non-None.
    if params.get('data_view_name') is None:
        raise ApiValueError("Missing the required parameter `data_view_name` when calling `shares_get_share_updates`")  # noqa: E501
    if params.get('share_id') is None:
        raise ApiValueError("Missing the required parameter `share_id` when calling `shares_get_share_updates`")  # noqa: E501
    # Paging values may not be negative.
    if params.get('offset', 0) < 0:  # noqa: E501
        raise ApiValueError("Invalid value for parameter `offset` when calling `shares_get_share_updates`, must be a value greater than or equal to `0`")  # noqa: E501
    if params.get('count', 0) < 0:  # noqa: E501
        raise ApiValueError("Invalid value for parameter `count` when calling `shares_get_share_updates`, must be a value greater than or equal to `0`")  # noqa: E501

    # Map python-style names onto the wire-format path placeholders.
    path_params = {
        wire_key: params[py_key]
        for py_key, wire_key in (('data_view_name', 'dataViewName'),
                                 ('share_id', 'shareId'))
        if py_key in params
    }
    # Optional query-string parameters, in the generator's canonical order.
    query_params = [
        (wire_key, params[py_key])
        for py_key, wire_key in (('filter', 'filter'),
                                 ('order_by', 'orderBy'),
                                 ('offset', 'offset'),
                                 ('count', 'count'))
        if py_key in params
    ]

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'text/json', 'application/xml', 'text/xml'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/{dataViewName}/Shares/{shareId}/Updates', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='PagedResultsShareUpdate',  # noqa: E501
        auth_settings=['faststats_auth'],  # Authentication setting  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def shares_get_share_users(self, data_view_name, share_id, **kwargs):  # noqa: E501
    """Returns the list of users that are associated with a particular share  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to perform the request
    asynchronously, in which case the returned thread's ``get()`` yields
    the result.

    >>> thread = api.shares_get_share_users(data_view_name, share_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str data_view_name: The name of the DataView to act on (required)
    :param int share_id: The id of the share to view the users for (required)
    :param str filter: Filter the list of items using a simple expression language. The available list of fields are Username, EmailAddress
    :param str order_by: Order the items by a given field (in ascending order unless the field is preceeded by a \"-\" character). The available list of fields are Username, EmailAddress
    :param int offset: The number of items to skip in the (potentially filtered) result set before returning subsequent items.
    :param int count: The maximum number of items to show from the (potentially filtered) result set.
    :param _preload_content: if False, the raw urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: timeout for this request: a single number for a
                             total timeout, or a (connection, read) tuple.
    :return: PagedResultsUserDisplayDetails
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, requesting data only
    # (no status code / headers tuple).
    kwargs['_return_http_data_only'] = True
    return self.shares_get_share_users_with_http_info(
        data_view_name, share_id, **kwargs)  # noqa: E501
def shares_get_share_users_with_http_info(self, data_view_name, share_id, **kwargs):  # noqa: E501
    """Returns the list of users that are associated with a particular share  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to perform the request
    asynchronously, in which case the returned thread's ``get()`` yields
    the result.

    >>> thread = api.shares_get_share_users_with_http_info(data_view_name, share_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str data_view_name: The name of the DataView to act on (required)
    :param int share_id: The id of the share to view the users for (required)
    :param str filter: Filter the list of items using a simple expression language. The available list of fields are Username, EmailAddress
    :param str order_by: Order the items by a given field (in ascending order unless the field is preceeded by a \"-\" character). The available list of fields are Username, EmailAddress
    :param int offset: The number of items to skip in the (potentially filtered) result set before returning subsequent items.
    :param int count: The maximum number of items to show from the (potentially filtered) result set.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the raw urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: timeout for this request: a single number for a
                             total timeout, or a (connection, read) tuple.
    :return: tuple(PagedResultsUserDisplayDetails, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Snapshot the named arguments, then fold in **kwargs after validating
    # each key against the accepted parameter names.
    params = locals()
    allowed = ['data_view_name', 'share_id', 'filter', 'order_by',
               'offset', 'count', 'async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout']  # noqa: E501
    for key, val in six.iteritems(params.pop('kwargs')):
        if key not in allowed:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method shares_get_share_users" % key
            )
        params[key] = val
    # Required parameters must be present and non-None.
    if params.get('data_view_name') is None:
        raise ApiValueError("Missing the required parameter `data_view_name` when calling `shares_get_share_users`")  # noqa: E501
    if params.get('share_id') is None:
        raise ApiValueError("Missing the required parameter `share_id` when calling `shares_get_share_users`")  # noqa: E501
    # Paging values may not be negative.
    if params.get('offset', 0) < 0:  # noqa: E501
        raise ApiValueError("Invalid value for parameter `offset` when calling `shares_get_share_users`, must be a value greater than or equal to `0`")  # noqa: E501
    if params.get('count', 0) < 0:  # noqa: E501
        raise ApiValueError("Invalid value for parameter `count` when calling `shares_get_share_users`, must be a value greater than or equal to `0`")  # noqa: E501

    # Map python-style names onto the wire-format path placeholders.
    path_params = {
        wire_key: params[py_key]
        for py_key, wire_key in (('data_view_name', 'dataViewName'),
                                 ('share_id', 'shareId'))
        if py_key in params
    }
    # Optional query-string parameters, in the generator's canonical order.
    query_params = [
        (wire_key, params[py_key])
        for py_key, wire_key in (('filter', 'filter'),
                                 ('order_by', 'orderBy'),
                                 ('offset', 'offset'),
                                 ('count', 'count'))
        if py_key in params
    ]

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'text/json', 'application/xml', 'text/xml'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/{dataViewName}/Shares/{shareId}/Users', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='PagedResultsUserDisplayDetails',  # noqa: E501
        auth_settings=['faststats_auth'],  # Authentication setting  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def shares_get_shares(self, data_view_name, **kwargs):  # noqa: E501
    """Requires OrbitAdmin: Gets summary information about each share in the DataView.  # noqa: E501

    This endpoint is only available for users with the OrbitAdmin role.
    Synchronous by default; pass ``async_req=True`` to perform the request
    asynchronously, in which case the returned thread's ``get()`` yields
    the result.

    >>> thread = api.shares_get_shares(data_view_name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str data_view_name: The name of the DataView to act on (required)
    :param str filter: Filter the list of items using a simple expression language. The available list of fields are ShareableId, ShareableType, NumberOfUsersSharedWith
    :param str order_by: Order the items by a given field (in ascending order unless the field is preceeded by a \"-\" character). The available list of fields are ShareableId, ShareableType, NumberOfUsersSharedWith
    :param int offset: The number of items to skip in the (potentially filtered) result set before returning subsequent items.
    :param int count: The maximum number of items to show from the (potentially filtered) result set.
    :param _preload_content: if False, the raw urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: timeout for this request: a single number for a
                             total timeout, or a (connection, read) tuple.
    :return: PagedResultsShareSummary
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, requesting data only
    # (no status code / headers tuple).
    kwargs['_return_http_data_only'] = True
    return self.shares_get_shares_with_http_info(
        data_view_name, **kwargs)  # noqa: E501
def shares_get_shares_with_http_info(self, data_view_name, **kwargs):  # noqa: E501
    """Requires OrbitAdmin: Gets summary information about each share in the DataView.  # noqa: E501

    This endpoint is only available for users with the OrbitAdmin role.
    Synchronous by default; pass ``async_req=True`` to perform the request
    asynchronously, in which case the returned thread's ``get()`` yields
    the result.

    >>> thread = api.shares_get_shares_with_http_info(data_view_name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str data_view_name: The name of the DataView to act on (required)
    :param str filter: Filter the list of items using a simple expression language. The available list of fields are ShareableId, ShareableType, NumberOfUsersSharedWith
    :param str order_by: Order the items by a given field (in ascending order unless the field is preceeded by a \"-\" character). The available list of fields are ShareableId, ShareableType, NumberOfUsersSharedWith
    :param int offset: The number of items to skip in the (potentially filtered) result set before returning subsequent items.
    :param int count: The maximum number of items to show from the (potentially filtered) result set.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the raw urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: timeout for this request: a single number for a
                             total timeout, or a (connection, read) tuple.
    :return: tuple(PagedResultsShareSummary, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Snapshot the named arguments, then fold in **kwargs after validating
    # each key against the accepted parameter names.
    params = locals()
    allowed = ['data_view_name', 'filter', 'order_by', 'offset', 'count',
               'async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout']  # noqa: E501
    for key, val in six.iteritems(params.pop('kwargs')):
        if key not in allowed:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method shares_get_shares" % key
            )
        params[key] = val
    # Required parameter must be present and non-None.
    if params.get('data_view_name') is None:
        raise ApiValueError("Missing the required parameter `data_view_name` when calling `shares_get_shares`")  # noqa: E501
    # Paging values may not be negative.
    if params.get('offset', 0) < 0:  # noqa: E501
        raise ApiValueError("Invalid value for parameter `offset` when calling `shares_get_shares`, must be a value greater than or equal to `0`")  # noqa: E501
    if params.get('count', 0) < 0:  # noqa: E501
        raise ApiValueError("Invalid value for parameter `count` when calling `shares_get_shares`, must be a value greater than or equal to `0`")  # noqa: E501

    # Map python-style names onto the wire-format path placeholders.
    path_params = {}
    if 'data_view_name' in params:
        path_params['dataViewName'] = params['data_view_name']  # noqa: E501
    # Optional query-string parameters, in the generator's canonical order.
    query_params = [
        (wire_key, params[py_key])
        for py_key, wire_key in (('filter', 'filter'),
                                 ('order_by', 'orderBy'),
                                 ('offset', 'offset'),
                                 ('count', 'count'))
        if py_key in params
    ]

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'text/json', 'application/xml', 'text/xml'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/{dataViewName}/Shares', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='PagedResultsShareSummary',  # noqa: E501
        auth_settings=['faststats_auth'],  # Authentication setting  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
| 55.343582
| 220
| 0.642135
| 8,951
| 72,002
| 4.913194
| 0.031058
| 0.045113
| 0.072582
| 0.026923
| 0.979467
| 0.978785
| 0.978171
| 0.977079
| 0.976261
| 0.975852
| 0
| 0.01185
| 0.283909
| 72,002
| 1,300
| 221
| 55.386154
| 0.841098
| 0.463432
| 0
| 0.809446
| 0
| 0.016287
| 0.259945
| 0.064398
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034202
| false
| 0
| 0.008143
| 0
| 0.076547
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2bbdf7b77f790588ebd0a84353c40e24f3adcdbf
| 5,178
|
py
|
Python
|
simple_repl/tests/test_lexer.py
|
vaskinyy/kata-repl
|
ec27158cd8173fab4981f01624e94166d331dcbc
|
[
"MIT"
] | null | null | null |
simple_repl/tests/test_lexer.py
|
vaskinyy/kata-repl
|
ec27158cd8173fab4981f01624e94166d331dcbc
|
[
"MIT"
] | null | null | null |
simple_repl/tests/test_lexer.py
|
vaskinyy/kata-repl
|
ec27158cd8173fab4981f01624e94166d331dcbc
|
[
"MIT"
] | null | null | null |
import unittest
from repl import lexems
from repl.lexer import Lexer, Token
class Test_Lexer(unittest.TestCase):
    """Tests for Lexer.parse: assignments, operators, brackets and fn syntax.

    Each test lexes a source string and compares the resulting token stream
    (including the trailing EOF token) against the expected list.
    """

    def setUp(self):
        self.lexer = Lexer()

    def _assert_lexes(self, source, expected):
        # Helper: parse `source` and compare the token stream to `expected`.
        # Factors out the parse/compare boilerplate repeated in every test.
        tokens = self.lexer.parse(source)
        self.assertEqual(tokens, expected)

    def test_example_assignment(self):
        self._assert_lexes("x = 7", [
            Token(lexems.LETTER, 'x'),
            Token(lexems.ASSIGNMENT, lexems.ASSIGNMENT),
            Token(lexems.DIGIT, 7.0),
            Token(lexems.EOF, ""),
        ])

    def test_example_operation(self):
        self._assert_lexes("x + 6", [
            Token(lexems.LETTER, 'x'),
            Token(lexems.PLUS, lexems.PLUS),
            Token(lexems.DIGIT, 6.0),
            Token(lexems.EOF, ""),
        ])

    def test_example_assignment_chain(self):
        self._assert_lexes("x = y = 7", [
            Token(lexems.LETTER, 'x'),
            Token(lexems.ASSIGNMENT, lexems.ASSIGNMENT),
            Token(lexems.LETTER, 'y'),
            Token(lexems.ASSIGNMENT, lexems.ASSIGNMENT),
            Token(lexems.DIGIT, 7.0),
            Token(lexems.EOF, ""),
        ])

    def test_example_assignment_chain_hard(self):
        self._assert_lexes("x = 13 + (y = 3)", [
            Token(lexems.LETTER, 'x'),
            Token(lexems.ASSIGNMENT, lexems.ASSIGNMENT),
            Token(lexems.DIGIT, 13.0),
            Token(lexems.PLUS, lexems.PLUS),
            Token(lexems.OPEN_BRACKET, lexems.OPEN_BRACKET),
            Token(lexems.LETTER, 'y'),
            Token(lexems.ASSIGNMENT, lexems.ASSIGNMENT),
            Token(lexems.DIGIT, 3.0),
            Token(lexems.CLOSE_BRACKET, lexems.CLOSE_BRACKET),
            Token(lexems.EOF, ""),
        ])

    def test_example_fn_definition(self):
        self._assert_lexes("fn avg => (x + y) / 2", [
            Token(lexems.FN_KEYWORD, lexems.FN_KEYWORD),
            Token(lexems.LETTER, "avg"),
            Token(lexems.FN_OPERATOR, lexems.FN_OPERATOR),
            Token(lexems.OPEN_BRACKET, lexems.OPEN_BRACKET),
            Token(lexems.LETTER, 'x'),
            Token(lexems.PLUS, lexems.PLUS),
            Token(lexems.LETTER, 'y'),
            Token(lexems.CLOSE_BRACKET, lexems.CLOSE_BRACKET),
            Token(lexems.DIVIDE, lexems.DIVIDE),
            Token(lexems.DIGIT, 2.0),
            Token(lexems.EOF, ""),
        ])

    def test_example_fn_definition_error(self):
        # The lexer itself tokenizes this happily; it is the parser's job to
        # reject the undefined variable `z`.
        self._assert_lexes("fn add x y => x + z", [
            Token(lexems.FN_KEYWORD, lexems.FN_KEYWORD),
            Token(lexems.LETTER, "add"),
            Token(lexems.LETTER, 'x'),
            Token(lexems.LETTER, 'y'),
            Token(lexems.FN_OPERATOR, lexems.FN_OPERATOR),
            Token(lexems.LETTER, 'x'),
            Token(lexems.PLUS, lexems.PLUS),
            Token(lexems.LETTER, 'z'),
            Token(lexems.EOF, ""),
        ])

    def test_example_fn_definition_echo(self):
        self._assert_lexes("fn echo x => x", [
            Token(lexems.FN_KEYWORD, lexems.FN_KEYWORD),
            Token(lexems.LETTER, "echo"),
            Token(lexems.LETTER, 'x'),
            Token(lexems.FN_OPERATOR, lexems.FN_OPERATOR),
            Token(lexems.LETTER, 'x'),
            Token(lexems.EOF, ""),
        ])

    def test_example_fn_definition_add(self):
        # No whitespace around `+` — the lexer must still split the tokens.
        self._assert_lexes("fn add x y => x+y", [
            Token(lexems.FN_KEYWORD, lexems.FN_KEYWORD),
            Token(lexems.LETTER, "add"),
            Token(lexems.LETTER, 'x'),
            Token(lexems.LETTER, 'y'),
            Token(lexems.FN_OPERATOR, lexems.FN_OPERATOR),
            Token(lexems.LETTER, 'x'),
            Token(lexems.PLUS, lexems.PLUS),
            Token(lexems.LETTER, 'y'),
            Token(lexems.EOF, ""),
        ])

    def test_example_fn_definition_inc(self):
        # No whitespace around `=>` or `+`.
        self._assert_lexes("fn inc x=>x+1", [
            Token(lexems.FN_KEYWORD, lexems.FN_KEYWORD),
            Token(lexems.LETTER, "inc"),
            Token(lexems.LETTER, 'x'),
            Token(lexems.FN_OPERATOR, lexems.FN_OPERATOR),
            Token(lexems.LETTER, 'x'),
            Token(lexems.PLUS, lexems.PLUS),
            Token(lexems.DIGIT, 1.0),
            Token(lexems.EOF, ""),
        ])

    def test_example_fn_call_inc(self):
        self._assert_lexes("a = inc a", [
            Token(lexems.LETTER, "a"),
            Token(lexems.ASSIGNMENT, lexems.ASSIGNMENT),
            Token(lexems.LETTER, 'inc'),
            Token(lexems.LETTER, 'a'),
            Token(lexems.EOF, ""),
        ])
| 42.442623
| 68
| 0.539397
| 557
| 5,178
| 4.901257
| 0.091562
| 0.29011
| 0.174359
| 0.085714
| 0.905128
| 0.905128
| 0.821978
| 0.787179
| 0.767399
| 0.72381
| 0
| 0.006642
| 0.331209
| 5,178
| 121
| 69
| 42.793388
| 0.781692
| 0
| 0
| 0.638889
| 0
| 0
| 0.032638
| 0
| 0
| 0
| 0
| 0
| 0.092593
| 1
| 0.101852
| false
| 0
| 0.027778
| 0
| 0.138889
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a60a7717a4c08bff365b417588386c2ca701f60f
| 1,855
|
py
|
Python
|
problems/tests/test_search_in_rotated_sorted_array.py
|
vinta/fuck-coding-interviews
|
915ff55963430e81134a35f65f511e5684c52f11
|
[
"MIT"
] | 590
|
2020-06-17T08:26:47.000Z
|
2022-03-30T18:47:32.000Z
|
problems/tests/test_search_in_rotated_sorted_array.py
|
parvathirajan/fuck-coding-interviews
|
915ff55963430e81134a35f65f511e5684c52f11
|
[
"MIT"
] | 12
|
2020-07-14T09:24:32.000Z
|
2020-11-02T03:43:47.000Z
|
problems/tests/test_search_in_rotated_sorted_array.py
|
parvathirajan/fuck-coding-interviews
|
915ff55963430e81134a35f65f511e5684c52f11
|
[
"MIT"
] | 75
|
2020-07-29T06:50:13.000Z
|
2022-03-13T16:14:57.000Z
|
# coding: utf-8
import unittest
from problems.search_in_rotated_sorted_array import Solution
from problems.search_in_rotated_sorted_array import Solution2
class TestCase(unittest.TestCase):
    """Exercises Solution.search against rotated sorted arrays."""

    def setUp(self):
        self.solution = Solution()

    def test(self):
        # (nums, target, expected index or -1 when absent)
        cases = [
            ([4, 5, 6, 7, 0, 1, 2], 0, 4),
            ([4, 5, 6, 7, 0, 1, 2], 3, -1),
            ([3, 5, 7, 9, 11, 1], 9, 3),
            ([12, 5, 7, 9, 11], 12, 0),
            ([1, ], 1, 0),
            ([1, ], 0, -1),
        ]
        for nums, target, expected in cases:
            with self.subTest(nums=nums, target=target):
                self.assertEqual(self.solution.search(nums, target), expected)
class TestCase2(unittest.TestCase):
    """Exercises Solution2.search with the same cases as TestCase."""

    def setUp(self):
        self.solution = Solution2()

    def test(self):
        # (nums, target, expected index or -1 when absent)
        cases = [
            ([4, 5, 6, 7, 0, 1, 2], 0, 4),
            ([4, 5, 6, 7, 0, 1, 2], 3, -1),
            ([3, 5, 7, 9, 11, 1], 9, 3),
            ([12, 5, 7, 9, 11], 12, 0),
            ([1, ], 1, 0),
            ([1, ], 0, -1),
        ]
        for nums, target, expected in cases:
            with self.subTest(nums=nums, target=target):
                self.assertEqual(self.solution.search(nums, target), expected)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| 35.673077
| 78
| 0.505121
| 230
| 1,855
| 3.986957
| 0.186957
| 0.052345
| 0.052345
| 0.030534
| 0.883315
| 0.883315
| 0.883315
| 0.796074
| 0.700109
| 0.700109
| 0
| 0.069767
| 0.304582
| 1,855
| 51
| 79
| 36.372549
| 0.641085
| 0.007008
| 0
| 0.731707
| 0
| 0
| 0.141304
| 0
| 0
| 0
| 0
| 0
| 0.04878
| 1
| 0.097561
| false
| 0
| 0.073171
| 0
| 0.219512
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a60e90561e1de6f7fbed3cb756f560fe2fead6aa
| 1,253
|
py
|
Python
|
Other/clone_datablock/run.py
|
DisplaySweet/ogre-next
|
6d03e443236867193cb3923b4c538e1820ea2827
|
[
"MIT"
] | 4
|
2018-11-12T02:18:44.000Z
|
2020-09-03T12:16:14.000Z
|
Other/clone_datablock/run.py
|
DisplaySweet/ogre-next
|
6d03e443236867193cb3923b4c538e1820ea2827
|
[
"MIT"
] | 2
|
2020-12-01T18:20:39.000Z
|
2021-02-09T21:04:25.000Z
|
Other/clone_datablock/run.py
|
DisplaySweet/ogre-next
|
6d03e443236867193cb3923b4c538e1820ea2827
|
[
"MIT"
] | 3
|
2020-10-15T15:11:41.000Z
|
2021-04-04T03:38:54.000Z
|
"""Regenerate the datablock clone helpers for each Hlms implementation.

Invokes clone_datablock.py (a Python 2 script — note the hard-coded
"python2" interpreter) against each Hlms datablock header, passing the
include directories the script needs to resolve the header's includes.
"""
from subprocess import call

# Include directories common to every invocation, in the order the
# original script passed them.
_COMMON_INCLUDES = [
    "-I", "../../OgreMain/include/",
    "-I", "../../build/include",
    "-I", "../../build/Debug/include",
    "-I", "../../build/Release/include",
]


def _clone(header, extra_includes=()):
    # Run clone_datablock.py on `header`; `extra_includes` are additional
    # "-I", path pairs inserted before the common include list.
    call(["python2", "clone_datablock.py", header]
         + list(extra_includes) + _COMMON_INCLUDES)


_clone("../../Components/Hlms/Pbs/include/OgreHlmsPbsDatablock.h",
       ["-I", "../../Components/Hlms/Pbs/include"])

_clone("../../Components/Hlms/Unlit/include/OgreHlmsUnlitDatablock.h",
       ["-I", "../../Components/Hlms/Unlit/include"])

_clone("../../Samples/2.0/Tutorials/Tutorial_Terrain/include/Terra/Hlms/OgreHlmsTerraDatablock.h",
       ["-I", "../../Samples/2.0/Tutorials/Tutorial_Terrain/include",
        "-I", "../../Samples/2.0/Tutorials/Tutorial_Terrain/include/Terra/Hlms"])
| 39.15625
| 101
| 0.495611
| 112
| 1,253
| 5.491071
| 0.258929
| 0.156098
| 0.190244
| 0.121951
| 0.754472
| 0.754472
| 0.754472
| 0.665041
| 0.596748
| 0.437398
| 0
| 0.009259
| 0.224262
| 1,253
| 31
| 102
| 40.419355
| 0.623457
| 0
| 0
| 0.724138
| 0
| 0
| 0.619808
| 0.488818
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.034483
| 0
| 0.034483
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a64f6a8efc28b3f2bc7b74611dfd326ec62dcda9
| 68,621
|
py
|
Python
|
scripts/sampleOutputs/bkup/cmp_astarzeusmpcactusADMGemsFDTD_reverse/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
scripts/sampleOutputs/bkup/cmp_astarzeusmpcactusADMGemsFDTD_reverse/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
scripts/sampleOutputs/bkup/cmp_astarzeusmpcactusADMGemsFDTD_reverse/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0602004,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.249973,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.352095,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.314361,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.54436,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.312206,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.17093,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.256751,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.98962,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0665183,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0113958,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.103846,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0842792,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.170365,
'Execution Unit/Register Files/Runtime Dynamic': 0.095675,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.267428,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.716377,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 2.63833,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00138396,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00138396,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00122809,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.00048781,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00121068,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00520668,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0124595,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0810197,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.15355,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.201882,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.275179,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 7.62601,
'Instruction Fetch Unit/Runtime Dynamic': 0.575748,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0643898,
'L2/Runtime Dynamic': 0.0164563,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.08869,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.39002,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0922551,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0922551,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.52611,
'Load Store Unit/Runtime Dynamic': 1.93725,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.227485,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.454971,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0807353,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0816926,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.320429,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0331246,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.618503,
'Memory Management Unit/Runtime Dynamic': 0.114817,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 23.3863,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.232068,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0188672,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.161487,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.412421,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 5.69502,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0339918,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.229387,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.215349,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0880819,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.142073,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0717136,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.301868,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0677235,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.31648,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0406841,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00369455,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0381498,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0273235,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0788339,
'Execution Unit/Register Files/Runtime Dynamic': 0.031018,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0888708,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.250476,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.21813,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000139104,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000139104,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000125075,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 5.05604e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000392504,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.000795786,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0011938,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0262667,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.67079,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0664055,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0892137,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.97039,
'Instruction Fetch Unit/Runtime Dynamic': 0.183876,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0489591,
'L2/Runtime Dynamic': 0.0145284,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.31816,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.540708,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0349741,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0349741,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.48331,
'Load Store Unit/Runtime Dynamic': 0.748163,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0862403,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.17248,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0306069,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0313373,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.103883,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.010901,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.31257,
'Memory Management Unit/Runtime Dynamic': 0.0422383,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 14.7212,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.107021,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00527643,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.043254,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.155551,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.36248,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0654122,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.254067,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.410987,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.166676,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.268842,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.135702,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.571221,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.127618,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.76361,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0776442,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00699114,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.072696,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0517038,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.15034,
'Execution Unit/Register Files/Runtime Dynamic': 0.0586949,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.169507,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.448648,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.73801,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000399744,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000399744,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000364339,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000149881,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000742729,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00190655,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00325526,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0497042,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.16161,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.123156,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.168818,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.53357,
'Instruction Fetch Unit/Runtime Dynamic': 0.34684,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0353197,
'L2/Runtime Dynamic': 0.00964208,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.16149,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.93713,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.062258,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.062258,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.45549,
'Load Store Unit/Runtime Dynamic': 1.30642,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.153518,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.307035,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0544839,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0550033,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.196577,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0202221,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.44628,
'Memory Management Unit/Runtime Dynamic': 0.0752255,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 17.8237,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.204247,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0100056,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0815243,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.295776,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.77191,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.108562,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.287958,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.532813,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.259098,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.417915,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.210949,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.887962,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.214644,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.21241,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.10066,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0108677,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.121404,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0803735,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.222064,
'Execution Unit/Register Files/Runtime Dynamic': 0.0912412,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.282911,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.654628,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.32717,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000600861,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000600861,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000524696,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000203855,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00115457,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00288099,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00571287,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0772651,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 4.91472,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.170567,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.262427,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 7.37176,
'Instruction Fetch Unit/Runtime Dynamic': 0.518853,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.105786,
'L2/Runtime Dynamic': 0.0270954,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.95213,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.892432,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0554848,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0554848,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.21414,
'Load Store Unit/Runtime Dynamic': 1.22155,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.136816,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.273632,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0485564,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.050139,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.30558,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0279807,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.5451,
'Memory Management Unit/Runtime Dynamic': 0.0781197,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 20.0387,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.264789,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0149122,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.128509,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.40821,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 4.58099,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 4.952919993523108,
'Runtime Dynamic': 4.952919993523108,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.283222,
'Runtime Dynamic': 0.126812,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 76.2531,
'Peak Power': 109.365,
'Runtime Dynamic': 16.5372,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 75.9699,
'Total Cores/Runtime Dynamic': 16.4104,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.283222,
'Total L3s/Runtime Dynamic': 0.126812,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}}
| 75.077681
| 124
| 0.682124
| 8,083
| 68,621
| 5.784981
| 0.067673
| 0.123524
| 0.112917
| 0.093413
| 0.940163
| 0.931672
| 0.918285
| 0.886912
| 0.862682
| 0.841959
| 0
| 0.132106
| 0.22429
| 68,621
| 914
| 125
| 75.077681
| 0.746346
| 0
| 0
| 0.642232
| 0
| 0
| 0.657296
| 0.04809
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5b7637b821c25029005f9044213ea81dbb6eb9f1
| 162
|
py
|
Python
|
Applied_Linear_Algebra/Week 0/dictutil.py
|
eneskemalergin/Linear_Algebra
|
405dfab7090f8fb291b856b8e8a477f43eaff532
|
[
"CNRI-Python"
] | null | null | null |
Applied_Linear_Algebra/Week 0/dictutil.py
|
eneskemalergin/Linear_Algebra
|
405dfab7090f8fb291b856b8e8a477f43eaff532
|
[
"CNRI-Python"
] | null | null | null |
Applied_Linear_Algebra/Week 0/dictutil.py
|
eneskemalergin/Linear_Algebra
|
405dfab7090f8fb291b856b8e8a477f43eaff532
|
[
"CNRI-Python"
] | null | null | null |
# Copyright 2013 Philip N. Klein
def dict2list(dct, keylist):
    """Return the values of dct in the order given by keylist.

    Params:
        dct: a dictionary
        keylist: an iterable of keys, each of which must be present in dct

    Returns a list L with L[i] == dct[keylist[i]].  Raises KeyError if a
    key in keylist is missing from dct.

    >>> dict2list({'a': 1, 'b': 2, 'c': 3}, ['b', 'c', 'a'])
    [2, 3, 1]
    """
    return [dct[key] for key in keylist]

def list2dict(L, keylist):
    """Return a dictionary mapping keylist[i] to L[i].

    Params:
        L: a list of values
        keylist: an iterable of keys, same length as L (extra elements of
            the longer argument are silently dropped, matching zip())

    >>> list2dict(['A', 'B'], [10, 20]) == {10: 'A', 20: 'B'}
    True
    """
    return {key: value for key, value in zip(keylist, L)}

def listrange2dict(L):
    """Return a dictionary mapping each index i in range(len(L)) to L[i].

    >>> listrange2dict(['A', 'B', 'C']) == {0: 'A', 1: 'B', 2: 'C'}
    True
    """
    return list2dict(L, range(0, len(L)))
| 23.142857
| 60
| 0.734568
| 25
| 162
| 4.76
| 0.68
| 0.184874
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064286
| 0.135802
| 162
| 6
| 61
| 27
| 0.785714
| 0.185185
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| false
| 0.666667
| 0
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 7
|
5b7ad434989df43039428c13ce7ca123e0b1061c
| 24,177
|
py
|
Python
|
tests/test_cli.py
|
michaelloewenstein/dicomweb-client
|
0ea37db68a2b9c8373c964e610acea945b7b07b7
|
[
"MIT"
] | null | null | null |
tests/test_cli.py
|
michaelloewenstein/dicomweb-client
|
0ea37db68a2b9c8373c964e610acea945b7b07b7
|
[
"MIT"
] | null | null | null |
tests/test_cli.py
|
michaelloewenstein/dicomweb-client
|
0ea37db68a2b9c8373c964e610acea945b7b07b7
|
[
"MIT"
] | null | null | null |
import json
import tempfile
import pytest
from dicomweb_client.api import load_json_dataset
from dicomweb_client.cli import main
def test_parse_search_studies(parser):
    """Study-level search: defaults parsed, scoping UID attributes absent."""
    argv = ['--url', 'http://localhost:8002', 'search', 'studies']
    args = parser.parse_args(argv)
    assert args.method == 'search'
    assert args.qido_ie == 'studies'
    assert args.prettify is False
    assert args.dicomize is False
    for absent in ('study_instance_uid', 'series_instance_uid',
                   'sop_instance_uid'):
        with pytest.raises(AttributeError):
            getattr(args, absent)
def test_parse_search_studies_series(parser):
    """--series is rejected when searching at study level."""
    argv = [
        '--url', 'http://localhost:8002',
        'search', 'studies', '--series', '1.2.3'
    ]
    with pytest.raises(SystemExit):
        parser.parse_args(argv)
def test_parse_search_studies_instance(parser):
    """--instance is rejected when searching at study level."""
    argv = [
        '--url', 'http://localhost:8002', 'search', 'studies',
        '--instance', '1.2.3'
    ]
    with pytest.raises(SystemExit):
        parser.parse_args(argv)
def test_parse_search_series(parser):
    """Series-level search: study UID defaults to None, lower UIDs absent."""
    argv = ['--url', 'http://localhost:8002', 'search', 'series']
    args = parser.parse_args(argv)
    assert args.method == 'search'
    assert args.qido_ie == 'series'
    assert args.prettify is False
    assert args.dicomize is False
    assert args.study_instance_uid is None
    for absent in ('series_instance_uid', 'sop_instance_uid'):
        with pytest.raises(AttributeError):
            getattr(args, absent)
def test_parse_search_series_specific_study(parser):
    """Series search scoped via --study picks up the study UID."""
    argv = [
        '--url', 'http://localhost:8002', 'search', 'series',
        '--study', '1.2.3'
    ]
    args = parser.parse_args(argv)
    assert args.method == 'search'
    assert args.qido_ie == 'series'
    assert args.prettify is False
    assert args.dicomize is False
    assert args.study_instance_uid == '1.2.3'
    for absent in ('series_instance_uid', 'sop_instance_uid'):
        with pytest.raises(AttributeError):
            getattr(args, absent)
def test_parse_search_series_wrong_argument(parser):
    """--series is not a valid option for series-level search."""
    argv = [
        '--url', 'http://localhost:8002', 'search', 'series',
        '--series', '1.2.3'
    ]
    with pytest.raises(SystemExit):
        parser.parse_args(argv)
def test_parse_search_instances(parser):
    """Instance-level search: both scoping UIDs default to None."""
    argv = ['--url', 'http://localhost:8002', 'search', 'instances']
    args = parser.parse_args(argv)
    assert args.method == 'search'
    assert args.qido_ie == 'instances'
    assert args.prettify is False
    assert args.dicomize is False
    assert args.study_instance_uid is None
    assert args.series_instance_uid is None
    with pytest.raises(AttributeError):
        args.sop_instance_uid
def test_parse_search_instances_specific_study(parser):
    """Instance search scoped to a study; series UID stays None."""
    argv = [
        '--url', 'http://localhost:8002', 'search', 'instances',
        '--study', '1.2.3'
    ]
    args = parser.parse_args(argv)
    assert args.method == 'search'
    assert args.qido_ie == 'instances'
    assert args.prettify is False
    assert args.dicomize is False
    assert args.study_instance_uid == '1.2.3'
    assert args.series_instance_uid is None
    with pytest.raises(AttributeError):
        args.sop_instance_uid
def test_parse_search_instances_specific_study_series(parser):
    """Instance search scoped to a study and a series."""
    argv = [
        '--url', 'http://localhost:8002', 'search', 'instances',
        '--study', '1.2.3', '--series', '1.2.4'
    ]
    args = parser.parse_args(argv)
    assert args.method == 'search'
    assert args.qido_ie == 'instances'
    assert args.prettify is False
    assert args.dicomize is False
    assert args.study_instance_uid == '1.2.3'
    assert args.series_instance_uid == '1.2.4'
    with pytest.raises(AttributeError):
        args.sop_instance_uid
def test_parse_search_instances_prettify(parser):
    """--prettify enables only the prettify flag."""
    argv = [
        '--url', 'http://localhost:8002', 'search', 'instances', '--prettify'
    ]
    args = parser.parse_args(argv)
    assert (args.method, args.qido_ie) == ('search', 'instances')
    assert args.prettify is True
    assert args.dicomize is False
def test_parse_search_instances_dicomize(parser):
    """--dicomize enables only the dicomize flag."""
    argv = [
        '--url', 'http://localhost:8002', 'search', 'instances', '--dicomize'
    ]
    args = parser.parse_args(argv)
    assert (args.method, args.qido_ie) == ('search', 'instances')
    assert args.prettify is False
    assert args.dicomize is True
def test_parse_search_instances_argument_conflict(parser):
    """--prettify and --dicomize are mutually exclusive."""
    argv = [
        '--url', 'http://localhost:8002', 'search', 'instances',
        '--prettify', '--dicomize'
    ]
    with pytest.raises(SystemExit):
        parser.parse_args(argv)
def test_parse_retrieve_study(parser):
    """Full-study retrieval: save defaults off, QIDO-only flags absent."""
    argv = [
        '--url', 'http://localhost:8002', 'retrieve', 'studies',
        '--study', '1.2.3', 'full'
    ]
    args = parser.parse_args(argv)
    assert args.method == 'retrieve'
    assert args.wado_ie == 'studies'
    assert args.studies_resource == 'full'
    assert args.study_instance_uid == '1.2.3'
    assert args.save is False
    for absent in ('prettify', 'dicomize'):
        with pytest.raises(AttributeError):
            getattr(args, absent)
def test_parse_retrieve_study_save(parser):
    """--save enables saving and defaults output to the temp directory."""
    argv = [
        '--url', 'http://localhost:8002', 'retrieve', 'studies',
        '--study', '1.2.3', 'full', '--save'
    ]
    args = parser.parse_args(argv)
    assert args.method == 'retrieve'
    assert args.wado_ie == 'studies'
    assert args.studies_resource == 'full'
    assert args.study_instance_uid == '1.2.3'
    assert args.save is True
    assert args.output_dir == tempfile.gettempdir()
def test_parse_retrieve_study_metadata(parser):
    """Study metadata retrieval exposes the formatting flags."""
    argv = [
        '--url', 'http://localhost:8002', 'retrieve', 'studies',
        '--study', '1.2.3', 'metadata'
    ]
    args = parser.parse_args(argv)
    assert args.method == 'retrieve'
    assert args.wado_ie == 'studies'
    assert args.studies_resource == 'metadata'
    assert args.study_instance_uid == '1.2.3'
    assert args.prettify is False
    assert args.dicomize is False
    for absent in ('series_instance_uid', 'sop_instance_uid'):
        with pytest.raises(AttributeError):
            getattr(args, absent)
def test_parse_retrieve_study_metadata_unsupported_argument_media_type(parser):
    """--media-type is not accepted for metadata retrieval."""
    argv = [
        '--url', 'http://localhost:8002', 'retrieve', 'studies',
        '--study', '1.2.3', 'metadata', '--media-type', 'application/dicom'
    ]
    with pytest.raises(SystemExit):
        parser.parse_args(argv)
def test_parse_retrieve_study_metadata_missing_argument(parser):
    """Omitting --study makes study-level retrieval fail."""
    argv = [
        '--url', 'http://localhost:8002', 'retrieve', 'studies', 'metadata'
    ]
    with pytest.raises(SystemExit):
        parser.parse_args(argv)
def test_parse_retrieve_study_metadata_wrong_argument(parser):
    """--series is rejected for study-level retrieval."""
    argv = [
        '--url', 'http://localhost:8002', 'retrieve', 'studies',
        '--series', '1.2.3', 'metadata'
    ]
    with pytest.raises(SystemExit):
        parser.parse_args(argv)
def test_parse_retrieve_series(parser):
    """Full-series retrieval: both UIDs parsed, save defaults off."""
    argv = [
        '--url', 'http://localhost:8002', 'retrieve', 'series',
        '--study', '1.2.3', '--series', '1.2.4', 'full'
    ]
    args = parser.parse_args(argv)
    assert args.method == 'retrieve'
    assert args.wado_ie == 'series'
    assert args.series_resource == 'full'
    assert args.study_instance_uid == '1.2.3'
    assert args.series_instance_uid == '1.2.4'
    assert args.save is False
    for absent in ('prettify', 'dicomize'):
        with pytest.raises(AttributeError):
            getattr(args, absent)
def test_parse_retrieve_series_save(parser):
args = parser.parse_args([
'--url', 'http://localhost:8002', 'retrieve', 'series',
'--study', '1.2.3', '--series', '1.2.4', 'full', '--save'
])
assert getattr(args, 'save') is True
assert getattr(args, 'output_dir') == tempfile.gettempdir()
def test_parse_retrieve_series_save_directory(parser):
args = parser.parse_args([
'--url', 'http://localhost:8002', 'retrieve', 'series',
'--study', '1.2.3', '--series', '1.2.4', 'full', '--save',
'--output-dir', '/path/to/dir'
])
assert getattr(args, 'save') is True
assert getattr(args, 'output_dir') == '/path/to/dir'
def test_parse_retrieve_series_metadata(parser):
args = parser.parse_args([
'--url', 'http://localhost:8002', 'retrieve', 'series',
'--study', '1.2.3', '--series', '1.2.4', 'metadata'
])
assert getattr(args, 'method') == 'retrieve'
assert getattr(args, 'wado_ie') == 'series'
assert getattr(args, 'study_instance_uid') == '1.2.3'
assert getattr(args, 'series_instance_uid') == '1.2.4'
assert getattr(args, 'prettify') is False
assert getattr(args, 'dicomize') is False
with pytest.raises(AttributeError):
getattr(args, 'sop_instance_uid')
def test_parse_retrieve_series_metadata_extra_argument(parser):
with pytest.raises(SystemExit):
parser.parse_args([
'--url', 'http://localhost:8002', 'retrieve', 'series',
'--study', '1.2.3', '--series', '1.2.4', '--instance', '1.2.5',
'metadata'
])
def test_parse_retrieve_series_metadata_missing_argument(parser):
with pytest.raises(SystemExit):
parser.parse_args([
'--url', 'http://localhost:8002', 'retrieve', 'series',
'--study', '1.2.3', 'metadata',
])
def test_parse_store_instances_single_file(parser):
    """Storing a single file defaults to no study constraint."""
    ns = parser.parse_args([
        '--url', 'http://localhost:8002', 'store', 'instances',
        '/path/to/file.dcm',
    ])
    assert ns.method == 'store'
    assert ns.stow_ie == 'instances'
    assert ns.study_instance_uid is None
    assert ns.files == ['/path/to/file.dcm']


def test_parse_store_instances_chunked(parser):
    """The global '--chunk-size' option is parsed as an integer."""
    ns = parser.parse_args([
        '--url', 'http://localhost:8002',
        '--chunk-size', '1000',
        'store', 'instances',
        '/path/to/file.dcm',
    ])
    assert ns.chunk_size == 1000


def test_parse_store_instances_single_file_study_instance_uid(parser):
    """'--study' scopes the store request to one study."""
    ns = parser.parse_args([
        '--url', 'http://localhost:8002', 'store', 'instances',
        '/path/to/file.dcm', '--study', '1.2.3',
    ])
    assert ns.method == 'store'
    assert ns.stow_ie == 'instances'
    assert ns.study_instance_uid == '1.2.3'
    assert ns.files == ['/path/to/file.dcm']


def test_parse_store_instances_multiple_files(parser):
    """Several positional paths are collected in order."""
    ns = parser.parse_args([
        '--url', 'http://localhost:8002', 'store', 'instances',
        '/path/to/f1.dcm', '/path/to/f2.dcm',
    ])
    assert ns.method == 'store'
    assert ns.stow_ie == 'instances'
    assert ns.study_instance_uid is None
    assert ns.files == ['/path/to/f1.dcm', '/path/to/f2.dcm']


def test_parse_store_studies(parser):
    """'store studies' is not a supported STOW target."""
    cli = [
        '--url', 'http://localhost:8002', 'store', 'studies',
        '/path/to/file.dcm',
    ]
    with pytest.raises(SystemExit):
        parser.parse_args(cli)


def test_parse_store_series(parser):
    """'store series' is not a supported STOW target."""
    cli = [
        '--url', 'http://localhost:8002', 'store', 'series',
        '/path/to/file.dcm',
    ]
    with pytest.raises(SystemExit):
        parser.parse_args(cli)
def test_parse_retrieve_instance(parser):
    """Full instance retrieval records all three UIDs."""
    ns = parser.parse_args([
        '--url', 'http://localhost:8002', 'retrieve', 'instances',
        '--study', '1.2.3', '--series', '1.2.4', '--instance', '1.2.5', 'full',
    ])
    assert ns.method == 'retrieve'
    assert ns.wado_ie == 'instances'
    assert ns.instances_resource == 'full'
    assert ns.study_instance_uid == '1.2.3'
    assert ns.series_instance_uid == '1.2.4'
    assert ns.sop_instance_uid == '1.2.5'
    assert ns.save is False
    # Rendering options only exist for metadata resources.
    for absent in ('prettify', 'dicomize'):
        with pytest.raises(AttributeError):
            getattr(ns, absent)


def test_parse_retrieve_instance_chunked(parser):
    """The global '--chunk-size' option is parsed as an integer."""
    ns = parser.parse_args([
        '--url', 'http://localhost:8002',
        '--chunk-size', '1000',
        'retrieve', 'instances',
        '--study', '1.2.3', '--series', '1.2.4', '--instance', '1.2.5', 'full',
    ])
    assert ns.chunk_size == 1000


def test_parse_retrieve_instance_media_types(parser):
    """A bare '--media-type' yields a one-element nested list."""
    ns = parser.parse_args([
        '--url', 'http://localhost:8002', 'retrieve', 'instances',
        '--study', '1.2.3', '--series', '1.2.4', '--instance', '1.2.5', 'full',
        '--media-type', 'application/dicom',
    ])
    assert ns.media_types == [['application/dicom']]


def test_parse_retrieve_instance_media_types_transfer_syntax(parser):
    """A transfer syntax UID may follow the media type."""
    ns = parser.parse_args([
        '--url', 'http://localhost:8002', 'retrieve', 'instances',
        '--study', '1.2.3', '--series', '1.2.4', '--instance', '1.2.5', 'full',
        '--media-type', 'application/dicom', '1.2.840.10008.1.2.1',
    ])
    assert ns.media_types == [['application/dicom', '1.2.840.10008.1.2.1']]


def test_parse_retrieve_instance_media_types_transfer_syntax_multiple(parser):
    """Repeated '--media-type' options accumulate in order."""
    ns = parser.parse_args([
        '--url', 'http://localhost:8002', 'retrieve', 'instances',
        '--study', '1.2.3', '--series', '1.2.4', '--instance', '1.2.5', 'full',
        '--media-type', 'application/dicom', '1.2.840.10008.1.2.1',
        '--media-type', 'application/dicom', '1.2.840.10008.1.2.4.90',
    ])
    assert ns.media_types == [
        ['application/dicom', '1.2.840.10008.1.2.1'],
        ['application/dicom', '1.2.840.10008.1.2.4.90'],
    ]
def test_parse_retrieve_instance_metadata(parser):
    """Instance metadata retrieval keeps UIDs and output defaults."""
    ns = parser.parse_args([
        '--url', 'http://localhost:8002', 'retrieve', 'instances', '--study',
        '1.2.3', '--series', '1.2.4', '--instance', '1.2.5', 'metadata'
    ])
    assert ns.method == 'retrieve'
    assert ns.wado_ie == 'instances'
    assert ns.study_instance_uid == '1.2.3'
    assert ns.series_instance_uid == '1.2.4'
    assert ns.sop_instance_uid == '1.2.5'
    assert ns.save is False
    assert ns.output_dir == tempfile.gettempdir()
    assert ns.prettify is False
    assert ns.dicomize is False


def test_parse_retrieve_instance_metadata_missing_argument(parser):
    """Omitting '--instance' makes the parser bail out."""
    cli = [
        '--url', 'http://localhost:8002', 'retrieve', 'instances',
        '--study', '1.2.3', '--series', '1.2.4', 'metadata'
    ]
    with pytest.raises(SystemExit):
        parser.parse_args(cli)


def test_parse_retrieve_instance_metadata_missing_argument_2(parser):
    """Omitting '--series' makes the parser bail out."""
    cli = [
        '--url', 'http://localhost:8002', 'retrieve', 'instances',
        '--study', '1.2.3', '--instance', '1.2.5', 'metadata'
    ]
    with pytest.raises(SystemExit):
        parser.parse_args(cli)


def test_parse_retrieve_instance_metadata_missing_argument_3(parser):
    """Omitting '--study' makes the parser bail out."""
    cli = [
        '--url', 'http://localhost:8002', 'retrieve', 'instances',
        '--series', '1.2.4', '--instance', '1.2.5', 'metadata'
    ]
    with pytest.raises(SystemExit):
        parser.parse_args(cli)
def test_parse_retrieve_instance_frames(parser):
    """Retrieving a single frame parses the frame number list."""
    ns = parser.parse_args([
        '--url', 'http://localhost:8002', 'retrieve', 'instances',
        '--study', '1.2.3', '--series', '1.2.4', '--instance', '1.2.5',
        'frames', '--numbers', '1',
    ])
    assert ns.method == 'retrieve'
    assert ns.wado_ie == 'instances'
    assert ns.study_instance_uid == '1.2.3'
    assert ns.series_instance_uid == '1.2.4'
    assert ns.sop_instance_uid == '1.2.5'
    assert ns.frame_numbers == [1]
    assert ns.save is False
    assert ns.output_dir == tempfile.gettempdir()
    assert ns.show is False
    # Rendering options do not apply to pixel data.
    for absent in ('prettify', 'dicomize'):
        with pytest.raises(AttributeError):
            getattr(ns, absent)


def test_parse_retrieve_instance_frames_multiple(parser):
    """Several frame numbers are collected as integers."""
    ns = parser.parse_args([
        '--url', 'http://localhost:8002', 'retrieve', 'instances',
        '--study', '1.2.3', '--series', '1.2.4', '--instance', '1.2.5',
        'frames', '--numbers', '1', '2', '3',
    ])
    assert ns.method == 'retrieve'
    assert ns.wado_ie == 'instances'
    assert ns.study_instance_uid == '1.2.3'
    assert ns.series_instance_uid == '1.2.4'
    assert ns.sop_instance_uid == '1.2.5'
    assert ns.frame_numbers == [1, 2, 3]
    assert ns.save is False
    assert ns.output_dir == tempfile.gettempdir()
    assert ns.show is False
    for absent in ('prettify', 'dicomize'):
        with pytest.raises(AttributeError):
            getattr(ns, absent)


def test_parse_retrieve_instance_frames_show(parser):
    """'--show' toggles display without enabling saving."""
    ns = parser.parse_args([
        '--url', 'http://localhost:8002', 'retrieve', 'instances',
        '--study', '1.2.3', '--series', '1.2.4', '--instance', '1.2.5',
        'frames', '--numbers', '1', '--show',
    ])
    assert ns.show is True
    assert ns.save is False
    assert ns.output_dir == tempfile.gettempdir()


def test_parse_retrieve_instance_frames_save(parser):
    """'--save' toggles saving without enabling display."""
    ns = parser.parse_args([
        '--url', 'http://localhost:8002', 'retrieve', 'instances',
        '--study', '1.2.3', '--series', '1.2.4', '--instance', '1.2.5',
        'frames', '--numbers', '1', '--save',
    ])
    assert ns.show is False
    assert ns.save is True
    assert ns.output_dir == tempfile.gettempdir()


def test_parse_retrieve_instance_frames_show_save(parser):
    """'--save' and '--show' may be combined."""
    ns = parser.parse_args([
        '--url', 'http://localhost:8002', 'retrieve', 'instances',
        '--study', '1.2.3', '--series', '1.2.4', '--instance', '1.2.5',
        'frames', '--numbers', '1', '--save', '--show'
    ])
    assert ns.show is True
    assert ns.save is True
    assert ns.output_dir == tempfile.gettempdir()


def test_parse_retrieve_instance_frames_save_file(parser):
    """'--output-dir' overrides the default save location."""
    ns = parser.parse_args([
        '--url', 'http://localhost:8002', 'retrieve', 'instances',
        '--study', '1.2.3', '--series', '1.2.4', '--instance', '1.2.5',
        'frames', '--numbers', '1', '--save', '--output-dir', '/tmp',
    ])
    assert ns.show is False
    assert ns.save is True
    assert ns.output_dir == '/tmp'


def test_parse_retrieve_instance_frames_missing_argument(parser):
    """'--numbers' requires at least one value."""
    cli = [
        '--url', 'http://localhost:8002', 'retrieve', 'instances',
        '--study', '1.2.3', '--series', '1.2.4', '--instance', '1.2.5',
        'frames', '--numbers',
    ]
    with pytest.raises(SystemExit):
        parser.parse_args(cli)
def test_parse_retrieve_study_full_missing_argument(parser):
    """A study retrieval without a resource subcommand is rejected."""
    cli = [
        '--url', 'http://localhost:8002', 'retrieve', 'studies',
        '--study', '1.2.3'
    ]
    with pytest.raises(SystemExit):
        parser.parse_args(cli)


def test_parse_retrieve_series_missing_argument(parser):
    """'--studies' is not a valid option name at series level."""
    cli = [
        '--url', 'http://localhost:8002', 'retrieve', 'series',
        '--studies', '1.2.3', '--series', '1.2.4'
    ]
    with pytest.raises(SystemExit):
        parser.parse_args(cli)


def test_parse_retrieve_instance_missing_argument(parser):
    """'--instance' is rejected on the series subcommand."""
    cli = [
        '--url', 'http://localhost:8002', 'retrieve', 'series',
        '--study', '1.2.3', '--series', '1.2.4', '--instance', '1.2.5'
    ]
    with pytest.raises(SystemExit):
        parser.parse_args(cli)


def test_parse_retrieve_bulkdata(parser):
    """Bulk data retrieval records the URI and no media types."""
    ns = parser.parse_args([
        '--url', 'http://localhost:8002', 'retrieve', 'bulkdata',
        '--uri', 'http://localhost:8002/bulk/data'
    ])
    assert ns.method == 'retrieve'
    assert ns.wado_ie == 'bulkdata'
    assert ns.media_types is None
    assert ns.bulkdata_uri == 'http://localhost:8002/bulk/data'


def test_parse_retrieve_bulkdata_media_type(parser):
    """An acceptable media type can be given for bulk data."""
    ns = parser.parse_args([
        '--url', 'http://localhost:8002', 'retrieve', 'bulkdata',
        '--uri', 'http://localhost:8002/bulk/data',
        '--media-type', 'image/jpeg'
    ])
    assert ns.method == 'retrieve'
    assert ns.wado_ie == 'bulkdata'
    assert ns.media_types == [['image/jpeg']]
    assert ns.bulkdata_uri == 'http://localhost:8002/bulk/data'


def test_parse_retrieve_bulkdata_missing_argument(parser):
    """'--uri' is mandatory for bulk data retrieval."""
    cli = [
        '--url', 'http://localhost:8002', 'retrieve', 'bulkdata'
    ]
    with pytest.raises(SystemExit):
        parser.parse_args(cli)
def test_search_for_studies(parser, httpserver, cache_dir, capsys):
    """A study search prints the server's JSON payload verbatim and
    exits with status 0."""
    cache_filename = str(cache_dir.joinpath('search_for_studies.json'))
    with open(cache_filename, 'r') as f:
        content = f.read()
    headers = {'content-type': 'application/dicom+json'}
    httpserver.serve_content(content=content, code=200, headers=headers)
    args = parser.parse_args([
        '--url', httpserver.url, 'search', 'studies',
    ])
    # Named 'exc_info' rather than 'exit' so the builtin is not shadowed.
    with pytest.raises(SystemExit) as exc_info:
        main(args)
    assert exc_info.value.code == 0
    stdout, _stderr = capsys.readouterr()
    assert stdout == content


def test_search_for_studies_dicomize(parser, httpserver, cache_dir, capsys):
    """'--dicomize' prints each dataset's repr(), separated by blank lines."""
    cache_filename = str(cache_dir.joinpath('search_for_studies.json'))
    with open(cache_filename, 'r') as f:
        content = f.read()
    parsed_content = json.loads(content)
    dicomized_content = '\n\n\n'.join([
        repr(load_json_dataset(instance))
        for instance in parsed_content
    ])
    dicomized_content += '\n\n\n'
    headers = {'content-type': 'application/dicom+json'}
    httpserver.serve_content(content=content, code=200, headers=headers)
    args = parser.parse_args([
        '--url', httpserver.url, 'search', 'studies', '--dicomize'
    ])
    with pytest.raises(SystemExit) as exc_info:
        main(args)
    assert exc_info.value.code == 0
    stdout, _stderr = capsys.readouterr()
    assert stdout == dicomized_content


def test_search_for_studies_prettify(parser, httpserver, cache_dir, capsys):
    """'--prettify' pretty-prints the JSON with indentation and sorted keys."""
    cache_filename = str(cache_dir.joinpath('search_for_studies.json'))
    with open(cache_filename, 'r') as f:
        content = f.read()
    parsed_content = json.loads(content)
    prettified_content = json.dumps(parsed_content, indent=4, sort_keys=True)
    prettified_content += '\n'
    headers = {'content-type': 'application/dicom+json'}
    httpserver.serve_content(content=content, code=200, headers=headers)
    args = parser.parse_args([
        '--url', httpserver.url, 'search', 'studies', '--prettify'
    ])
    with pytest.raises(SystemExit) as exc_info:
        main(args)
    assert exc_info.value.code == 0
    stdout, _stderr = capsys.readouterr()
    assert stdout == prettified_content
| 36.855183
| 79
| 0.623651
| 2,916
| 24,177
| 5.001715
| 0.046982
| 0.125951
| 0.172506
| 0.067878
| 0.964278
| 0.941927
| 0.915941
| 0.900583
| 0.889887
| 0.864655
| 0
| 0.033599
| 0.196137
| 24,177
| 655
| 80
| 36.91145
| 0.716851
| 0
| 0
| 0.744015
| 0
| 0
| 0.275344
| 0.007404
| 0
| 0
| 0
| 0
| 0.28361
| 1
| 0.101289
| false
| 0
| 0.009208
| 0
| 0.110497
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5ba773e71c6c947684d6137b0031664c16a46fca
| 14,679
|
py
|
Python
|
tests/unit/test_plugins/test_auto_learn_threshold.py
|
Worteks/OrangeAssassin
|
21baf0b84fbedd887f6d88e13c624f14fb0b5e06
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_plugins/test_auto_learn_threshold.py
|
Worteks/OrangeAssassin
|
21baf0b84fbedd887f6d88e13c624f14fb0b5e06
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_plugins/test_auto_learn_threshold.py
|
Worteks/OrangeAssassin
|
21baf0b84fbedd887f6d88e13c624f14fb0b5e06
|
[
"Apache-2.0"
] | null | null | null |
"""Tests the pad.plugins.auto_learn_threshold.AutoLearnThreshold Plugin"""
import unittest
try:
from unittest.mock import patch, Mock, MagicMock, call
except ImportError:
from mock import patch, Mock, MagicMock, call
import oa.plugins.auto_learn_threshold
class TestAutoLearnThresholdPlugin(unittest.TestCase):
    """Tests for oa.plugins.auto_learn_threshold.AutoLearnThreshold.

    The plugin's storage accessors are redirected into plain dicts so each
    test can seed and inspect plugin state directly.  The previously
    copy-pasted state dicts are centralised in the private helpers below.
    """

    def setUp(self):
        unittest.TestCase.setUp(self)
        self.local_data = {}
        self.global_data = {
            "bayes_auto_learn_threshold_nonspam": 0.1,
            "bayes_auto_learn_threshold_spam": 12.0,
            "bayes_auto_learn_on_error": True
        }
        self.mock_ctxt = MagicMock()
        self.mock_msg = MagicMock(msg={})
        self.plugin = oa.plugins.auto_learn_threshold.AutoLearnThreshold(
            self.mock_ctxt)
        # Local accessors resolve ``self.local_data`` lazily, so tests may
        # rebind the attribute.  The global accessors are *bound methods* of
        # the dict created above: the plugin always reads/writes that dict,
        # regardless of later rebindings of ``self.global_data``.
        self.plugin.set_local = lambda m, k, v: self.local_data.__setitem__(k, v)
        self.plugin.get_local = lambda m, k: self.local_data.__getitem__(k)
        self.plugin.set_global = self.global_data.__setitem__
        self.plugin.get_global = self.global_data.__getitem__
        self.mock_ruleset = MagicMock()

    def tearDown(self):
        unittest.TestCase.tearDown(self)
        patch.stopall()

    # ------------------------------------------------------------------
    # Helpers
    # ------------------------------------------------------------------
    def _set_opinions(self, learner_spam, bayes_ham, bayes_spam):
        """Record what the learner and the Bayes classifier concluded."""
        self.local_data['learner_thinks_spam'] = learner_spam
        self.local_data['bayes_thinks_ham'] = bayes_ham
        self.local_data['bayes_thinks_spam'] = bayes_spam

    @staticmethod
    def _rule_mocks(forced_header=False):
        """Build the rule set used by the prepare_learning_metadata tests."""
        header_flags = ['autolearn_force'] if forced_header else None
        return {
            "HEADER": MagicMock(score=3, tflags=header_flags,
                                rule_type='header'),
            "BODY": MagicMock(score=5, tflags=None, rule_type='body'),
            "META": MagicMock(score=7, tflags=None, rule_type='meta'),
            "META_NET": MagicMock(score=11, tflags=None, rule_type='meta'),
            "LEARN": MagicMock(score=13, tflags=['learn'], rule_type='body'),
            "URI": MagicMock(score=17, tflags=['learn'], rule_type='uri'),
        }

    def _arrange_should_learn(self, score, **local_overrides):
        """Install a baseline should_learn() scenario.

        Keyword overrides patch single entries of the per-message (local)
        state.  The original tests also rebound ``self.global_data`` with
        values identical to setUp's; since the plugin's global accessors
        are bound to the setUp dict, that rebinding was a no-op and is
        intentionally omitted here.
        """
        self.mock_msg.score = score
        self.mock_ctxt.conf = {'required_score': 5}
        state = {
            'bayes_thinks_spam': False,
            'bayes_thinks_ham': False,
            'autolearn_points': 26,
            'header_points': 33,
            'body_points': 5,
            'min_body_points': 3,
            'min_header_points': 3,
            'learned_points': 0,
            'autolearn_forced': False,
            'learner_thinks_spam': True,
            'learner_thinks_ham': False,
        }
        state.update(local_overrides)
        self.local_data = state

    # ------------------------------------------------------------------
    # bayes_agrees()
    # ------------------------------------------------------------------
    def test_bayes_agrees_with_ham(self):
        self._set_opinions(learner_spam=False, bayes_ham=True,
                           bayes_spam=False)
        self.assertTrue(self.plugin.bayes_agrees(self.mock_msg))

    def test_bayes_agrees_with_spam(self):
        self._set_opinions(learner_spam=True, bayes_ham=False,
                           bayes_spam=True)
        self.assertTrue(self.plugin.bayes_agrees(self.mock_msg))

    def test_bayes_disagrees_with_spam(self):
        self._set_opinions(learner_spam=True, bayes_ham=False,
                           bayes_spam=False)
        self.assertFalse(self.plugin.bayes_agrees(self.mock_msg))

    def test_bayes_disagrees_with_ham(self):
        self._set_opinions(learner_spam=False, bayes_ham=False,
                           bayes_spam=True)
        self.assertFalse(self.plugin.bayes_agrees(self.mock_msg))

    # ------------------------------------------------------------------
    # valid_tests()
    # ------------------------------------------------------------------
    def test_valid_tests_generator(self):
        """Only scoring rules without blocking tflags are yielded."""
        tests = {
            "valid": MagicMock(score=3, tflags=None),
            "noautolearn": MagicMock(score=3, tflags=['noautolearn']),
            "userconf": MagicMock(score=3, tflags=['userconf']),
            "zero": MagicMock(score=0, tflags=None),
        }
        result = list(self.plugin.valid_tests(tests))
        self.assertListEqual(result, [('valid', tests['valid'])])

    # ------------------------------------------------------------------
    # prepare_learning_metadata()
    # ------------------------------------------------------------------
    def test_prepare_learning_metadata(self):
        self.plugin.prepare_learning_metadata(self.mock_msg,
                                              self._rule_mocks())
        expected_local_data = {'bayes_thinks_spam': False,
                               'bayes_thinks_ham': False,
                               'autolearn_points': 26,
                               'header_points': 21,
                               'body_points': 53,
                               'learned_points': 30,
                               'min_body_points': 3,
                               'min_header_points': 3,
                               'autolearn_forced': False,
                               'learner_thinks_spam': True,
                               'learner_thinks_ham': False}
        self.assertEqual(self.local_data, expected_local_data)

    def test_prepare_learning_metadata_global(self):
        """The configured thresholds must be left untouched."""
        self.plugin.prepare_learning_metadata(self.mock_msg,
                                              self._rule_mocks())
        expected_global_data = {'bayes_auto_learn_threshold_nonspam': 0.1,
                                'bayes_auto_learn_threshold_spam': 12.0,
                                'bayes_auto_learn_on_error': True,
                                }
        self.assertEqual(self.global_data, expected_global_data)

    def test_prepare_learning_metadata_global_forced(self):
        """A forced rule still leaves the global thresholds untouched."""
        self.plugin.prepare_learning_metadata(
            self.mock_msg, self._rule_mocks(forced_header=True))
        expected_global_data = {'bayes_auto_learn_threshold_nonspam': 0.1,
                                'bayes_auto_learn_threshold_spam': 12.0,
                                'bayes_auto_learn_on_error': True,
                                }
        self.assertEqual(self.global_data, expected_global_data)

    # ------------------------------------------------------------------
    # should_learn()
    # ------------------------------------------------------------------
    def test_should_learn_false_1(self):
        # Message score below required_score: not spam material.
        self._arrange_should_learn(4, header_points=21, body_points=53,
                                   learned_points=30)
        self.assertFalse(self.plugin.should_learn(self.mock_msg))

    def test_should_learn_false_spam_few_header_points(self):
        self._arrange_should_learn(6, header_points=2, body_points=53,
                                   learned_points=30)
        self.assertFalse(self.plugin.should_learn(self.mock_msg))

    def test_should_learn_false_spam_few_body_points(self):
        self._arrange_should_learn(6, body_points=2, learned_points=30)
        self.assertFalse(self.plugin.should_learn(self.mock_msg))

    def test_should_learn_false_spam_few_learned_points(self):
        self._arrange_should_learn(6, learned_points=-5)
        self.assertFalse(self.plugin.should_learn(self.mock_msg))

    def test_should_learn_false_ham_few_learned_points(self):
        self._arrange_should_learn(4, learned_points=5,
                                   learner_thinks_spam=False,
                                   learner_thinks_ham=True)
        self.assertFalse(self.plugin.should_learn(self.mock_msg))

    def test_should_learn_false_ham_instead_of_spam(self):
        # High message score but the learner classified it as ham.
        self._arrange_should_learn(6, learner_thinks_spam=False,
                                   learner_thinks_ham=True)
        self.assertFalse(self.plugin.should_learn(self.mock_msg))

    def test_should_learn_false_unsure(self):
        # The learner has no opinion either way.
        self._arrange_should_learn(6, autolearn_points=5,
                                   learner_thinks_spam=False,
                                   learner_thinks_ham=False)
        self.assertFalse(self.plugin.should_learn(self.mock_msg))

    def test_should_learn_false_bayes_agrees(self):
        # Bayes already agrees with the learner: nothing new to learn.
        self._arrange_should_learn(6, bayes_thinks_spam=True,
                                   autolearn_points=50)
        self.assertFalse(self.plugin.should_learn(self.mock_msg))

    def test_should_learn_True(self):
        self._arrange_should_learn(6, autolearn_points=50)
        self.assertTrue(self.plugin.should_learn(self.mock_msg))
| 45.871875
| 89
| 0.534028
| 1,520
| 14,679
| 4.771711
| 0.073684
| 0.048394
| 0.069488
| 0.076106
| 0.886116
| 0.855784
| 0.84696
| 0.82752
| 0.814422
| 0.814422
| 0
| 0.020943
| 0.365692
| 14,679
| 319
| 90
| 46.015674
| 0.758028
| 0.004632
| 0
| 0.748148
| 0
| 0
| 0.232865
| 0.073947
| 0
| 0
| 0
| 0
| 0.062963
| 1
| 0.07037
| false
| 0
| 0.018519
| 0
| 0.092593
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
752b6998443536f37c4cb9bdec919b975dae423c
| 15,650
|
py
|
Python
|
src/pythae/models/nn/benchmarks/cifar.py
|
clementchadebec/benchmark_VAE
|
943e231f9e5dfa40b4eec14d4536f1c229ad9be1
|
[
"Apache-2.0"
] | 143
|
2021-10-17T08:43:33.000Z
|
2022-03-31T11:10:53.000Z
|
src/pythae/models/nn/benchmarks/cifar.py
|
louis-j-vincent/benchmark_VAE
|
943e231f9e5dfa40b4eec14d4536f1c229ad9be1
|
[
"Apache-2.0"
] | 6
|
2022-01-21T17:40:09.000Z
|
2022-03-16T13:09:22.000Z
|
src/pythae/models/nn/benchmarks/cifar.py
|
louis-j-vincent/benchmark_VAE
|
943e231f9e5dfa40b4eec14d4536f1c229ad9be1
|
[
"Apache-2.0"
] | 18
|
2021-12-16T15:17:08.000Z
|
2022-03-15T01:30:13.000Z
|
"""Proposed Neural nets architectures suited for CIFAR"""
from typing import List
import torch
import torch.nn as nn
from ....models import BaseAEConfig
from ....models.base.base_utils import ModelOutput
from ..base_architectures import BaseDecoder, BaseEncoder
class Encoder_AE_CIFAR(BaseEncoder):
    """Convolutional encoder for 32x32 RGB (CIFAR) images, for use with
    Autoencoder-based :class:`pythae.models` models.

    Four ``Conv2d(4x4, stride 2)`` + ``BatchNorm2d`` + ``ReLU`` stages halve
    the spatial resolution down to 2x2 while widening the channels
    3 -> 128 -> 256 -> 512 -> 1024; a final linear layer maps the flattened
    4096 features to the latent space.

    .. code-block::

        >>> from pythae.models.nn.benchmarks.cifar import Encoder_AE_CIFAR
        >>> from pythae.models import AEConfig
        >>> model_config = AEConfig(input_dim=(3, 32, 32), latent_dim=16)
        >>> encoder = Encoder_AE_CIFAR(model_config)

    It can then be passed to a :class:`pythae.models` instance:

        >>> from pythae.models import AE
        >>> model = AE(model_config=model_config, encoder=encoder)
        >>> model.encoder == encoder
        ... True

    .. note::
        Only suitable for plain Autoencoder-based models: the forward pass
        returns the embeddings of the input data under the key ``embedding``
        (there is no ``log_covariance`` head).

    .. code-block::

        >>> import torch
        >>> input = torch.rand(2, 3, 32, 32)
        >>> out = encoder(input)
        >>> out.embedding.shape
        ... torch.Size([2, 16])
    """

    def __init__(self, args: BaseAEConfig):
        BaseEncoder.__init__(self)
        self.input_dim = (3, 32, 32)
        self.latent_dim = args.latent_dim
        self.n_channels = 3

        # Channel widths of the successive stride-2 downsampling stages.
        widths = (self.n_channels, 128, 256, 512, 1024)
        self.layers = nn.ModuleList(
            nn.Sequential(
                nn.Conv2d(c_in, c_out, 4, 2, padding=1),
                nn.BatchNorm2d(c_out),
                nn.ReLU(),
            )
            for c_in, c_out in zip(widths[:-1], widths[1:])
        )
        self.depth = len(self.layers)
        # Four stride-2 convs on 32x32 input leave 1024 maps of size 2x2.
        self.embedding = nn.Linear(1024 * 2 * 2, args.latent_dim)

    def forward(self, x: torch.Tensor, output_layer_levels: List[int] = None):
        """Encode a batch of images.

        Args:
            x (torch.Tensor): input batch.
            output_layer_levels (List[int]): 1-based indices of intermediate
                layers whose activations should also be returned; ``-1``
                selects the deepest layer. If None, only the final embedding
                is returned. Default: None.

        Returns:
            ModelOutput: the embeddings under the key ``embedding`` and,
            when requested, intermediate activations under the keys
            ``embedding_layer_i`` where ``i`` is the layer's level.
        """
        output = ModelOutput()

        max_depth = self.depth
        if output_layer_levels is not None:
            assert all(
                self.depth >= level > 0 or level == -1
                for level in output_layer_levels
            ), (
                f"Cannot output layer deeper than depth ({self.depth}). "
                f"Got ({output_layer_levels})."
            )
            # -1 means "up to the deepest layer"; otherwise stop at the
            # deepest requested level.
            if -1 not in output_layer_levels:
                max_depth = max(output_layer_levels)

        out = x
        for level, layer in enumerate(self.layers[:max_depth], start=1):
            out = layer(out)
            if output_layer_levels is not None and level in output_layer_levels:
                output[f"embedding_layer_{level}"] = out
            if level == self.depth:
                output["embedding"] = self.embedding(
                    out.reshape(x.shape[0], -1))

        return output
class Encoder_VAE_CIFAR(BaseEncoder):
"""
A Convolutional encoder Neural net suited for CIFAR and Variational Autoencoder-based
models.
It can be built as follows:
.. code-block::
>>> from pythae.models.nn.benchmarks.cifar import Encoder_VAE_CIFAR
>>> from pythae.models import VAEConfig
>>> model_config = VAEConfig(input_dim=(3, 32, 32), latent_dim=16)
>>> encoder = Encoder_VAE_CIFAR(model_config)
>>> encoder
... Encoder_VAE_CIFAR(
... (layers): ModuleList(
... (0): Sequential(
... (0): Conv2d(3, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
... (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
... (2): ReLU()
... )
... (1): Sequential(
... (0): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
... (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
... (2): ReLU()
... )
... (2): Sequential(
... (0): Conv2d(256, 512, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
... (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
... (2): ReLU()
... )
... (3): Sequential(
... (0): Conv2d(512, 1024, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
... (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
... (2): ReLU()
... )
... )
... (embedding): Linear(in_features=4096, out_features=16, bias=True)
... (log_var): Linear(in_features=4096, out_features=16, bias=True)
... )
and then passed to a :class:`pythae.models` instance
>>> from pythae.models import VAE
>>> model = VAE(model_config=model_config, encoder=encoder)
>>> model.encoder == encoder
... True
.. note::
Please note that this encoder is only suitable for Variational Autoencoder based models
since it outputs the embeddings and the **log** of the covariance diagonal coefficients
of the input data under the key `embedding` and `log_covariance`.
.. code-block::
>>> import torch
>>> input = torch.rand(2, 3, 32, 32)
>>> out = encoder(input)
>>> out.embedding.shape
... torch.Size([2, 16])
>>> out.log_covariance.shape
... torch.Size([2, 16])
"""
def __init__(self, args: BaseAEConfig):
    """Build the convolutional encoder stack for 3x32x32 (CIFAR) inputs.

    Args:
        args (BaseAEConfig): model configuration; only ``latent_dim`` is read.
    """
    BaseEncoder.__init__(self)
    self.input_dim = (3, 32, 32)
    self.latent_dim = args.latent_dim
    self.n_channels = 3

    # Four stride-2 conv stages (3 -> 128 -> 256 -> 512 -> 1024 channels),
    # each halving the spatial resolution: 32 -> 16 -> 8 -> 4 -> 2.
    channel_plan = [self.n_channels, 128, 256, 512, 1024]
    stages = nn.ModuleList()
    for c_in, c_out in zip(channel_plan[:-1], channel_plan[1:]):
        stages.append(
            nn.Sequential(
                nn.Conv2d(c_in, c_out, 4, 2, padding=1),
                nn.BatchNorm2d(c_out),
                nn.ReLU(),
            )
        )
    self.layers = stages
    self.depth = len(stages)

    # Final feature map is 1024 x 2 x 2; both heads map it to latent_dim.
    flat_features = 1024 * 2 * 2
    self.embedding = nn.Linear(flat_features, args.latent_dim)
    self.log_var = nn.Linear(flat_features, args.latent_dim)
def forward(self, x: torch.Tensor, output_layer_levels: List[int] = None):
    """Encode ``x`` and return embeddings and log-covariance diagonals.

    Args:
        x (torch.Tensor): input batch; first dimension is the batch size.
        output_layer_levels (List[int]): levels of the layers whose outputs
            should also be returned. ``-1`` means "up to the last layer".
            If None, only the final outputs are returned. Default: None.

    Returns:
        ModelOutput: embeddings under ``embedding`` and log of the diagonal
        covariance coefficients under ``log_covariance``; intermediate
        activations under ``embedding_layer_i`` when requested.
    """
    result = ModelOutput()
    last_level = self.depth
    if output_layer_levels is not None:
        assert all(
            level == -1 or 0 < level <= self.depth
            for level in output_layer_levels
        ), (
            f"Cannot output layer deeper than depth ({self.depth}). "
            f"Got ({output_layer_levels})."
        )
        # -1 keeps the full depth; otherwise stop at the deepest request.
        if -1 not in output_layer_levels:
            last_level = max(output_layer_levels)

    hidden = x
    for level in range(last_level):
        hidden = self.layers[level](hidden)
        if output_layer_levels is not None and (level + 1) in output_layer_levels:
            result[f"embedding_layer_{level + 1}"] = hidden
        if level + 1 == self.depth:
            # Flatten once and feed both heads.
            flat = hidden.reshape(x.shape[0], -1)
            result["embedding"] = self.embedding(flat)
            result["log_covariance"] = self.log_var(flat)
    return result
class Decoder_AE_CIFAR(BaseDecoder):
    """A convolutional decoder producing 3x32x32 (CIFAR) images, suitable
    for **all** pythae autoencoder-based models.

    .. code-block::

        >>> from pythae.models.nn.benchmarks.cifar import Decoder_AE_CIFAR
        >>> from pythae.models import VAEConfig
        >>> model_config = VAEConfig(input_dim=(3, 32, 32), latent_dim=16)
        >>> decoder = Decoder_AE_CIFAR(model_config)

    Given latent codes of shape ``(batch, latent_dim)``, :meth:`forward`
    returns a :class:`ModelOutput` whose ``reconstruction`` entry has shape
    ``(batch, 3, 32, 32)``.
    """

    def __init__(self, args: dict):
        """Build the decoder stack.

        Args:
            args (dict): model configuration; only ``latent_dim`` is read.
        """
        BaseDecoder.__init__(self)
        self.input_dim = (3, 32, 32)
        self.latent_dim = args.latent_dim
        self.n_channels = 3

        stages = nn.ModuleList()
        # Project the latent code up to a 1024 x 8 x 8 feature map.
        stages.append(nn.Linear(args.latent_dim, 1024 * 8 * 8))
        # 8 -> 16 spatial upsampling.
        stages.append(
            nn.Sequential(
                nn.ConvTranspose2d(1024, 512, 4, 2, padding=1),
                nn.BatchNorm2d(512),
                nn.ReLU(),
            )
        )
        # 16 -> 33 (output_padding=1), trimmed back to 32 by the last stage.
        stages.append(
            nn.Sequential(
                nn.ConvTranspose2d(512, 256, 4, 2, padding=1, output_padding=1),
                nn.BatchNorm2d(256),
                nn.ReLU(),
            )
        )
        # Final stage maps to image channels with values in (0, 1).
        stages.append(
            nn.Sequential(
                nn.ConvTranspose2d(256, self.n_channels, 4, 1, padding=2),
                nn.Sigmoid(),
            )
        )
        self.layers = stages
        self.depth = len(stages)

    def forward(self, z: torch.Tensor, output_layer_levels: List[int] = None):
        """Decode latent codes ``z`` into image reconstructions.

        Args:
            z (torch.Tensor): latent batch of shape ``(batch, latent_dim)``.
            output_layer_levels (List[int]): levels of the layers whose
                outputs should also be returned. ``-1`` means "up to the
                last layer". If None, only the reconstruction is returned.

        Returns:
            ModelOutput: the reconstruction under ``reconstruction``;
            intermediate activations under ``reconstruction_layer_i``
            when requested.
        """
        result = ModelOutput()
        last_level = self.depth
        if output_layer_levels is not None:
            assert all(
                level == -1 or 0 < level <= self.depth
                for level in output_layer_levels
            ), (
                f"Cannot output layer deeper than depth ({self.depth}). "
                f"Got ({output_layer_levels})."
            )
            if -1 not in output_layer_levels:
                last_level = max(output_layer_levels)

        hidden = z
        for level in range(last_level):
            hidden = self.layers[level](hidden)
            if level == 0:
                # The linear projection's output becomes a feature map.
                hidden = hidden.reshape(z.shape[0], 1024, 8, 8)
            if output_layer_levels is not None and (level + 1) in output_layer_levels:
                result[f"reconstruction_layer_{level + 1}"] = hidden
            if level + 1 == self.depth:
                result["reconstruction"] = hidden
        return result
| 34.320175
| 126
| 0.522109
| 1,797
| 15,650
| 4.425153
| 0.095715
| 0.045649
| 0.064135
| 0.028672
| 0.859406
| 0.834256
| 0.818159
| 0.813883
| 0.810111
| 0.798541
| 0
| 0.056858
| 0.353802
| 15,650
| 455
| 127
| 34.395604
| 0.729457
| 0.523706
| 0
| 0.714286
| 0
| 0
| 0.054372
| 0.021145
| 0
| 0
| 0
| 0
| 0.017143
| 1
| 0.034286
| false
| 0
| 0.034286
| 0
| 0.102857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7533e29f1c4ab219c04243659475acd93af70868
| 6,589
|
py
|
Python
|
pynars/NARS/RuleMap/Interface/Interface_CompositionalRules.py
|
AIxer/PyNARS
|
443b6a5e1c9779a1b861df1ca51ce5a190998d2e
|
[
"MIT"
] | null | null | null |
pynars/NARS/RuleMap/Interface/Interface_CompositionalRules.py
|
AIxer/PyNARS
|
443b6a5e1c9779a1b861df1ca51ce5a190998d2e
|
[
"MIT"
] | null | null | null |
pynars/NARS/RuleMap/Interface/Interface_CompositionalRules.py
|
AIxer/PyNARS
|
443b6a5e1c9779a1b861df1ca51ce5a190998d2e
|
[
"MIT"
] | null | null | null |
from pynars.NARS.DataStructures import Link, TaskLink, TermLink, LinkType, Task
from pynars.Narsese import Belief
from pynars.NAL.Inference import *
from pynars.NAL.Theorems import *
from pynars import Global
'''First-order with common subject'''
def _compositional__intersection_extension__0_0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
    """Dispatch wrapper: forward link budgets (if any) to compositional__intersection_extension."""
    task_budget = None if tasklink is None else tasklink.budget
    term_budget = None if termlink is None else termlink.budget
    return compositional__intersection_extension(task, belief, task_budget, term_budget, inverse_premise=False)
def _compositional__union_extension__0_0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
    """Dispatch wrapper: forward link budgets (if any) to compositional__union_extension."""
    task_budget = None if tasklink is None else tasklink.budget
    term_budget = None if termlink is None else termlink.budget
    return compositional__union_extension(task, belief, task_budget, term_budget, inverse_premise=False)
'''First-order with common predicate'''
def _compositional__intersection_intension__1_1(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
    """Dispatch wrapper: forward link budgets (if any) to compositional__intersection_intension."""
    task_budget = None if tasklink is None else tasklink.budget
    term_budget = None if termlink is None else termlink.budget
    return compositional__intersection_intension(task, belief, task_budget, term_budget, inverse_premise=False)
def _compositional__union_intension__1_1(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
    """Dispatch wrapper: forward link budgets (if any) to compositional__union_intension."""
    task_budget = None if tasklink is None else tasklink.budget
    term_budget = None if termlink is None else termlink.budget
    return compositional__union_intension(task, belief, task_budget, term_budget, inverse_premise=False)
'''Higher-order with common subject'''
def _compositional__conjunction_extension__0_0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
    """Dispatch wrapper: forward link budgets (if any) to compositional__conjunction_extension."""
    task_budget = None if tasklink is None else tasklink.budget
    term_budget = None if termlink is None else termlink.budget
    return compositional__conjunction_extension(task, belief, task_budget, term_budget, inverse_premise=False)
def _compositional__disjunction_extension__0_0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
    """Dispatch wrapper: forward link budgets (if any) to compositional__disjunction_extension."""
    task_budget = None if tasklink is None else tasklink.budget
    term_budget = None if termlink is None else termlink.budget
    return compositional__disjunction_extension(task, belief, task_budget, term_budget, inverse_premise=False)
'''Higher-order with common predicate'''
def _compositional__conjunction_intension__1_1(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
    """Dispatch wrapper: forward link budgets (if any) to compositional__conjunction_intension."""
    task_budget = None if tasklink is None else tasklink.budget
    term_budget = None if termlink is None else termlink.budget
    return compositional__conjunction_intension(task, belief, task_budget, term_budget, inverse_premise=False)
def _compositional__disjunction_intension__1_1(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
    """Dispatch wrapper: forward link budgets (if any) to compositional__disjunction_intension."""
    task_budget = None if tasklink is None else tasklink.budget
    term_budget = None if termlink is None else termlink.budget
    return compositional__disjunction_intension(task, belief, task_budget, term_budget, inverse_premise=False)
'''Theorems'''
'''structural rules'''
def _structural__bi_composition__0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
    """Dispatch wrapper: structural__bi_composition without copula inversion."""
    task_budget = None if tasklink is None else tasklink.budget
    term_budget = None if termlink is None else termlink.budget
    return structural__bi_composition(task, belief, task_budget, term_budget, inverse_copula=False)
def _structural__bi_composition__1(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
    """Dispatch wrapper: structural__bi_composition with copula inversion."""
    task_budget = None if tasklink is None else tasklink.budget
    term_budget = None if termlink is None else termlink.budget
    return structural__bi_composition(task, belief, task_budget, term_budget, inverse_copula=True)
def _structural__bi_composition__0_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
    """Dispatch wrapper: structural__bi_composition_prime without copula inversion."""
    task_budget = None if tasklink is None else tasklink.budget
    term_budget = None if termlink is None else termlink.budget
    return structural__bi_composition_prime(task, belief, task_budget, term_budget, inverse_copula=False)
def _structural__bi_composition__1_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
    """Dispatch wrapper: structural__bi_composition_prime with copula inversion."""
    task_budget = None if tasklink is None else tasklink.budget
    term_budget = None if termlink is None else termlink.budget
    return structural__bi_composition_prime(task, belief, task_budget, term_budget, inverse_copula=True)
def _structural__uni_composition__0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
    """Dispatch wrapper: structural__uni_composition without copula inversion."""
    task_budget = None if tasklink is None else tasklink.budget
    term_budget = None if termlink is None else termlink.budget
    return structural__uni_composition(task, belief, task_budget, term_budget, inverse_copula=False)
def _structural__uni_composition__1(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
    """Dispatch wrapper: structural__uni_composition with copula inversion."""
    task_budget = None if tasklink is None else tasklink.budget
    term_budget = None if termlink is None else termlink.budget
    return structural__uni_composition(task, belief, task_budget, term_budget, inverse_copula=True)
def _structural__uni_composition__0_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
    """Dispatch wrapper: structural__uni_composition_prime without copula inversion."""
    task_budget = None if tasklink is None else tasklink.budget
    term_budget = None if termlink is None else termlink.budget
    return structural__uni_composition_prime(task, belief, task_budget, term_budget, inverse_copula=False)
def _structural__uni_composition__1_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
    """Dispatch wrapper: structural__uni_composition_prime with copula inversion."""
    task_budget = None if tasklink is None else tasklink.budget
    term_budget = None if termlink is None else termlink.budget
    return structural__uni_composition_prime(task, belief, task_budget, term_budget, inverse_copula=True)
def _structural__uni_decomposition__0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
    """Dispatch wrapper: structural__uni_decomposition without copula inversion."""
    task_budget = None if tasklink is None else tasklink.budget
    term_budget = None if termlink is None else termlink.budget
    return structural__uni_decomposition(task, belief, task_budget, term_budget, inverse_copula=False)
def _structural__uni_decomposition__1(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
    """Dispatch wrapper: structural__uni_decomposition with copula inversion."""
    task_budget = None if tasklink is None else tasklink.budget
    term_budget = None if termlink is None else termlink.budget
    return structural__uni_decomposition(task, belief, task_budget, term_budget, inverse_copula=True)
'''implication theorems'''
def _structural__implication_theorem3(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
    """Dispatch wrapper: forward link budgets (if any) to structural__implication_theorem3."""
    task_budget = None if tasklink is None else tasklink.budget
    term_budget = None if termlink is None else termlink.budget
    return structural__implication_theorem3(task, belief, task_budget, term_budget, inverse_copula=False)
def _structural__implication_theorem4(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
    """Dispatch wrapper: forward link budgets (if any) to structural__implication_theorem4."""
    task_budget = None if tasklink is None else tasklink.budget
    term_budget = None if termlink is None else termlink.budget
    return structural__implication_theorem4(task, belief, task_budget, term_budget, inverse_copula=False)
| 77.517647
| 191
| 0.803309
| 908
| 6,589
| 5.595815
| 0.060573
| 0.078725
| 0.070852
| 0.102342
| 0.928557
| 0.925212
| 0.900413
| 0.900413
| 0.900413
| 0.900413
| 0
| 0.005126
| 0.111853
| 6,589
| 84
| 192
| 78.440476
| 0.863124
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.444444
| false
| 0
| 0.111111
| 0.444444
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 8
|
754dae5ce0dc95490fcef5514898716c7eeb08eb
| 85
|
py
|
Python
|
level0/question41.py
|
kevin00000000/Python-programming-exercises
|
87546906d817263ae7ddbd0276f0bb36e0d63c41
|
[
"MIT"
] | null | null | null |
level0/question41.py
|
kevin00000000/Python-programming-exercises
|
87546906d817263ae7ddbd0276f0bb36e0d63c41
|
[
"MIT"
] | null | null | null |
level0/question41.py
|
kevin00000000/Python-programming-exercises
|
87546906d817263ae7ddbd0276f0bb36e0d63c41
|
[
"MIT"
] | null | null | null |
# Split a 10-element tuple into its first and second halves and print each.
t = tuple(range(1, 11))
half = len(t) // 2
print(t[:half])
print(t[half:])
| 21.25
| 33
| 0.541176
| 25
| 85
| 1.84
| 0.64
| 0.26087
| 0.304348
| 0.347826
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175
| 0.058824
| 85
| 4
| 34
| 21.25
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.666667
| 1
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
755a80dfe519464419ccea15d33c0835f8e9582d
| 59
|
py
|
Python
|
tests/src/negative/redefFunc.py
|
lindlind/python-interpreter
|
ffcb38627dc128dddb04e769d0bff6466365271a
|
[
"MIT"
] | null | null | null |
tests/src/negative/redefFunc.py
|
lindlind/python-interpreter
|
ffcb38627dc128dddb04e769d0bff6466365271a
|
[
"MIT"
] | null | null | null |
tests/src/negative/redefFunc.py
|
lindlind/python-interpreter
|
ffcb38627dc128dddb04e769d0bff6466365271a
|
[
"MIT"
] | null | null | null |
# First definition of f; this file is a negative-test fixture, so the
# redefinition below is intentional — do not "fix" it.
def f() -> int:
    return 0
# Second definition of f: intentionally shadows the earlier one to exercise
# the interpreter's handling of function redefinition.
def f() -> int:
    return 1
| 9.833333
| 15
| 0.474576
| 10
| 59
| 2.8
| 0.6
| 0.285714
| 0.5
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 0.355932
| 59
| 5
| 16
| 11.8
| 0.684211
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 9
|
f34117a06764c84263695b2775d3d1a0adda7923
| 2,215
|
py
|
Python
|
syn/repositories.py
|
fendouai/jpush-docs-old
|
3160e16ccda878be39528e41579b95f246e6e30f
|
[
"MIT"
] | null | null | null |
syn/repositories.py
|
fendouai/jpush-docs-old
|
3160e16ccda878be39528e41579b95f246e6e30f
|
[
"MIT"
] | null | null | null |
syn/repositories.py
|
fendouai/jpush-docs-old
|
3160e16ccda878be39528e41579b95f246e6e30f
|
[
"MIT"
] | 1
|
2019-06-13T09:27:48.000Z
|
2019-06-13T09:27:48.000Z
|
import os
# Declare the repository names and URLs for the downloader to download.
# Each entry maps repo name -> {"name": <repo name>, "url": <GitHub URL>}.
# The original hand-written entry-by-entry construction was pure copy-paste;
# build the mapping from a single list instead (content and insertion order
# are unchanged).

_GITHUB_BASE_URL = "https://github.com/jpush/"

# JPush server-side API clients.
_SERVER_CLIENT_REPOS = [
    "jpush-api-csharp-client",
    "jpush-api-python-client",
    "jpush-api-php-client",
    "jpush-api-nodejs-client",
    "jpush-api-java-client",
    "jpush-api-ruby-client",
]

repositories = {}
for _repo in _SERVER_CLIENT_REPOS:
    repositories[_repo] = {"name": _repo, "url": _GITHUB_BASE_URL + _repo}

# JPush client-side plugins (currently disabled; kept verbatim for reference):
'''
repositories["jpush-react-plugin"]={}
repositories["jpush-react-plugin"]["name"]="jpush-react-plugin"
repositories["jpush-react-plugin"]["url"]="https://github.com/jpush/jpush-react-plugin"
repositories["jpush-phonegap-plugin"]={}
repositories["jpush-phonegap-plugin"]["name"]="jpush-phonegap-plugin"
repositories["jpush-phonegap-plugin"]["url"]="https://github.com/jpush/jpush-phonegap-plugin"
repositories["jpush-unity3d-plugin"]={}
repositories["jpush-unity3d-plugin"]["name"]="jpush-unity3d-plugin"
repositories["jpush-unity3d-plugin"]["url"]="https://github.com/jpush/jpush-unity3d-plugin"
repositories["jpush-cocos2d-x-plugin"]={}
repositories["jpush-cocos2d-x-plugin"]["name"]="jpush-cocos2d-x-plugin"
repositories["jpush-cocos2d-x-plugin"]["url"]="https://github.com/jpush/jpush-cocos2d-x-plugin"
'''
| 36.311475
| 97
| 0.742212
| 294
| 2,215
| 5.591837
| 0.108844
| 0.310219
| 0.218978
| 0.268856
| 0.893552
| 0.85219
| 0.841241
| 0.532847
| 0.423358
| 0
| 0
| 0.004669
| 0.032957
| 2,215
| 60
| 98
| 36.916667
| 0.762838
| 0.041535
| 0
| 0
| 0
| 0
| 0.653045
| 0.342328
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.05
| 0
| 0.05
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f35cf5e7f20258bc2824244491d82216921efd6c
| 107
|
py
|
Python
|
dehazeExperment/main.py
|
okingjerryo/modelExperiment
|
6e6a7b6055a2e4efff7d7e599fa8b79a2ca3270e
|
[
"Apache-2.0"
] | null | null | null |
dehazeExperment/main.py
|
okingjerryo/modelExperiment
|
6e6a7b6055a2e4efff7d7e599fa8b79a2ca3270e
|
[
"Apache-2.0"
] | null | null | null |
dehazeExperment/main.py
|
okingjerryo/modelExperiment
|
6e6a7b6055a2e4efff7d7e599fa8b79a2ca3270e
|
[
"Apache-2.0"
] | 1
|
2018-01-08T03:03:32.000Z
|
2018-01-08T03:03:32.000Z
|
from dehazeExperment import dataset
from dehazeExperment import args
from dehazeExperment import estimator
| 26.75
| 37
| 0.88785
| 12
| 107
| 7.916667
| 0.5
| 0.6
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11215
| 107
| 3
| 38
| 35.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f39ad7f3763c8399081559db69d5c66b576b8a5f
| 80
|
py
|
Python
|
contracts.py
|
realjohnward/web3.py-tutorials
|
a30ac08ac2d70f4a5084de418a2ae093a95916c0
|
[
"MIT"
] | null | null | null |
contracts.py
|
realjohnward/web3.py-tutorials
|
a30ac08ac2d70f4a5084de418a2ae093a95916c0
|
[
"MIT"
] | null | null | null |
contracts.py
|
realjohnward/web3.py-tutorials
|
a30ac08ac2d70f4a5084de418a2ae093a95916c0
|
[
"MIT"
] | 2
|
2021-03-26T23:20:29.000Z
|
2021-11-12T20:59:20.000Z
|
# Known Ethereum contract addresses, keyed by collection name.
CONTRACTS = dict(
    cryptopunks="0xb47e3cd837dDF8e4c57F05d70Ab865de6e193BBB",
)
| 26.666667
| 64
| 0.775
| 3
| 80
| 20.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.309859
| 0.1125
| 80
| 3
| 65
| 26.666667
| 0.56338
| 0
| 0
| 0
| 0
| 0
| 0.654321
| 0.518519
| 0
| 0
| 0.518519
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f39fe2735f8fd4ec49a6c6f9e46226290347c2d7
| 182
|
py
|
Python
|
bugyocloudclient/endpoints/punchmarkpage.py
|
sengokyu/bugyo-cloud-client
|
4a86556abe075fe06b6bbd9c702eb97fce4bdfdd
|
[
"MIT"
] | 1
|
2021-01-14T00:38:20.000Z
|
2021-01-14T00:38:20.000Z
|
bugyocloudclient/endpoints/punchmarkpage.py
|
sengokyu/bugyo-cloud-client
|
4a86556abe075fe06b6bbd9c702eb97fce4bdfdd
|
[
"MIT"
] | null | null | null |
bugyocloudclient/endpoints/punchmarkpage.py
|
sengokyu/bugyo-cloud-client
|
4a86556abe075fe06b6bbd9c702eb97fce4bdfdd
|
[
"MIT"
] | null | null | null |
from bugyocloudclient.core import BugyoCloudClient
from bugyocloudclient.endpoints.base.tokenpage import TokenPage
class PunchmarkPage(TokenPage):
    """The time card (punch mark) page."""
    pass
| 22.75
| 63
| 0.796703
| 17
| 182
| 8.529412
| 0.647059
| 0.275862
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131868
| 182
| 7
| 64
| 26
| 0.917722
| 0.06044
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
f3abb2621361301e85628ffbba80c688a363e37f
| 6,777
|
py
|
Python
|
twitter/tests/test_views.py
|
maribedran/personal-feed
|
0203359e8521fd90c9211b73a0b2efaf0e3cd9c6
|
[
"MIT"
] | null | null | null |
twitter/tests/test_views.py
|
maribedran/personal-feed
|
0203359e8521fd90c9211b73a0b2efaf0e3cd9c6
|
[
"MIT"
] | 2
|
2020-06-05T17:42:05.000Z
|
2021-06-10T19:27:19.000Z
|
twitter/tests/test_views.py
|
maribedran/personal-feed
|
0203359e8521fd90c9211b73a0b2efaf0e3cd9c6
|
[
"MIT"
] | null | null | null |
from unittest.mock import patch
from model_mommy import mommy
from common.utils.tests import TestCaseUtils
from twitter.models import Tweet, TwitterUser
from twitter.serializers import TweetGetSerializer, TwitterUserSerializer
from twitter.use_cases import NotFoundError, UnexpectedError
class AddUserViewTest(TestCaseUtils):
    """Tests for the ``twitter:add_user`` view.

    The view runs two use cases in sequence: ``AddTwitterUserUseCase``
    (returns the created ``TwitterUser``) and
    ``AddUsersLastMonthsTweetsUseCase`` (called with that user).
    """

    def setUp(self):
        super().setUp()
        self.url = self.reverse('twitter:add_user')

    def test_invalid_post_returns_400(self):
        response = self.auth_client.post(self.url, {'a': 'b'})
        self.assertResponse400(response)

    @patch('twitter.use_cases.AddUsersLastMonthsTweetsUseCase.execute')
    @patch('twitter.use_cases.AddTwitterUserUseCase.execute')
    def test_valid_post_calls_use_cases_correctly(self, *args):
        # patch decorators apply bottom-up: first mock is the user use case.
        mocked_user_uc, mocked_tweets_uc = args
        twitter_user = mommy.make('twitter.TwitterUser')
        mocked_user_uc.return_value = twitter_user
        response = self.auth_client.post(self.url, {'username': 'user'})
        self.assertResponse200(response)
        mocked_user_uc.assert_called_once_with('user')
        mocked_tweets_uc.assert_called_once_with(twitter_user)
        twitter_user.refresh_from_db()
        self.assertEqual(twitter_user.owners.first(), self.user)
        self.assertResponseContentEqual(response, 'Success! User added to feed.')

    @patch('twitter.use_cases.AddTwitterUserUseCase.execute')
    def test_view_returns_error_message_for_not_found_exception(self, mocked_user_uc):
        mocked_user_uc.side_effect = NotFoundError('Error Message')
        response = self.auth_client.post(self.url, {'username': 'user'})
        self.assertResponse400(response)
        mocked_user_uc.assert_called_once_with('user')
        self.assertResponseContentEqual(response, 'Error Message')

    @patch('twitter.use_cases.AddTwitterUserUseCase.execute')
    def test_view_returns_error_message_for_unexpected_exception(self, mocked_user_uc):
        mocked_user_uc.side_effect = UnexpectedError('Error Message')
        response = self.auth_client.post(self.url, {'username': 'user'})
        self.assertResponse500(response)
        mocked_user_uc.assert_called_once_with('user')
        self.assertResponseContentEqual(response, 'Error Message')

    @patch('twitter.use_cases.AddUsersLastMonthsTweetsUseCase.execute')
    @patch('twitter.use_cases.AddTwitterUserUseCase.execute')
    def test_view_returns_error_message_for_unexpected_exception_on_second_uc(self, *args):
        mocked_user_uc, mocked_tweets_uc = args
        twitter_user = mommy.make('twitter.TwitterUser')
        mocked_user_uc.return_value = twitter_user
        mocked_tweets_uc.side_effect = UnexpectedError('Error Message')
        response = self.auth_client.post(self.url, {'username': 'user'})
        self.assertResponse500(response)
        mocked_user_uc.assert_called_once_with('user')
        self.assertResponseContentEqual(response, 'Error Message')
        # Fixed: the tweets use case is invoked with the TwitterUser instance
        # (see test_valid_post_calls_use_cases_correctly), not a status string.
        mocked_tweets_uc.assert_called_once_with(twitter_user)
class TwitterUserViewSetTest(TestCaseUtils):
    """List/detail endpoint tests for the TwitterUser viewset, including
    filtering by the logged-in owner."""

    def setUp(self):
        super().setUp()
        self.twitter_user = mommy.make('twitter.TwitterUser')
        self.twitter_user.owners.add(self.user)
        self.list_url = self.reverse('twitter:users-list')
        self.detail_url = self.reverse('twitter:users-detail', self.twitter_user.id)

    def test_list_returns_status_code_200(self):
        resp = self.auth_client.get(self.list_url)
        self.assertResponse200(resp)

    def test_list_returns_correct_data(self):
        expected = TwitterUserSerializer(self.twitter_user).data
        resp = self.auth_client.get(self.list_url)
        self.assertEqual(1, resp.json()['count'])
        self.assertEqual([expected], resp.json()['results'])

    def test_list_filters_by_logged_user(self):
        # A user not owned by self.user must not appear until ownership is added.
        unowned = mommy.make('twitter.TwitterUser')
        resp = self.auth_client.get(self.list_url)
        owned_count = TwitterUser.objects.filter(owners=self.user).count()
        self.assertEqual(owned_count, resp.json()['count'])
        unowned.owners.add(self.user)
        resp = self.auth_client.get(self.list_url)
        self.assertEqual(owned_count + 1, resp.json()['count'])

    def test_detail_returns_status_code_200(self):
        resp = self.auth_client.get(self.detail_url)
        self.assertResponse200(resp)

    def test_detail_returns_correct_data(self):
        expected = TwitterUserSerializer(self.twitter_user).data
        resp = self.auth_client.get(self.detail_url)
        self.assertEqual(expected, resp.json())

    def test_detail_returns_404_if_user_is_not_owner(self):
        foreign = mommy.make('twitter.TwitterUser')
        url = self.reverse('twitter:users-detail', foreign.id)
        resp = self.auth_client.get(url)
        self.assertResponse404(resp)
class TweetViewSetTest(TestCaseUtils):
    """List/detail endpoint tests for the Tweet viewset, including filtering
    by tweets whose author is owned by the logged-in user."""

    def setUp(self):
        super().setUp()
        self.twitter_user = mommy.make('twitter.TwitterUser')
        self.twitter_user.owners.add(self.user)
        self.tweet = mommy.make('twitter.Tweet', user=self.twitter_user)
        self.list_url = self.reverse('twitter:tweets-list')
        self.detail_url = self.reverse('twitter:tweets-detail', self.tweet.id)

    def test_list_returns_status_code_200(self):
        resp = self.auth_client.get(self.list_url)
        self.assertResponse200(resp)

    def test_list_returns_correct_data(self):
        expected = TweetGetSerializer(self.tweet).data
        resp = self.auth_client.get(self.list_url)
        self.assertEqual(1, resp.json()['count'])
        self.assertEqual([expected], resp.json()['results'])

    def test_list_filters_by_logged_user(self):
        # A tweet whose author is not owned by self.user must stay hidden
        # until ownership is added.
        unowned_tweet = mommy.make('twitter.Tweet')
        resp = self.auth_client.get(self.list_url)
        owned_count = Tweet.objects.filter(user__owners=self.user).count()
        self.assertEqual(owned_count, resp.json()['count'])
        unowned_tweet.user.owners.add(self.user)
        resp = self.auth_client.get(self.list_url)
        self.assertEqual(owned_count + 1, resp.json()['count'])

    def test_detail_returns_status_code_200(self):
        resp = self.auth_client.get(self.detail_url)
        self.assertResponse200(resp)

    def test_detail_returns_correct_data(self):
        expected = TweetGetSerializer(self.tweet).data
        resp = self.auth_client.get(self.detail_url)
        self.assertEqual(expected, resp.json())

    def test_detail_returns_404_if_user_is_not_owner(self):
        foreign_tweet = mommy.make('twitter.Tweet')
        url = self.reverse('twitter:tweets-detail', foreign_tweet.id)
        resp = self.auth_client.get(url)
        self.assertResponse404(resp)
| 41.833333
| 91
| 0.720378
| 825
| 6,777
| 5.644848
| 0.135758
| 0.028559
| 0.065278
| 0.089757
| 0.835516
| 0.832295
| 0.82435
| 0.717844
| 0.717844
| 0.690788
| 0
| 0.010326
| 0.171167
| 6,777
| 161
| 92
| 42.093168
| 0.818764
| 0
| 0
| 0.715447
| 0
| 0
| 0.12085
| 0.05076
| 0
| 0
| 0
| 0
| 0.260163
| 1
| 0.162602
| false
| 0
| 0.04878
| 0
| 0.235772
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f3bf21710017245e51402cb9229b53a3ba85106a
| 76
|
py
|
Python
|
sensenet/utils/__init__.py
|
jtoy/sensenet
|
adaf5d22ffe37f5d5d5b889b4d00760588629ec4
|
[
"MIT"
] | 59
|
2017-11-24T16:56:18.000Z
|
2022-02-25T19:33:51.000Z
|
sensenet/utils/__init__.py
|
VicksD1996/sensenet
|
adaf5d22ffe37f5d5d5b889b4d00760588629ec4
|
[
"MIT"
] | 31
|
2017-11-24T19:11:47.000Z
|
2018-07-02T19:25:08.000Z
|
sensenet/utils/__init__.py
|
VicksD1996/sensenet
|
adaf5d22ffe37f5d5d5b889b4d00760588629ec4
|
[
"MIT"
] | 22
|
2017-11-25T11:01:45.000Z
|
2022-02-25T19:33:52.000Z
|
from sensenet.utils.closer import Closer
from sensenet.utils import reraise
| 25.333333
| 40
| 0.855263
| 11
| 76
| 5.909091
| 0.545455
| 0.369231
| 0.523077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 76
| 2
| 41
| 38
| 0.955882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
34331eb40ac577ee3e2bf243e1e8dce7969d2b19
| 23,315
|
py
|
Python
|
lannerpsp/sdk_hwm.py
|
lanneriotsw/psp-api-python
|
24806866ef6335bbe852da9133a0b580f4d77909
|
[
"MIT"
] | null | null | null |
lannerpsp/sdk_hwm.py
|
lanneriotsw/psp-api-python
|
24806866ef6335bbe852da9133a0b580f4d77909
|
[
"MIT"
] | null | null | null |
lannerpsp/sdk_hwm.py
|
lanneriotsw/psp-api-python
|
24806866ef6335bbe852da9133a0b580f4d77909
|
[
"MIT"
] | null | null | null |
import logging
from configparser import ConfigParser
from ctypes import byref, c_float, c_uint16
from typing import List
from .lmbinc import PSP
# Module-level logger; handlers/levels are configured by the application.
logger = logging.getLogger(__name__)
class HardwareMonitor:
"""
Hardware Monitor.
sdk/src_utils/sdk_hwm/sdk_hwm.c
:param lmb_io_path: path of liblmbio.so
:param lmb_api_path: path of liblmbapi.so
"""
def __init__(self,
             lmb_io_path: str = "/opt/lanner/psp/bin/amd64/lib/liblmbio.so",
             lmb_api_path: str = "/opt/lanner/psp/bin/amd64/lib/liblmbapi.so") -> None:
    """Remember the SDK shared-library paths and allocate ctypes out-buffers.

    :param lmb_io_path: path of liblmbio.so
    :param lmb_api_path: path of liblmbapi.so
    """
    self._lmb_io_path, self._lmb_api_path = lmb_io_path, lmb_api_path
    # Reusable out-parameters passed via byref() to the LMB_HWM_* C calls.
    self._f_temp = c_float()
    self._w_data = c_uint16()
    self._w_rpm = c_uint16()
@classmethod
def _str_replace(cls, source: str) -> float:
"""Replace str to float from hwm.conf"""
result = 1.0
for element in source.split("*"):
result *= float(element.strip())
return result
def get_cpu_temp(self, num: int) -> int:
    """Return the temperature of CPU sensor *num* as an int (°C).

    :raises TypeError: if ``num`` is not an int.
    :raises PSP.PSPError: if the underlying SDK call fails.
    """
    if not isinstance(num, int):
        raise TypeError("'num' type must be int")
    with PSP(self._lmb_io_path, self._lmb_api_path) as psp:
        status = psp.lib.LMB_HWM_GetCpuTemp(num, byref(self._f_temp))
        if status != PSP.ERR_Success:
            message = PSP.get_error_message("LMB_HWM_GetCpuTemp", status)
            logger.error(message)
            raise PSP.PSPError(message)
        logger.debug(f"CPU-{num} temperature = {int(self._f_temp.value):d}")
        return int(self._f_temp.value)
def get_sys_temp(self, num: int) -> int:
    """Return the temperature of SYS sensor *num* as an int (°C).

    :raises TypeError: if ``num`` is not an int.
    :raises PSP.PSPError: if the underlying SDK call fails.
    """
    if not isinstance(num, int):
        raise TypeError("'num' type must be int")
    with PSP(self._lmb_io_path, self._lmb_api_path) as psp:
        status = psp.lib.LMB_HWM_GetSysTemp(num, byref(self._f_temp))
        if status != PSP.ERR_Success:
            message = PSP.get_error_message("LMB_HWM_GetSysTemp", status)
            logger.error(message)
            raise PSP.PSPError(message)
        logger.debug(f"SYS-{num} temperature = {int(self._f_temp.value):d}")
        return int(self._f_temp.value)
def get_vcore(self, num: int) -> float:
"""Get CPU core voltage."""
# Check type.
if not isinstance(num, int):
raise TypeError("'num' type must be int")
with PSP(self._lmb_io_path, self._lmb_api_path) as psp:
i_ret = psp.lib.LMB_HWM_GetVcore(num, byref(self._f_temp))
if i_ret != PSP.ERR_Success:
error_message = PSP.get_error_message("LMB_HWM_GetVcore", i_ret)
logger.error(error_message)
raise PSP.PSPError(error_message)
logger.debug(f"CPU-{num} Vcore = {self._f_temp.value:2.3f}")
return self._f_temp.value
def get_12v(self) -> float:
"""Get 12V voltage."""
with PSP(self._lmb_io_path, self._lmb_api_path) as psp:
i_ret = psp.lib.LMB_HWM_Get12V(byref(self._f_temp))
if i_ret != PSP.ERR_Success:
error_message = PSP.get_error_message("LMB_HWM_Get12V", i_ret)
logger.error(error_message)
raise PSP.PSPError(error_message)
logger.debug(f"12V = {self._f_temp.value:2.3f}")
return self._f_temp.value
def get_5v(self) -> float:
"""Get 5V voltage."""
with PSP(self._lmb_io_path, self._lmb_api_path) as psp:
i_ret = psp.lib.LMB_HWM_Get5V(byref(self._f_temp))
if i_ret != PSP.ERR_Success:
error_message = PSP.get_error_message("LMB_HWM_Get5V", i_ret)
logger.error(error_message)
raise PSP.PSPError(error_message)
logger.debug(f"5V = {self._f_temp.value:2.3f}")
return self._f_temp.value
def get_3v3(self) -> float:
"""Get 3.3V voltage."""
with PSP(self._lmb_io_path, self._lmb_api_path) as psp:
i_ret = psp.lib.LMB_HWM_Get3V3(byref(self._f_temp))
if i_ret != PSP.ERR_Success:
error_message = PSP.get_error_message("LMB_HWM_Get3V3", i_ret)
logger.error(error_message)
raise PSP.PSPError(error_message)
logger.debug(f"3.3V = {self._f_temp.value:2.3f}")
return self._f_temp.value
def get_5vsb(self) -> float:
"""Get 5Vsb voltage."""
with PSP(self._lmb_io_path, self._lmb_api_path) as psp:
i_ret = psp.lib.LMB_HWM_Get5Vsb(byref(self._f_temp))
if i_ret != PSP.ERR_Success:
error_message = PSP.get_error_message("LMB_HWM_Get5Vsb", i_ret)
logger.error(error_message)
raise PSP.PSPError(error_message)
logger.debug(f"5VSB = {self._f_temp.value:2.3f}")
return self._f_temp.value
def get_3v3sb(self) -> float:
"""Get 3.3Vsb voltage."""
with PSP(self._lmb_io_path, self._lmb_api_path) as psp:
i_ret = psp.lib.LMB_HWM_Get3V3sb(byref(self._f_temp))
if i_ret != PSP.ERR_Success:
error_message = PSP.get_error_message("LMB_HWM_Get3V3sb", i_ret)
logger.error(error_message)
raise PSP.PSPError(error_message)
logger.debug(f"3.3VSB = {self._f_temp.value:2.3f}")
return self._f_temp.value
def get_vbat(self) -> float:
"""Get Vbat voltage."""
with PSP(self._lmb_io_path, self._lmb_api_path) as psp:
i_ret = psp.lib.LMB_HWM_GetVbat(byref(self._f_temp))
if i_ret != PSP.ERR_Success:
error_message = PSP.get_error_message("LMB_HWM_GetVbat", i_ret)
logger.error(error_message)
raise PSP.PSPError(error_message)
logger.debug(f"Vbat = {self._f_temp.value:2.3f}")
return self._f_temp.value
def get_power_supply(self, num: int) -> int:
"""Get Power Supply voltage."""
# Check type.
if not isinstance(num, int):
raise TypeError("'num' type must be int")
with PSP(self._lmb_io_path, self._lmb_api_path) as psp:
i_ret = psp.lib.LMB_HWM_GetPowerSupply(num, byref(self._w_data))
if i_ret != PSP.ERR_Success:
error_message = PSP.get_error_message("LMB_HWM_GetPowerSupply", i_ret)
logger.error(error_message)
raise PSP.PSPError(error_message)
logger.debug(f"PowerSupply {num} AC voltage = {self._f_temp.value:d}")
return self._w_data.value
def testhwm(self, conf_path: str = "/opt/lanner/psp/bin/amd64/utils/hwm.conf") -> None:
"""For hardware monitor testing."""
cp = ConfigParser()
cp.read(conf_path)
with PSP(self._lmb_io_path, self._lmb_api_path) as psp:
# Temperature.
if psp.lib.LMB_HWM_GetCpuTemp(1, byref(self._f_temp)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_CPU1_Temp"]["min"])
max_ = self._str_replace(cp["HWM_CPU1_Temp"]["max"])
print(f"CPU-1 temperature = {int(self._f_temp.value):3d} C\t"
f"(min = {min_:3.0f} C, max = {max_:3.0f} C)")
if psp.lib.LMB_HWM_GetCpuTemp(2, byref(self._f_temp)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_CPU2_Temp"]["min"])
max_ = self._str_replace(cp["HWM_CPU2_Temp"]["max"])
print(f"CPU-2 temperature = {int(self._f_temp.value):3d} C\t"
f"(min = {min_:3.0f} C, max = {max_:3.0f} C)")
if psp.lib.LMB_HWM_GetSysTemp(1, byref(self._f_temp)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_SYS1_Temp"]["min"])
max_ = self._str_replace(cp["HWM_SYS1_Temp"]["max"])
print(f"SYS-1 temperature = {int(self._f_temp.value):3d} C\t"
f"(min = {min_:3.0f} C, max = {max_:3.0f} C)")
if psp.lib.LMB_HWM_GetSysTemp(2, byref(self._f_temp)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_SYS2_Temp"]["min"])
max_ = self._str_replace(cp["HWM_SYS2_Temp"]["max"])
print(f"SYS-2 temperature = {int(self._f_temp.value):3d} C\t"
f"(min = {min_:3.0f} C, max = {max_:3.0f} C)")
# Voltage.
if psp.lib.LMB_HWM_GetVcore(1, byref(self._f_temp)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_Core1_volt"]["min"])
max_ = self._str_replace(cp["HWM_Core1_volt"]["max"])
print(f"CPU-1 Vcore = {self._f_temp.value:7.3f} V\t\t"
f"(min = {min_:7.3f} V, max = {max_:7.3f} V)")
if psp.lib.LMB_HWM_GetVcore(2, byref(self._f_temp)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_Core2_volt"]["min"])
max_ = self._str_replace(cp["HWM_Core2_volt"]["max"])
print(f"CPU-2 Vcore = {self._f_temp.value:7.3f} V\t\t"
f"(min = {min_:7.3f} V, max = {max_:7.3f} V)")
if psp.lib.LMB_HWM_Get12V(byref(self._f_temp)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_12v_volt"]["min"])
max_ = self._str_replace(cp["HWM_12v_volt"]["max"])
print(f"12V = {self._f_temp.value:7.3f} V\t\t\t"
f"(min = {min_:7.3f} V, max = {max_:7.3f} V)")
if psp.lib.LMB_HWM_Get5V(byref(self._f_temp)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_5v_volt"]["min"])
max_ = self._str_replace(cp["HWM_5v_volt"]["max"])
print(f"5V = {self._f_temp.value:7.3f} V\t\t\t"
f"(min = {min_:7.3f} V, max = {max_:7.3f} V)")
if psp.lib.LMB_HWM_Get3V3(byref(self._f_temp)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_3v3_volt"]["min"])
max_ = self._str_replace(cp["HWM_3v3_volt"]["max"])
print(f"3.3V = {self._f_temp.value:7.3f} V\t\t"
f"(min = {min_:7.3f} V, max = {max_:7.3f} V)")
if psp.lib.LMB_HWM_Get5Vsb(byref(self._f_temp)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_5vsb_volt"]["min"])
max_ = self._str_replace(cp["HWM_5vsb_volt"]["max"])
print(f"5VSB = {self._f_temp.value:7.3f} V\t\t"
f"(min = {min_:7.3f} V, max = {max_:7.3f} V)")
if psp.lib.LMB_HWM_Get3V3sb(byref(self._f_temp)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_3v3sb_volt"]["min"])
max_ = self._str_replace(cp["HWM_3v3sb_volt"]["max"])
print(f"3.3VSB = {self._f_temp.value:7.3f} V\t\t"
f"(min = {min_:7.3f} V, max = {max_:7.3f} V)")
if psp.lib.LMB_HWM_GetVbat(byref(self._f_temp)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_vBat_volt"]["min"])
max_ = self._str_replace(cp["HWM_vBat_volt"]["max"])
print(f"Vbat = {self._f_temp.value:7.3f} V\t\t"
f"(min = {min_:7.3f} V, max = {max_:7.3f} V)")
if psp.lib.LMB_HWM_GetVDDR(1, byref(self._f_temp)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_vddr_volt"]["min"])
max_ = self._str_replace(cp["HWM_vddr_volt"]["max"])
print(f"VDDR = {self._f_temp.value:7.3f} V\t\t"
f"(min = {min_:7.3f} V, max = {max_:7.3f} V)")
if psp.lib.LMB_HWM_GetPowerSupply(1, byref(self._w_data)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_PSU1_volt"]["min"])
max_ = self._str_replace(cp["HWM_PSU1_volt"]["max"])
print(f"PowerSupply 1 AC voltage = {self._w_data.value:3d} V\t"
f"(min = {min_:3.0f} V, max = {max_:3.0f} V)")
if psp.lib.LMB_HWM_GetPowerSupply(2, byref(self._w_data)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_PSU2_volt"]["min"])
max_ = self._str_replace(cp["HWM_PSU2_volt"]["max"])
print(f"PowerSupply 2 AC voltage = {self._w_data.value:3d} V\t"
f"(min = {min_:3.0f} V, max = {max_:3.0f} V)")
# Fan RPM.
if psp.lib.LMB_HWM_GetCpuFan(1, byref(self._w_rpm)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_CPU1_RPM"]["min"])
max_ = self._str_replace(cp["HWM_CPU1_RPM"]["max"])
print(f"CPU FAN 1 speed = {self._w_rpm.value:5d} rpm\t"
f"(min = {min_:5.0f} rpm, max = {max_:5.0f} rpm)")
if psp.lib.LMB_HWM_GetCpuFan(2, byref(self._w_rpm)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_CPU2_RPM"]["min"])
max_ = self._str_replace(cp["HWM_CPU2_RPM"]["max"])
print(f"CPU FAN 2 speed = {self._w_rpm.value:5d} rpm\t"
f"(min = {min_:5.0f} rpm, max = {max_:5.0f} rpm)")
if psp.lib.LMB_HWM_GetSysFan(1, byref(self._w_rpm)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_SYS1_RPM"]["min"])
max_ = self._str_replace(cp["HWM_SYS1_RPM"]["max"])
print(f"SYS FAN 1 speed = {self._w_rpm.value:5d} rpm\t"
f"(min = {min_:5.0f} rpm, max = {max_:5.0f} rpm)")
if psp.lib.LMB_HWM_GetSysFan(2, byref(self._w_rpm)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_SYS2_RPM"]["min"])
max_ = self._str_replace(cp["HWM_SYS2_RPM"]["max"])
print(f"SYS FAN 2 speed = {self._w_rpm.value:5d} rpm\t"
f"(min = {min_:5.0f} rpm, max = {max_:5.0f} rpm)")
def get_all(self, conf_path: str = "/opt/lanner/psp/bin/amd64/utils/hwm.conf") -> List[dict]:
"""Get all exist value to list."""
cp = ConfigParser()
cp.read(conf_path)
data = []
with PSP(self._lmb_io_path, self._lmb_api_path) as psp:
# Temperature.
if psp.lib.LMB_HWM_GetCpuTemp(1, byref(self._f_temp)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_CPU1_Temp"]["min"])
max_ = self._str_replace(cp["HWM_CPU1_Temp"]["max"])
data.append({
"name": "CPU-1 temperature",
"current": int(self._f_temp.value),
"min": int(f"{min_:3.0f}"),
"max": int(f"{max_:3.0f}"),
})
if psp.lib.LMB_HWM_GetCpuTemp(2, byref(self._f_temp)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_CPU2_Temp"]["min"])
max_ = self._str_replace(cp["HWM_CPU2_Temp"]["max"])
data.append({
"name": "CPU-2 temperature",
"current": int(self._f_temp.value),
"min": int(f"{min_:3.0f}"),
"max": int(f"{max_:3.0f}"),
})
if psp.lib.LMB_HWM_GetSysTemp(1, byref(self._f_temp)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_SYS1_Temp"]["min"])
max_ = self._str_replace(cp["HWM_SYS1_Temp"]["max"])
data.append({
"name": "SYS-1 temperature",
"current": int(self._f_temp.value),
"min": int(f"{min_:3.0f}"),
"max": int(f"{max_:3.0f}"),
})
if psp.lib.LMB_HWM_GetSysTemp(2, byref(self._f_temp)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_SYS2_Temp"]["min"])
max_ = self._str_replace(cp["HWM_SYS2_Temp"]["max"])
data.append({
"name": "SYS-2 temperature",
"current": int(self._f_temp.value),
"min": int(f"{min_:3.0f}"),
"max": int(f"{max_:3.0f}"),
})
# Voltage.
if psp.lib.LMB_HWM_GetVcore(1, byref(self._f_temp)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_Core1_volt"]["min"])
max_ = self._str_replace(cp["HWM_Core1_volt"]["max"])
data.append({
"name": "CPU-1 Vcore",
"current": float(f"{self._f_temp.value:7.3f}"),
"min": float(f"{min_:7.3f}"),
"max": float(f"{max_:7.3f}"),
})
if psp.lib.LMB_HWM_GetVcore(2, byref(self._f_temp)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_Core2_volt"]["min"])
max_ = self._str_replace(cp["HWM_Core2_volt"]["max"])
data.append({
"name": "CPU-2 Vcore",
"current": float(f"{self._f_temp.value:7.3f}"),
"min": float(f"{min_:7.3f}"),
"max": float(f"{max_:7.3f}"),
})
if psp.lib.LMB_HWM_Get12V(byref(self._f_temp)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_12v_volt"]["min"])
max_ = self._str_replace(cp["HWM_12v_volt"]["max"])
data.append({
"name": "12V",
"current": float(f"{self._f_temp.value:7.3f}"),
"min": float(f"{min_:7.3f}"),
"max": float(f"{max_:7.3f}"),
})
if psp.lib.LMB_HWM_Get5V(byref(self._f_temp)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_5v_volt"]["min"])
max_ = self._str_replace(cp["HWM_5v_volt"]["max"])
data.append({
"name": "5V",
"current": float(f"{self._f_temp.value:7.3f}"),
"min": float(f"{min_:7.3f}"),
"max": float(f"{max_:7.3f}"),
})
if psp.lib.LMB_HWM_Get3V3(byref(self._f_temp)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_3v3_volt"]["min"])
max_ = self._str_replace(cp["HWM_3v3_volt"]["max"])
data.append({
"name": "3.3V",
"current": float(f"{self._f_temp.value:7.3f}"),
"min": float(f"{min_:7.3f}"),
"max": float(f"{max_:7.3f}"),
})
if psp.lib.LMB_HWM_Get5Vsb(byref(self._f_temp)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_5vsb_volt"]["min"])
max_ = self._str_replace(cp["HWM_5vsb_volt"]["max"])
data.append({
"name": "5VSB",
"current": float(f"{self._f_temp.value:7.3f}"),
"min": float(f"{min_:7.3f}"),
"max": float(f"{max_:7.3f}"),
})
if psp.lib.LMB_HWM_Get3V3sb(byref(self._f_temp)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_3v3sb_volt"]["min"])
max_ = self._str_replace(cp["HWM_3v3sb_volt"]["max"])
data.append({
"name": "3.3VSB",
"current": float(f"{self._f_temp.value:7.3f}"),
"min": float(f"{min_:7.3f}"),
"max": float(f"{max_:7.3f}"),
})
if psp.lib.LMB_HWM_GetVbat(byref(self._f_temp)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_vBat_volt"]["min"])
max_ = self._str_replace(cp["HWM_vBat_volt"]["max"])
data.append({
"name": "Vbat",
"current": float(f"{self._f_temp.value:7.3f}"),
"min": float(f"{min_:7.3f}"),
"max": float(f"{max_:7.3f}"),
})
if psp.lib.LMB_HWM_GetVDDR(1, byref(self._f_temp)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_vddr_volt"]["min"])
max_ = self._str_replace(cp["HWM_vddr_volt"]["max"])
data.append({
"name": "VDDR",
"current": float(f"{self._f_temp.value:7.3f}"),
"min": float(f"{min_:7.3f}"),
"max": float(f"{max_:7.3f}"),
})
if psp.lib.LMB_HWM_GetPowerSupply(1, byref(self._w_data)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_PSU1_volt"]["min"])
max_ = self._str_replace(cp["HWM_PSU1_volt"]["max"])
data.append({
"name": "PowerSupply 1 AC voltage",
"current": int(self._w_data.value),
"min": int(f"{min_:3.0f}"),
"max": int(f"{max_:3.0f}"),
})
if psp.lib.LMB_HWM_GetPowerSupply(2, byref(self._w_data)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_PSU2_volt"]["min"])
max_ = self._str_replace(cp["HWM_PSU2_volt"]["max"])
data.append({
"name": "PowerSupply 2 AC voltage",
"current": int(self._w_data.value),
"min": int(f"{min_:3.0f}"),
"max": int(f"{max_:3.0f}"),
})
# Fan RPM.
if psp.lib.LMB_HWM_GetCpuFan(1, byref(self._w_rpm)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_CPU1_RPM"]["min"])
max_ = self._str_replace(cp["HWM_CPU1_RPM"]["max"])
data.append({
"name": "CPU FAN 1 speed",
"current": int(self._w_rpm.value),
"min": int(f"{min_:5.0f}"),
"max": int(f"{max_:5.0f}"),
})
if psp.lib.LMB_HWM_GetCpuFan(2, byref(self._w_rpm)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_CPU2_RPM"]["min"])
max_ = self._str_replace(cp["HWM_CPU2_RPM"]["max"])
data.append({
"name": "CPU FAN 2 speed",
"current": int(self._w_rpm.value),
"min": int(f"{min_:5.0f}"),
"max": int(f"{max_:5.0f}"),
})
if psp.lib.LMB_HWM_GetSysFan(1, byref(self._w_rpm)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_SYS1_RPM"]["min"])
max_ = self._str_replace(cp["HWM_SYS1_RPM"]["max"])
data.append({
"name": "SYS FAN 1 speed",
"current": int(self._w_rpm.value),
"min": int(f"{min_:5.0f}"),
"max": int(f"{max_:5.0f}"),
})
if psp.lib.LMB_HWM_GetSysFan(2, byref(self._w_rpm)) == PSP.ERR_Success:
min_ = self._str_replace(cp["HWM_SYS2_RPM"]["min"])
max_ = self._str_replace(cp["HWM_SYS2_RPM"]["max"])
data.append({
"name": "SYS FAN 2 speed",
"current": int(self._w_rpm.value),
"min": int(f"{min_:5.0f}"),
"max": int(f"{max_:5.0f}"),
})
return data
| 52.393258
| 97
| 0.521767
| 3,172
| 23,315
| 3.50599
| 0.047919
| 0.036418
| 0.065552
| 0.109343
| 0.911069
| 0.890118
| 0.857117
| 0.847496
| 0.842101
| 0.838594
| 0
| 0.02726
| 0.32344
| 23,315
| 444
| 98
| 52.511261
| 0.677761
| 0.022947
| 0
| 0.736709
| 0
| 0.053165
| 0.214393
| 0.049916
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035443
| false
| 0
| 0.012658
| 0
| 0.081013
| 0.048101
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3435b0070adcd55d058d125026f4ff38707b8d9f
| 208
|
py
|
Python
|
unityagents.py
|
PranayKr/Bonsai_CodeBase
|
4e9a7281364bd71f7cd466634ef8379eacb1b716
|
[
"Apache-2.0"
] | null | null | null |
unityagents.py
|
PranayKr/Bonsai_CodeBase
|
4e9a7281364bd71f7cd466634ef8379eacb1b716
|
[
"Apache-2.0"
] | null | null | null |
unityagents.py
|
PranayKr/Bonsai_CodeBase
|
4e9a7281364bd71f7cd466634ef8379eacb1b716
|
[
"Apache-2.0"
] | null | null | null |
#from .environment import *
#from .brain import *
#from .exception import *
#from .curriculum import *
from environment import *
from brain import *
from exception import *
from curriculum import *
| 20.8
| 28
| 0.721154
| 24
| 208
| 6.25
| 0.25
| 0.466667
| 0.28
| 0.333333
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0.201923
| 208
| 10
| 29
| 20.8
| 0.903614
| 0.456731
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 13
|
3473637d52cd481940880cd78d96bd32c37f320c
| 28,643
|
py
|
Python
|
alistock/views.py
|
taojy123/AliStock
|
1bedb6fbc985b1e062a7a9c04ea7a23e56f7821c
|
[
"MIT"
] | null | null | null |
alistock/views.py
|
taojy123/AliStock
|
1bedb6fbc985b1e062a7a9c04ea7a23e56f7821c
|
[
"MIT"
] | null | null | null |
alistock/views.py
|
taojy123/AliStock
|
1bedb6fbc985b1e062a7a9c04ea7a23e56f7821c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.db.models import Sum
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden
from django.shortcuts import render_to_response
from django.views.decorators.csrf import csrf_exempt
from django.contrib import auth
from django.contrib.sites.models import Site
from django.conf import settings
from models import *
import datetime
import xlwt
import os
import uuid
import StringIO
def ip_required(func=None):
    """Decorator: allow staff users, or clients whose IP matches the whitelist
    stored in the Site object with domain 'allow_ip'; otherwise redirect to
    the login URL."""
    def wrapper(request, *args, **kwargs):
        # Staff accounts bypass the IP whitelist entirely.
        if not request.user.is_staff:
            whitelist = Site.objects.filter(domain='allow_ip').first().name
            remote = request.META.get('REMOTE_ADDR', '1.1.1.1')
            # NOTE(review): substring test of the IP minus its last 3 chars
            # against the whitelist string — presumably a prefix match; confirm.
            if remote[:-3] not in whitelist:
                return HttpResponseRedirect(settings.LOGIN_URL)
        return func(request, *args, **kwargs)
    return wrapper
def index(request):
    """Render the landing page; exposes ``client_ip`` to the template via locals()."""
    client_ip = request.META.get('REMOTE_ADDR', '1.1.1.1')
    return render_to_response('index.html', locals())
# ======== Product =====================
@login_required()
def product_list(request):
    """List all products, refreshing each product's cached ``stock``.

    Context is passed via ``locals()``, so the local variable names here are
    part of the template contract.
    """
    products = Product.objects.order_by('-update_time')
    for product in products:
        # Recompute stock as total purchased minus total sold; aggregate()
        # yields None when there are no rows, hence the trailing "or 0".
        purchase_sum = product.purchase_set.all().aggregate(sum=Sum('quantity')).get('sum', 0) or 0
        sale_sum = product.sale_set.all().aggregate(sum=Sum('quantity')).get('sum', 0) or 0
        stock = purchase_sum - sale_sum
        # Persist only when the cached value drifted, avoiding needless writes.
        if stock != product.stock:
            product.stock = stock
            product.save()
    return render_to_response('product_list.html', locals())
@login_required()
def product_add(request):
    """Create a Product from POSTed form fields and redirect to the list page.

    ``price`` and ``special`` default to 0 when absent or empty.
    """
    post = request.POST
    product = Product()
    product.pid = post.get("pid")
    product.name = post.get("name")
    product.color = post.get("color")
    product.size = post.get("size")
    product.pattern = post.get("pattern")
    product.url = post.get("url")
    product.price = post.get("price") or 0
    product.special = post.get("special") or 0
    product.extra = post.get("extra")
    product.save()
    return HttpResponseRedirect("/product/list/")
@login_required()
def product_del(request, id):
    """Delete the product with primary key ``id`` and return to the list."""
    doomed = Product.objects.filter(id=id)
    doomed.delete()
    return HttpResponseRedirect("/product/list/")
@login_required()
def product_update(request):
    """Overwrite an existing Product's fields from POSTed form data.

    ``price`` and ``special`` default to 0 when absent or empty.
    """
    post = request.POST
    product = Product.objects.get(id=post.get("id"))
    product.pid = post.get("pid")
    product.name = post.get("name")
    product.color = post.get("color")
    product.size = post.get("size")
    product.pattern = post.get("pattern")
    product.url = post.get("url")
    product.price = post.get("price") or 0
    product.special = post.get("special") or 0
    product.extra = post.get("extra")
    product.save()
    return HttpResponseRedirect("/product/list/")
# ======== Purchase =====================
@login_required()
def purchase_list(request):
    """List purchases (newest first) plus all products for the add/edit form.

    Context is passed via ``locals()``; the template relies on the names
    ``purchases`` and ``products``.
    """
    purchases = Purchase.objects.order_by('-create_time')
    products = Product.objects.order_by('name', 'color', 'size', 'pattern')
    return render_to_response('purchase_list.html', locals())
@login_required()
def purchase_add(request):
    """Record a new Purchase; when no price is posted it defaults to
    quantity * product.price."""
    post = request.POST
    product = Product.objects.get(id=post.get("product_id"))
    quantity = post.get("quantity") or 0
    price = post.get("price")
    if not price:
        # Derive the total from the product's unit price.
        quantity = int(quantity)
        price = quantity * product.price
    purchase = Purchase()
    purchase.product = product
    purchase.quantity = quantity
    purchase.price = price
    purchase.comment = post.get("comment")
    purchase.save()
    return HttpResponseRedirect("/purchase/list/")
@login_required()
def purchase_del(request, id):
    """Delete the purchase with primary key ``id`` and return to the list."""
    doomed = Purchase.objects.filter(id=id)
    doomed.delete()
    return HttpResponseRedirect("/purchase/list/")
@login_required()
def purchase_update(request):
    """Overwrite an existing Purchase from POSTed form data; when no price is
    posted it defaults to quantity * product.price."""
    post = request.POST
    product = Product.objects.get(id=post.get("product_id"))
    quantity = post.get("quantity") or 0
    price = post.get("price")
    if not price:
        # Derive the total from the product's unit price.
        quantity = int(quantity)
        price = quantity * product.price
    purchase = Purchase.objects.get(id=post.get("id"))
    purchase.product = product
    purchase.quantity = quantity
    purchase.price = price
    purchase.comment = post.get("comment")
    purchase.save()
    return HttpResponseRedirect("/purchase/list/")
# ======== Sale =====================
@ip_required
def sale_list(request):
    """List sales (newest first) plus all products for the add/edit form.

    Context is passed via ``locals()``; the template relies on the names
    ``sales`` and ``products``.
    """
    sales = Sale.objects.order_by('-create_time')
    products = Product.objects.order_by('name', 'color', 'size', 'pattern')
    return render_to_response('sale_list.html', locals())
@ip_required
def sale_add(request):
    """Record a new Sale; when no price is posted it defaults to
    quantity * product.price."""
    post = request.POST
    product = Product.objects.get(id=post.get("product_id"))
    quantity = post.get("quantity") or 0
    price = post.get("price")
    if not price:
        # Derive the total from the product's unit price.
        quantity = int(quantity)
        price = quantity * product.price
    sale = Sale()
    sale.product = product
    sale.quantity = quantity
    sale.price = price
    sale.comment = post.get("comment")
    sale.save()
    return HttpResponseRedirect("/sale/list/")
@ip_required
def sale_del(request, id):
    """Delete the sale with primary key ``id`` and return to the list."""
    doomed = Sale.objects.filter(id=id)
    doomed.delete()
    return HttpResponseRedirect("/sale/list/")
@ip_required
def sale_update(request):
    """Overwrite an existing Sale from POSTed form data; when no price is
    posted it defaults to quantity * product.price."""
    post = request.POST
    product = Product.objects.get(id=post.get("product_id"))
    quantity = post.get("quantity") or 0
    price = post.get("price")
    if not price:
        # Derive the total from the product's unit price.
        quantity = int(quantity)
        price = quantity * product.price
    sale = Sale.objects.get(id=post.get("id"))
    sale.product = product
    sale.quantity = quantity
    sale.price = price
    sale.comment = post.get("comment")
    sale.save()
    return HttpResponseRedirect("/sale/list/")
@ip_required
def quick_input(request):
    """Quick sale entry: POST a product code to book a 1-unit sale.

    GET renders the entry page; POST looks up a Product by its ``extra``
    code (input is upper-cased before matching) and books one unit at the
    product's current price. Responses are HTML fragments (Chinese text)
    shown inline on the page.
    """
    if request.method == 'POST':
        client_ip = request.META.get('REMOTE_ADDR', '1.1.1.1')
        data = request.POST.get('data')
        data = data.strip().upper()
        # 'extra' holds the product's quick-input code.
        product = Product.objects.filter(extra=data).first()
        if not product:
            return HttpResponse(u'没有对应产品')
        sale = Sale()
        sale.product = product
        sale.quantity = 1
        sale.price = product.current_price
        # special > 0 marks a discounted ("特价") product; tag the sale and
        # echo a red label in the confirmation text.
        if product.special > 0:
            sale.comment = u'<span style="color: red">[特价]</span>快速录入(%s)' % client_ip
            sale.is_special = True
            result = u'<span style="color: red">[特价]</span>1份 %s , 录入成功' % product.name
        else:
            sale.comment = u'快速录入(%s)' % client_ip
            sale.is_special = False
            result = u'1份 %s , 录入成功' % product.name
        sale.save()
        return HttpResponse(result)
    return render_to_response('quick_input.html', locals())
@ip_required
def quick_delete(request):
    """Delete the most recent quick-input sale, if it is less than 10 minutes old.

    Redirects back to the quick-input page on success; otherwise returns the
    failure message (Chinese HTML fragment).
    """
    now = datetime.datetime.now()
    sale = Sale.objects.order_by('-create_time').first()
    # Bug fixes vs. original:
    # 1. .first() returns None when there are no sales at all — the original
    #    crashed with AttributeError; treat that as "nothing deletable".
    # 2. timedelta.seconds wraps every 24h (a sale 1 day + 5 min old reads
    #    300), so a stale record could be deleted; use total_seconds().
    if sale is not None and (now - sale.create_time).total_seconds() < 600:
        # Only records created within the last 10 minutes may be deleted.
        sale.delete()
        return HttpResponseRedirect('/quick_input/#username')
    return HttpResponse(u'删除失败,只能删除最近10分钟产生的记录,请联系管理员.<a href="/">返回首页</a>')
def report(request):
style_blue = xlwt.easyxf('pattern: pattern solid, fore_colour ice_blue;')
style_orange = xlwt.easyxf('pattern: pattern solid, fore_colour orange;')
style_red = xlwt.easyxf('pattern: pattern solid, fore_colour red;')
all_products = Product.objects.order_by('id')
now = datetime.datetime.now()
if now.month == 12:
next_month = 1
else:
next_month = now.month + 1
day_begin = now.replace(hour=0, minute=0, second=0)
day_end = now.replace(hour=23, minute=59, second=59)
month_begin = now.replace(day=1, hour=0, minute=0, second=0)
month_end = now.replace(month=next_month, day=1, hour=0, minute=0, second=0)
year_begin = now.replace(month=1, day=1, hour=0, minute=0, second=0)
year_end = now.replace(month=12, day=31, hour=23, minute=59, second=59)
w = xlwt.Workbook()
# ----------- day ------------
sheet_name = u'%d年%d月%d日' % (now.year, now.month, now.day)
ws = w.add_sheet(sheet_name)
sales = Sale.objects.filter(create_time__gte=day_begin, create_time__lte=day_end).order_by('create_time')
time_flag = '%H:%M'
product_t_price = {}
product_t_quantity = {}
special_product_t_price = {}
special_product_t_quantity = {}
t_list = []
product_list = []
special_product_list = []
for sale in sales:
product = sale.product
t = sale.create_time.strftime(time_flag)
if t not in t_list:
t_list.append(t)
if sale.is_special:
if product not in special_product_list:
special_product_list.append(product)
special_product_t_price[(product.id, t)] = sale.price
special_product_t_quantity[(product.id, t)] = special_product_t_quantity.get((product.id, t), 0) + int(sale.quantity)
else:
if product not in product_list:
product_list.append(product)
product_t_price[(product.id, t)] = sale.price
product_t_quantity[(product.id, t)] = product_t_quantity.get((product.id, t), 0) + int(sale.quantity)
for product in all_products:
if product not in product_list + special_product_list:
product_list.append(product)
i = 0
ws.write(i, 1, u'标记')
ws.write(i, 2, u'品种')
ws.write(i, 3, u'单价')
ws.write(i, 4, u'数量')
ws.write(i, 5, u'小计金额')
j = 5
for t in t_list:
j += 1
ws.write(i, j, t)
sum_quantity = 0
sum_price = 0
for product in product_list:
i += 1
ws.write(i, 1, product.extra)
ws.write(i, 2, product.name)
price = product.current_price
total_quantity = 0
total_price = 0
j = 5
for t in t_list:
price = product_t_price.get((product.id, t), product.current_price)
quantity = product_t_quantity.get((product.id, t), 0)
total_quantity += quantity
total_price += quantity * price
j += 1
if quantity:
ws.write(i, j, quantity)
ws.write(i, 3, price)
ws.write(i, 4, total_quantity)
ws.write(i, 5, total_price)
sum_quantity += total_quantity
sum_price += total_price
i += 1
ws.write(i, 3, u'正常总计', style_blue)
ws.write(i, 4, sum_quantity, style_blue)
ws.write(i, 5, sum_price, style_blue)
j = 5
for t in t_list:
j += 1
sub_quantity = 0
for product in product_list:
quantity = product_t_quantity.get((product.id, t), 0)
sub_quantity += quantity
ws.write(i, j, sub_quantity, style_blue)
ws.write_merge(1, i-1, 0, 0, u'正常品种')
# 特价
i += 1
ws.write(i, 1, u'标记')
ws.write(i, 2, u'品种')
ws.write(i, 3, u'单价')
ws.write(i, 4, u'数量')
ws.write(i, 5, u'小计金额')
j = 5
for t in t_list:
j += 1
ws.write(i, j, t)
special_start_row = i + 1
special_sum_quantity = 0
special_sum_price = 0
for product in special_product_list:
i += 1
ws.write(i, 1, product.extra)
ws.write(i, 2, product.name)
price = product.current_price
total_quantity = 0
total_price = 0
j = 5
for t in t_list:
price = special_product_t_price.get((product.id, t), product.current_price)
quantity = special_product_t_quantity.get((product.id, t), 0)
total_quantity += quantity
total_price += quantity * price
j += 1
if quantity:
ws.write(i, j, quantity)
ws.write(i, 3, price)
ws.write(i, 4, total_quantity)
ws.write(i, 5, total_price)
special_sum_quantity += total_quantity
special_sum_price += total_price
i += 1
ws.write(i, 3, u'特价总计', style_orange)
ws.write(i, 4, special_sum_quantity, style_orange)
ws.write(i, 5, special_sum_price, style_orange)
j = 5
for t in t_list:
j += 1
sub_quantity = 0
for product in special_product_list:
quantity = special_product_t_quantity.get((product.id, t), 0)
sub_quantity += quantity
ws.write(i, j, sub_quantity, style_orange)
if special_start_row <= i - 1:
ws.write_merge(special_start_row, i - 1, 0, 0, u'特价品种')
i += 1
ws.write(i, 3, u'合计', style_red)
ws.write(i, 4, sum_quantity + special_sum_quantity, style_red)
ws.write(i, 5, sum_price + special_sum_price, style_red)
# ----------- month ------------
sheet_name = u'%d年%d月' % (now.year, now.month)
ws = w.add_sheet(sheet_name)
sales = Sale.objects.filter(create_time__gte=month_begin, create_time__lte=month_end).order_by('create_time')
time_flag = '%m-%d'
product_t_price = {}
product_t_quantity = {}
special_product_t_price = {}
special_product_t_quantity = {}
t_list = []
product_list = []
special_product_list = []
for sale in sales:
product = sale.product
t = sale.create_time.strftime(time_flag)
if t not in t_list:
t_list.append(t)
if sale.is_special:
if product not in special_product_list:
special_product_list.append(product)
special_product_t_price[(product.id, t)] = sale.price
special_product_t_quantity[(product.id, t)] = special_product_t_quantity.get((product.id, t), 0) + int(sale.quantity)
else:
if product not in product_list:
product_list.append(product)
product_t_price[(product.id, t)] = sale.price
product_t_quantity[(product.id, t)] = product_t_quantity.get((product.id, t), 0) + int(sale.quantity)
for product in all_products:
if product not in product_list + special_product_list:
product_list.append(product)
i = 0
ws.write_merge(i, i + 1, 1, 1, u'标记')
ws.write_merge(i, i + 1, 2, 2, u'品种')
ws.write_merge(i, i + 1, 3, 3, u'小计数量')
ws.write_merge(i, i + 1, 4, 4, u'小计金额')
j = 4
for t in t_list:
j += 1
ws.write_merge(i, i, j, j + 2, t)
ws.write(i + 1, j, u'单价')
ws.write(i + 1, j + 1, u'数量')
ws.write(i + 1, j + 2, u'金额')
j += 2
i += 1
sum_quantity = 0
sum_price = 0
for product in product_list:
i += 1
ws.write(i, 1, product.extra)
ws.write(i, 2, product.name)
total_quantity = 0
total_price = 0
j = 4
for t in t_list:
price = product_t_price.get((product.id, t), product.current_price)
quantity = product_t_quantity.get((product.id, t), 0)
total_quantity += quantity
total_price += quantity * price
j += 1
ws.write(i, j, price)
j += 1
if quantity:
ws.write(i, j, quantity)
j += 1
if quantity:
ws.write(i, j, quantity * price)
ws.write(i, 3, total_quantity)
ws.write(i, 4, total_price)
sum_quantity += total_quantity
sum_price += total_price
i += 1
ws.write(i, 2, u'正常总计', style_blue)
ws.write(i, 3, sum_quantity, style_blue)
ws.write(i, 4, sum_price, style_blue)
j = 4
for t in t_list:
sub_quantity = 0
sub_price = 0
for product in product_list:
price = product_t_price.get((product.id, t), product.current_price)
quantity = product_t_quantity.get((product.id, t), 0)
sub_quantity += quantity
sub_price += quantity * price
j += 1
ws.write(i, j, '', style_blue)
j += 1
ws.write(i, j, sub_quantity, style_blue)
j += 1
ws.write(i, j, sub_price, style_blue)
ws.write_merge(1, i - 1, 0, 0, u'正常品种')
# 特价
i += 1
ws.write_merge(i, i + 1, 1, 1, u'标记')
ws.write_merge(i, i + 1, 2, 2, u'品种')
ws.write_merge(i, i + 1, 3, 3, u'小计数量')
ws.write_merge(i, i + 1, 4, 4, u'小计金额')
j = 4
for t in t_list:
j += 1
ws.write_merge(i, i, j, j + 2, t)
ws.write(i + 1, j, u'单价')
ws.write(i + 1, j + 1, u'数量')
ws.write(i + 1, j + 2, u'金额')
j += 2
i += 1
special_start_row = i + 1
special_sum_quantity = 0
special_sum_price = 0
for product in special_product_list:
i += 1
ws.write(i, 1, product.extra)
ws.write(i, 2, product.name)
total_quantity = 0
total_price = 0
j = 4
for t in t_list:
price = special_product_t_price.get((product.id, t), product.current_price)
quantity = special_product_t_quantity.get((product.id, t), 0)
total_quantity += quantity
total_price += quantity * price
j += 1
ws.write(i, j, price)
j += 1
if quantity:
ws.write(i, j, quantity)
j += 1
if quantity:
ws.write(i, j, quantity * price)
ws.write(i, 3, total_quantity)
ws.write(i, 4, total_price)
special_sum_quantity += total_quantity
special_sum_price += total_price
i += 1
ws.write(i, 2, u'特价总计', style_orange)
ws.write(i, 3, special_sum_quantity, style_orange)
ws.write(i, 4, special_sum_price, style_orange)
j = 4
for t in t_list:
sub_quantity = 0
sub_price = 0
for product in special_product_list:
price = special_product_t_price.get((product.id, t), product.current_price)
quantity = special_product_t_quantity.get((product.id, t), 0)
sub_quantity += quantity
sub_price += quantity * price
j += 1
ws.write(i, j, '', style_orange)
j += 1
ws.write(i, j, sub_quantity, style_orange)
j += 1
ws.write(i, j, sub_price, style_orange)
if special_start_row <= i - 1:
ws.write_merge(special_start_row, i - 1, 0, 0, u'特价品种')
i += 1
ws.write(i, 2, u'合计', style_red)
ws.write(i, 3, sum_quantity + special_sum_quantity, style_red)
ws.write(i, 4, sum_price + special_sum_price, style_red)
# ----------- year ------------
sheet_name = u'%d年' % now.year
ws = w.add_sheet(sheet_name)
sales = Sale.objects.filter(create_time__gte=year_begin, create_time__lte=year_end).order_by('create_time')
time_flag = '%Y-%m'
product_t_price = {}
product_t_quantity = {}
special_product_t_price = {}
special_product_t_quantity = {}
t_list = []
product_list = []
special_product_list = []
for sale in sales:
product = sale.product
t = sale.create_time.strftime(time_flag)
if t not in t_list:
t_list.append(t)
if sale.is_special:
if product not in special_product_list:
special_product_list.append(product)
special_product_t_price[(product.id, t)] = special_product_t_price.get((product.id, t), 0) + sale.price
special_product_t_quantity[(product.id, t)] = special_product_t_quantity.get((product.id, t), 0) + int(sale.quantity)
else:
if product not in product_list:
product_list.append(product)
product_t_price[(product.id, t)] = product_t_price.get((product.id, t), 0) + sale.price
product_t_quantity[(product.id, t)] = product_t_quantity.get((product.id, t), 0) + int(sale.quantity)
for product in all_products:
if product not in product_list + special_product_list:
product_list.append(product)
i = 0
ws.write_merge(i, i + 1, 1, 1, u'标记')
ws.write_merge(i, i + 1, 2, 2, u'品种')
ws.write_merge(i, i + 1, 3, 3, u'小计数量')
ws.write_merge(i, i + 1, 4, 4, u'小计金额')
j = 4
for t in t_list:
j += 1
ws.write_merge(i, i, j, j + 1, t)
ws.write(i + 1, j, u'数量')
ws.write(i + 1, j + 1, u'金额')
j += 1
i += 1
sum_quantity = 0
sum_price = 0
for product in product_list:
i += 1
ws.write(i, 1, product.extra)
ws.write(i, 2, product.name)
total_quantity = 0
total_price = 0
j = 4
for t in t_list:
price = product_t_price.get((product.id, t), 0)
quantity = product_t_quantity.get((product.id, t), 0)
total_quantity += quantity
total_price += price
j += 1
if quantity:
ws.write(i, j, quantity)
j += 1
if quantity:
ws.write(i, j, price)
ws.write(i, 3, total_quantity)
ws.write(i, 4, total_price)
sum_quantity += total_quantity
sum_price += total_price
i += 1
ws.write(i, 2, u'正常总计', style_blue)
ws.write(i, 3, sum_quantity, style_blue)
ws.write(i, 4, sum_price, style_blue)
j = 4
for t in t_list:
sub_quantity = 0
sub_price = 0
for product in product_list:
price = product_t_price.get((product.id, t), 0)
quantity = product_t_quantity.get((product.id, t), 0)
sub_quantity += quantity
sub_price += price
j += 1
ws.write(i, j, sub_quantity, style_blue)
j += 1
ws.write(i, j, sub_price, style_blue)
ws.write_merge(1, i - 1, 0, 0, u'正常品种')
# 特价
i += 1
ws.write_merge(i, i + 1, 1, 1, u'标记')
ws.write_merge(i, i + 1, 2, 2, u'品种')
ws.write_merge(i, i + 1, 3, 3, u'小计数量')
ws.write_merge(i, i + 1, 4, 4, u'小计金额')
j = 4
for t in t_list:
j += 1
ws.write_merge(i, i, j, j + 1, t)
ws.write(i + 1, j, u'数量')
ws.write(i + 1, j + 1, u'金额')
j += 1
i += 1
special_start_row = i + 1
special_sum_quantity = 0
special_sum_price = 0
for product in special_product_list:
i += 1
ws.write(i, 1, product.extra)
ws.write(i, 2, product.name)
total_quantity = 0
total_price = 0
j = 4
for t in t_list:
price = special_product_t_price.get((product.id, t), 0)
quantity = special_product_t_quantity.get((product.id, t), 0)
total_quantity += quantity
total_price += price
j += 1
if quantity:
ws.write(i, j, quantity)
j += 1
if quantity:
ws.write(i, j, price)
ws.write(i, 3, total_quantity)
ws.write(i, 4, total_price)
special_sum_quantity += total_quantity
special_sum_price += total_price
i += 1
ws.write(i, 2, u'特价总计', style_orange)
ws.write(i, 3, special_sum_quantity, style_orange)
ws.write(i, 4, special_sum_price, style_orange)
j = 4
for t in t_list:
sub_quantity = 0
sub_price = 0
for product in special_product_list:
price = special_product_t_price.get((product.id, t), 0)
quantity = special_product_t_quantity.get((product.id, t), 0)
sub_quantity += quantity
sub_price += price
j += 1
ws.write(i, j, sub_quantity, style_orange)
j += 1
ws.write(i, j, sub_price, style_orange)
if special_start_row <= i - 1:
ws.write_merge(special_start_row, i - 1, 0, 0, u'特价品种')
i += 1
ws.write(i, 2, u'合计', style_red)
ws.write(i, 3, sum_quantity + special_sum_quantity, style_red)
ws.write(i, 4, sum_price + special_sum_price, style_red)
# -----------------------------
# s = StringIO.StringIO()
# w.save(s)
#
# s.seek(0)
# s = s.read()
# response = HttpResponse(s, content_type="application/octet-stream")
# response['Content-Disposition'] = 'attachment; filename=%s.xls' % now.strftime('%Y-%m-%d')
#
# return response
filename = '% s.xls' % now.strftime('%Y-%m-%d')
w.save('./static/files/%s' % filename)
return HttpResponseRedirect('/static/files/%s' % filename)
# ======== auth =====================
def loginpage(request):
    """Render the login page."""
    context = locals()
    return render_to_response('loginpage.html', context)
def registerpage(request):
    """Render the registration page."""
    context = locals()
    return render_to_response('registerpage.html', context)
def login(request):
    """Validate the submitted credentials and attach a session on success.

    Always redirects to the index page afterwards, whether or not the
    login attempt succeeded.
    """
    user = auth.authenticate(
        username=request.REQUEST.get('username', ''),
        password=request.REQUEST.get('password', ''),
    )
    # Only active accounts may log in; inactive / failed auth falls through.
    if user and user.is_active:
        auth.login(request, user)
    return HttpResponseRedirect("/")
def logout(request):
    """End the current session (if any) and go back to the index page."""
    current_user = request.user
    if current_user.is_authenticated():
        auth.logout(request)
    return HttpResponseRedirect("/")
def register(request):
    """Register a new user account.

    Expects ``username``, ``password1`` and ``password2`` in the request.
    Redirects to the index page on success; otherwise re-renders the
    registration page with an error message bound to ``msg`` (the template
    context is ``locals()``, so local names are deliberately kept).
    """
    msg = ""
    username = request.REQUEST.get('username')
    password1 = request.REQUEST.get('password1')
    password2 = request.REQUEST.get('password2')
    if username and password1 and password2:
        # .exists() only runs an EXISTS query instead of fetching rows
        # just to test the queryset's truthiness.
        if User.objects.filter(username=username).exists():
            msg = "The user name is already registered"
            return render_to_response('registerpage.html', locals())
        if password1 == password2:
            user = User()
            user.username = username
            user.set_password(password1)  # hashes the password; never store raw
            user.save()
            return HttpResponseRedirect("/")
    # Missing fields or mismatching passwords fall through to here.
    msg = "You make a mistake, please re-enter"
    return render_to_response('registerpage.html', locals())
@login_required(login_url="/loginpage")
def password(request):
    """Render the password-change form for the logged-in user."""
    context = locals()
    return render_to_response('password.html', context)
@login_required(login_url="/loginpage")
def password_reset(request):
    """Change the logged-in user's password.

    Expects ``password`` (the current password) plus ``password1`` and
    ``password2`` (the new password, entered twice) in the request.
    Redirects to the index page on success, otherwise re-renders the form;
    the template context is ``locals()``, so ``msg`` carries the error text.
    """
    password = request.REQUEST.get('password')
    password1 = request.REQUEST.get('password1')
    password2 = request.REQUEST.get('password2')
    if password and password1 and password2:
        # The current password must verify before anything is changed.
        if not request.user.check_password(password):
            msg = "The old password is error!"
            return render_to_response('password.html', locals())
        if password1 == password2:
            request.user.set_password(password1)
            request.user.save()
            return HttpResponseRedirect("/")
    # Missing fields or mismatching new passwords end up here.
    msg = "You make a mistake, please re-enter"
    return render_to_response('password.html', locals())
def write_report(ws, sales, time_flag='%H:%M'):
    """Fill worksheet *ws* with a per-product sales report bucketed by time.

    Row 0 gets one header cell per time bucket (from column 6 on); each
    product gets one row with serial number, extra, name, total quantity,
    unit price, total price and per-bucket quantities; a footer row carries
    the grand totals and per-bucket quantity sums.
    """
    # Aggregate quantity per (product id, time bucket); keep first-seen order.
    quantities = {}
    products = []
    buckets = []
    for sale in sales:
        item = sale.product
        bucket = sale.create_time.strftime(time_flag)
        if item not in products:
            products.append(item)
        if bucket not in buckets:
            buckets.append(bucket)
        key = (item.id, bucket)
        quantities[key] = quantities.get(key, 0) + int(sale.quantity)

    # Header row: one column per time bucket, starting at column 6.
    for offset, bucket in enumerate(buckets):
        ws.write(0, 6 + offset, bucket)

    grand_quantity = 0
    grand_price = 0
    row = 0
    for row, item in enumerate(products, start=1):
        ws.write(row, 0, row)  # serial number
        ws.write(row, 1, item.extra)
        ws.write(row, 2, item.name)
        unit_price = item.price
        row_quantity = 0
        for offset, bucket in enumerate(buckets):
            qty = quantities.get((item.id, bucket), 0)
            row_quantity += qty
            # Zero quantities leave the cell blank.
            if qty:
                ws.write(row, 6 + offset, qty)
        ws.write(row, 3, row_quantity)
        ws.write(row, 4, unit_price)
        ws.write(row, 5, row_quantity * unit_price)
        grand_quantity += row_quantity
        grand_price += row_quantity * unit_price

    # Footer row with the grand totals.
    row += 1
    ws.write(row, 3, grand_quantity)
    ws.write(row, 5, grand_price)
    for offset, bucket in enumerate(buckets):
        bucket_total = sum(
            quantities.get((item.id, bucket), 0) for item in products
        )
        ws.write(row, 6 + offset, bucket_total)
| 29.257406
| 129
| 0.595294
| 3,982
| 28,643
| 4.107484
| 0.058011
| 0.058205
| 0.053314
| 0.026229
| 0.824957
| 0.793776
| 0.779225
| 0.733309
| 0.710137
| 0.69956
| 0
| 0.019814
| 0.275809
| 28,643
| 978
| 130
| 29.287321
| 0.768693
| 0.019167
| 0
| 0.791557
| 0
| 0
| 0.053331
| 0.002708
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034301
| false
| 0.026385
| 0.019789
| 0.003958
| 0.098945
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
caa9413d3fe0584ce2a185f41754ada882804b6a
| 34,339
|
py
|
Python
|
psono/restapi/tests/api_key_secret.py
|
dirigeant/psono-server
|
a18c5b3c4d8bbbe4ecf1615b210d99fb77752205
|
[
"Apache-2.0",
"CC0-1.0"
] | 48
|
2018-04-19T15:50:58.000Z
|
2022-01-23T15:58:11.000Z
|
psono/restapi/tests/api_key_secret.py
|
dirigeant/psono-server
|
a18c5b3c4d8bbbe4ecf1615b210d99fb77752205
|
[
"Apache-2.0",
"CC0-1.0"
] | 9
|
2018-09-13T14:56:18.000Z
|
2020-01-17T16:44:33.000Z
|
psono/restapi/tests/api_key_secret.py
|
dirigeant/psono-server
|
a18c5b3c4d8bbbe4ecf1615b210d99fb77752205
|
[
"Apache-2.0",
"CC0-1.0"
] | 11
|
2019-09-20T11:53:47.000Z
|
2021-07-18T22:41:31.000Z
|
from django.urls import reverse
from django.contrib.auth.hashers import make_password
from django.conf import settings
from rest_framework import status
from .base import APITestCaseExtended
from restapi import models
import random
import string
import binascii
import os
class CreateApiKeySecretTest(APITestCaseExtended):
    """
    Test to create a secret (PUT)
    """

    def setUp(self):
        # User 1: fixed, pre-generated dummy key material.
        self.test_email = "test@example.com"
        self.test_email_bcrypt = "a"
        self.test_password = "myPassword"
        self.test_authkey = "c55066421a559f76d8ed5227622e9f95a0c67df15220e40d7bc98a8a598124fa15373ac553ef3ee27c7" \
                            "123d6be058e6d43cc71c1b666bdecaf33b734c8583a93"
        self.test_public_key = "5706a5648debec63e86714c8c489f08aee39477487d1b3f39b0bbb05dbd2c649"
        self.test_secret_key = "a7d028388e9d80f2679c236ebb2d0fedc5b7b0a28b393f6a20cc8f6be636aa71"
        self.test_secret_key_enc = "77cde8ff6a5bbead93588fdcd0d6346bb57224b55a49c0f8a22a807bf6414e4d82ff60711422" \
                                   "996e4a26de599982d531eef3098c9a531a05f75878ac0739571d6a242e6bf68c2c28eadf1011" \
                                   "571a48eb"
        self.test_secret_key_nonce = "f580cc9900ce7ae8b6f7d2bab4627e9e689dca0f13a53e3c"
        self.test_private_key = "d636f7cc20384475bdc30c3ede98f719ee09d1fd4709276103772dd9479f353c"
        self.test_private_key_enc = "abddebec9d20cecf7d1cab95ad6c6394db3826856bf21c2c6af9954e9816c2239f5df697e52" \
                                    "d60785eb1136803407b69729c38bb50eefdd2d24f2fa0f104990eee001866ba83704cf4f576" \
                                    "a74b9b2452"
        self.test_private_key_nonce = "4298a9ab3d9d5d8643dfd4445adc30301b565ab650497fb9"
        self.test_user_obj = models.User.objects.create(
            email=self.test_email,
            email_bcrypt=self.test_email_bcrypt,
            authkey=make_password(self.test_authkey),
            public_key=self.test_public_key,
            private_key=self.test_private_key_enc,
            private_key_nonce=self.test_private_key_nonce,
            secret_key=self.test_secret_key_enc,
            secret_key_nonce=self.test_secret_key_nonce,
            user_sauce='90272aaf01a2d525223f192aca069e7f5661b3a0f1b1a91f9b16d493fdf15295',
            is_email_active=True
        )

        # User 2: randomly generated key material; used for the
        # "no permission" test case below.
        self.test_email2 = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@example.com'
        self.test_email_bcrypt2 = 'a'
        self.test_username2 = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@psono.pw'
        self.test_authkey2 = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
        self.test_public_key2 = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()
        self.test_private_key2 = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
        self.test_private_key_nonce2 = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
        self.test_secret_key2 = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
        self.test_secret_key_nonce2 = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
        self.test_user_sauce2 = 'a67fef1ff29eb8f866feaccad336fc6311fa4c71bc183b14c8fceff7416add99'
        self.test_user_obj2 = models.User.objects.create(
            username=self.test_username2,
            email=self.test_email2,
            email_bcrypt=self.test_email_bcrypt2,
            authkey=make_password(self.test_authkey2),
            public_key=self.test_public_key2,
            private_key=self.test_private_key2,
            private_key_nonce=self.test_private_key_nonce2,
            secret_key=self.test_secret_key2,
            secret_key_nonce=self.test_secret_key_nonce2,
            user_sauce=self.test_user_sauce2,
            is_email_active=True
        )

        # Datastore owned by user 1; parent of the secret link below.
        self.test_datastore1_obj = models.Data_Store.objects.create(
            user_id=self.test_user_obj.id,
            type="my-type",
            description= "my-description",
            data= "12345",
            data_nonce= ''.join(random.choice(string.ascii_lowercase) for _ in range(64)),
            secret_key= ''.join(random.choice(string.ascii_lowercase) for _ in range(256)),
            secret_key_nonce= ''.join(random.choice(string.ascii_lowercase) for _ in range(64)),
        )

        # API key owned by user 1; the secrets under test get attached to it.
        self.test_api_key_obj = models.API_Key.objects.create(
            user = self.test_user_obj,
            title = 'Test Title',
            public_key = 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            private_key = 'a123',
            private_key_nonce = 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            secret_key = 'a123',
            secret_key_nonce = 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            user_private_key = 'a123',
            user_private_key_nonce = 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            user_secret_key = 'a123',
            user_secret_key_nonce = 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            verify_key = 'a123',
            read = True,
            write = True,
            restrict_to_secrets = True,
            allow_insecure_access = True,
        )

        # Secret owned by user 1 and its link into the datastore (the link is
        # what grants the user permission on the secret).
        self.test_secret_obj = models.Secret.objects.create(
            user_id=self.test_user_obj.id,
            data='12345',
            data_nonce=''.join(random.choice(string.ascii_lowercase) for _ in range(64)),
            type="dummy"
        )
        self.secret_link_obj = models.Secret_Link.objects.create(
            link_id = '0493017f-47b0-446e-9a41-6533721ade71',
            secret_id = self.test_secret_obj.id,
            parent_datastore_id = self.test_datastore1_obj.id,
            parent_share_id = None
        )

    def test_create_success(self):
        """
        Tests to create an api key secret
        """
        url = reverse('api_key_secret')
        data = {
            'api_key_id': self.test_api_key_obj.id,
            'secret_id': self.test_secret_obj.id,
            'secret_key': 'a123',
            'secret_key_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            'title': 'a123',
            'title_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(models.API_Key_Secret.objects.count(), 1)

    def test_create_failure_no_title(self):
        """
        Tests to create an api key secret without a title
        """
        url = reverse('api_key_secret')
        data = {
            'api_key_id': self.test_api_key_obj.id,
            'secret_id': self.test_secret_obj.id,
            'secret_key': 'a123',
            'secret_key_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            # 'title': 'a123',
            'title_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_failure_no_title_nonce(self):
        """
        Tests to create an api key secret without a nonce for the title
        """
        url = reverse('api_key_secret')
        data = {
            'api_key_id': self.test_api_key_obj.id,
            'secret_id': self.test_secret_obj.id,
            'secret_key': 'a123',
            'secret_key_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            'title': 'a123',
            # 'title_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_failure_no_secret_key(self):
        """
        Tests to create an api key secret without a secret key
        """
        url = reverse('api_key_secret')
        data = {
            'api_key_id': self.test_api_key_obj.id,
            'secret_id': self.test_secret_obj.id,
            # 'secret_key': 'a123',
            'secret_key_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            'title': 'a123',
            'title_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_failure_no_secret_key_nonce(self):
        """
        Tests to create an api key secret without a secret key nonce
        """
        url = reverse('api_key_secret')
        data = {
            'api_key_id': self.test_api_key_obj.id,
            'secret_id': self.test_secret_obj.id,
            'secret_key': 'a123',
            # 'secret_key_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            'title': 'a123',
            'title_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_failure_no_secret_id(self):
        """
        Tests to create an api key secret without a secret id
        """
        url = reverse('api_key_secret')
        data = {
            'api_key_id': self.test_api_key_obj.id,
            # 'secret_id': self.test_secret_obj.id,
            'secret_key': 'a123',
            'secret_key_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            'title': 'a123',
            'title_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_failure_no_api_key_id(self):
        """
        Tests to create an api key secret without an api key id
        """
        url = reverse('api_key_secret')
        data = {
            # 'api_key_id': self.test_api_key_obj.id,
            'secret_id': self.test_secret_obj.id,
            'secret_key': 'a123',
            'secret_key_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            'title': 'a123',
            'title_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_failure_secret_key_no_hex(self):
        """
        Tests to create an api key secret with a secret key that is not in hex format
        """
        url = reverse('api_key_secret')
        data = {
            'api_key_id': self.test_api_key_obj.id,
            'secret_id': self.test_secret_obj.id,
            'secret_key': 'a123X',
            'secret_key_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            'title': 'a123',
            'title_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_failure_secret_key_nonce_no_hex(self):
        """
        Tests to create an api key secret with a secret key nonce that is not in hex format
        """
        url = reverse('api_key_secret')
        data = {
            'api_key_id': self.test_api_key_obj.id,
            'secret_id': self.test_secret_obj.id,
            'secret_key': 'a123',
            'secret_key_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0FX',
            'title': 'a123',
            'title_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_failure_title_no_hex(self):
        """
        Tests to create an api key secret with a title that is not in hex format
        """
        url = reverse('api_key_secret')
        data = {
            'api_key_id': self.test_api_key_obj.id,
            'secret_id': self.test_secret_obj.id,
            'secret_key': 'a123',
            'secret_key_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            'title': 'a123X',
            'title_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_failure_title_nonce_no_hex(self):
        """
        Tests to create an api key secret with a title nonce that is not in hex format
        """
        url = reverse('api_key_secret')
        data = {
            'api_key_id': self.test_api_key_obj.id,
            'secret_id': self.test_secret_obj.id,
            'secret_key': 'a123',
            'secret_key_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            'title': 'a123',
            'title_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0FX',
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_failure_api_key_not_exist(self):
        """
        Tests to create an api key secret for an api key that does not exist
        """
        url = reverse('api_key_secret')
        data = {
            'api_key_id': '2cec0bba-74b5-44a2-9b62-dfe7f678e75a',  # random UUID, not in DB
            'secret_id': self.test_secret_obj.id,
            'secret_key': 'a123',
            'secret_key_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            'title': 'a123',
            'title_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_failure_no_permission_on_api_key(self):
        """
        Tests to create an api key secret for an api key that does not belong to the user
        """
        url = reverse('api_key_secret')
        data = {
            'api_key_id': self.test_api_key_obj.id,
            'secret_id': self.test_secret_obj.id,
            'secret_key': 'a123',
            'secret_key_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            'title': 'a123',
            'title_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
        }
        # Authenticate as user 2, who does not own the api key.
        self.client.force_authenticate(user=self.test_user_obj2)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_failure_no_permission_on_secret(self):
        """
        Tests to create an api key secret for a secret that the user has no permission to
        """
        # Removing the secret link revokes the user's access to the secret.
        self.secret_link_obj.delete()

        url = reverse('api_key_secret')
        data = {
            'api_key_id': self.test_api_key_obj.id,
            'secret_id': self.test_secret_obj.id,
            'secret_key': 'a123',
            'secret_key_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            'title': 'a123',
            'title_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_failure_secret_not_exist(self):
        """
        Tests to create an api key secret for a secret that does not exist
        """
        url = reverse('api_key_secret')
        data = {
            'api_key_id': self.test_api_key_obj.id,
            'secret_id': 'b9362b80-224e-4c9a-8ccd-85a84b1e3739',  # random UUID, not in DB
            'secret_key': 'a123',
            'secret_key_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            'title': 'a123',
            'title_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class DeleteApiKeySecretTest(APITestCaseExtended):
    """
    Test to delete an api key (DELETE)
    """

    def setUp(self):
        # User 1: fixed, pre-generated dummy key material.
        self.test_email = "test@example.com"
        self.test_email_bcrypt = "a"
        self.test_password = "myPassword"
        self.test_authkey = "c55066421a559f76d8ed5227622e9f95a0c67df15220e40d7bc98a8a598124fa15373ac553ef3ee27c7" \
                            "123d6be058e6d43cc71c1b666bdecaf33b734c8583a93"
        self.test_public_key = "5706a5648debec63e86714c8c489f08aee39477487d1b3f39b0bbb05dbd2c649"
        self.test_secret_key = "a7d028388e9d80f2679c236ebb2d0fedc5b7b0a28b393f6a20cc8f6be636aa71"
        self.test_secret_key_enc = "77cde8ff6a5bbead93588fdcd0d6346bb57224b55a49c0f8a22a807bf6414e4d82ff60711422" \
                                   "996e4a26de599982d531eef3098c9a531a05f75878ac0739571d6a242e6bf68c2c28eadf1011" \
                                   "571a48eb"
        self.test_secret_key_nonce = "f580cc9900ce7ae8b6f7d2bab4627e9e689dca0f13a53e3c"
        self.test_private_key = "d636f7cc20384475bdc30c3ede98f719ee09d1fd4709276103772dd9479f353c"
        self.test_private_key_enc = "abddebec9d20cecf7d1cab95ad6c6394db3826856bf21c2c6af9954e9816c2239f5df697e52" \
                                    "d60785eb1136803407b69729c38bb50eefdd2d24f2fa0f104990eee001866ba83704cf4f576" \
                                    "a74b9b2452"
        self.test_private_key_nonce = "4298a9ab3d9d5d8643dfd4445adc30301b565ab650497fb9"
        self.test_user_obj = models.User.objects.create(
            email=self.test_email,
            email_bcrypt=self.test_email_bcrypt,
            authkey=make_password(self.test_authkey),
            public_key=self.test_public_key,
            private_key=self.test_private_key_enc,
            private_key_nonce=self.test_private_key_nonce,
            secret_key=self.test_secret_key_enc,
            secret_key_nonce=self.test_secret_key_nonce,
            user_sauce='90272aaf01a2d525223f192aca069e7f5661b3a0f1b1a91f9b16d493fdf15295',
            is_email_active=True
        )

        # User 2: randomly generated key material; used for the
        # "belongs to other user" test case below.
        self.test_email2 = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@example.com'
        self.test_email_bcrypt2 = 'a'
        self.test_username2 = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@psono.pw'
        self.test_authkey2 = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
        self.test_public_key2 = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()
        self.test_private_key2 = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
        self.test_private_key_nonce2 = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
        self.test_secret_key2 = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
        self.test_secret_key_nonce2 = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
        self.test_user_sauce2 = 'a67fef1ff29eb8f866feaccad336fc6311fa4c71bc183b14c8fceff7416add99'
        self.test_user_obj2 = models.User.objects.create(
            username=self.test_username2,
            email=self.test_email2,
            email_bcrypt=self.test_email_bcrypt2,
            authkey=make_password(self.test_authkey2),
            public_key=self.test_public_key2,
            private_key=self.test_private_key2,
            private_key_nonce=self.test_private_key_nonce2,
            secret_key=self.test_secret_key2,
            secret_key_nonce=self.test_secret_key_nonce2,
            user_sauce=self.test_user_sauce2,
            is_email_active=True
        )

        # API key owned by user 1.
        self.test_api_key_obj = models.API_Key.objects.create(
            user = self.test_user_obj,
            title = 'Test Title',
            public_key = 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            private_key = 'a123',
            private_key_nonce = 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            secret_key = 'a123',
            secret_key_nonce = 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            user_private_key = 'a123',
            user_private_key_nonce = 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            user_secret_key = 'a123',
            user_secret_key_nonce = 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            verify_key = 'a123',
            read = True,
            write = True,
            restrict_to_secrets = True,
            allow_insecure_access = True,
        )

        # Secret owned by user 1 plus an existing api-key/secret association,
        # which is the object the DELETE tests operate on.
        self.test_secret_obj = models.Secret.objects.create(
            user_id=self.test_user_obj.id,
            data='12345',
            data_nonce=''.join(random.choice(string.ascii_lowercase) for _ in range(64)),
            type="dummy"
        )
        self.test_api_key_secret_obj = models.API_Key_Secret.objects.create(
            api_key=self.test_api_key_obj,
            secret=self.test_secret_obj,
            title='a123',
            title_nonce='B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            secret_key='a123',
            secret_key_nonce='B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
        )

    def test_delete_success(self):
        """
        Tests to delete a secret from an api key
        """
        url = reverse('api_key_secret')
        data = {
            'api_key_secret_id': self.test_api_key_secret_obj.id,
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.delete(url, data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_delete_failure_missing_api_key_secret_id(self):
        """
        Tests to delete a secret from an api key without api_key_secret_id
        """
        url = reverse('api_key_secret')
        data = {
            # 'api_key_secret_id': self.test_api_key_secret_obj.id,
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.delete(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_delete_failure_belongs_to_other_user(self):
        """
        Tests to delete an api key secret where the api key that belongs to another user
        """
        url = reverse('api_key_secret')
        data = {
            'api_key_secret_id': self.test_api_key_secret_obj.id,
        }
        # Authenticate as user 2, who does not own the api key.
        self.client.force_authenticate(user=self.test_user_obj2)
        response = self.client.delete(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_delete_failure_not_exist(self):
        """
        Tests to delete an api key secret that does not exist
        """
        url = reverse('api_key_secret')
        data = {
            'api_key_secret_id': '9528ca14-d916-429f-8f32-80f4bb814e3c',  # random UUID, not in DB
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.delete(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class ReadApiKeySecretTest(APITestCaseExtended):
    """
    Test to read an api key secret (GET)
    """

    def setUp(self):
        """Create two users, an api key owned by user 1, a secret and the api-key/secret link."""
        # --- user 1: owner of the api key and the secret ------------------
        self.test_email = "test@example.com"
        self.test_email_bcrypt = "a"
        self.test_password = "myPassword"
        self.test_authkey = "c55066421a559f76d8ed5227622e9f95a0c67df15220e40d7bc98a8a598124fa15373ac553ef3ee27c7" \
                            "123d6be058e6d43cc71c1b666bdecaf33b734c8583a93"
        self.test_public_key = "5706a5648debec63e86714c8c489f08aee39477487d1b3f39b0bbb05dbd2c649"
        self.test_secret_key = "a7d028388e9d80f2679c236ebb2d0fedc5b7b0a28b393f6a20cc8f6be636aa71"
        self.test_secret_key_enc = "77cde8ff6a5bbead93588fdcd0d6346bb57224b55a49c0f8a22a807bf6414e4d82ff60711422" \
                                   "996e4a26de599982d531eef3098c9a531a05f75878ac0739571d6a242e6bf68c2c28eadf1011" \
                                   "571a48eb"
        self.test_secret_key_nonce = "f580cc9900ce7ae8b6f7d2bab4627e9e689dca0f13a53e3c"
        self.test_private_key = "d636f7cc20384475bdc30c3ede98f719ee09d1fd4709276103772dd9479f353c"
        self.test_private_key_enc = "abddebec9d20cecf7d1cab95ad6c6394db3826856bf21c2c6af9954e9816c2239f5df697e52" \
                                    "d60785eb1136803407b69729c38bb50eefdd2d24f2fa0f104990eee001866ba83704cf4f576" \
                                    "a74b9b2452"
        self.test_private_key_nonce = "4298a9ab3d9d5d8643dfd4445adc30301b565ab650497fb9"
        self.test_user_obj = models.User.objects.create(
            email=self.test_email,
            email_bcrypt=self.test_email_bcrypt,
            authkey=make_password(self.test_authkey),
            public_key=self.test_public_key,
            private_key=self.test_private_key_enc,
            private_key_nonce=self.test_private_key_nonce,
            secret_key=self.test_secret_key_enc,
            secret_key_nonce=self.test_secret_key_nonce,
            user_sauce='90272aaf01a2d525223f192aca069e7f5661b3a0f1b1a91f9b16d493fdf15295',
            is_email_active=True
        )
        # --- user 2: has no rights on user 1's api key --------------------
        # randomised email/username so repeated runs cannot collide
        self.test_email2 = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@example.com'
        self.test_email_bcrypt2 = 'a'
        self.test_username2 = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@psono.pw'
        self.test_authkey2 = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
        self.test_public_key2 = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()
        self.test_private_key2 = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
        self.test_private_key_nonce2 = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
        self.test_secret_key2 = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
        self.test_secret_key_nonce2 = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
        self.test_user_sauce2 = 'a67fef1ff29eb8f866feaccad336fc6311fa4c71bc183b14c8fceff7416add99'
        self.test_user_obj2 = models.User.objects.create(
            username=self.test_username2,
            email=self.test_email2,
            email_bcrypt=self.test_email_bcrypt2,
            authkey=make_password(self.test_authkey2),
            public_key=self.test_public_key2,
            private_key=self.test_private_key2,
            private_key_nonce=self.test_private_key_nonce2,
            secret_key=self.test_secret_key2,
            secret_key_nonce=self.test_secret_key_nonce2,
            user_sauce=self.test_user_sauce2,
            is_email_active=True
        )
        # --- api key owned by user 1 (dummy crypto material) --------------
        self.test_api_key_obj = models.API_Key.objects.create(
            user=self.test_user_obj,
            title='Test Title',
            public_key='B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            private_key='a123',
            private_key_nonce='B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            secret_key='a123',
            secret_key_nonce='B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            user_private_key='a123',
            user_private_key_nonce='B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            user_secret_key='a123',
            user_secret_key_nonce='B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            verify_key='a123',
            read=True,
            write=True,
            restrict_to_secrets=True,
            allow_insecure_access=True,
        )
        # --- a secret of user 1 and its link to the api key ---------------
        self.test_secret_obj = models.Secret.objects.create(
            user_id=self.test_user_obj.id,
            data='12345',
            data_nonce=''.join(random.choice(string.ascii_lowercase) for _ in range(64)),
            type="dummy"
        )
        self.test_api_key_secret_obj = models.API_Key_Secret.objects.create(
            api_key=self.test_api_key_obj,
            secret=self.test_secret_obj,
            title='a123',
            title_nonce='B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            secret_key='a123',
            secret_key_nonce='B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
        )

    def test_read_api_key_secrets_success(self):
        """
        Tests to read the secrets of a specific api_key successful
        """
        url = reverse('api_key_secret', kwargs={'api_key_id': self.test_api_key_obj.id})
        data = {}
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.get(url, data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # exactly the one linked secret comes back, with its encrypted title
        self.assertEqual(len(response.data), 1)
        api_key_secret = response.data[0]
        self.assertEqual(api_key_secret.get('id'), self.test_api_key_secret_obj.id)
        self.assertEqual(api_key_secret.get('secret_id'), self.test_api_key_secret_obj.secret_id)
        self.assertEqual(api_key_secret.get('title'), self.test_api_key_secret_obj.title)
        self.assertEqual(api_key_secret.get('title_nonce'), self.test_api_key_secret_obj.title_nonce)

    def test_read_api_key_secrets_failure_not_exist(self):
        """
        Tests to read the secrets of a specific api_key that does not exist
        """
        # random UUID that matches no api key
        url = reverse('api_key_secret', kwargs={'api_key_id': '175a2f80-0b30-4ada-a26f-cc2e9a29384b'})
        data = {}
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.get(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_read_api_key_secrets_failure_no_permission(self):
        """
        Tests to read the secrets of a specific api_key that belongs to another user
        """
        url = reverse('api_key_secret', kwargs={'api_key_id': self.test_api_key_obj.id})
        data = {}
        # user 2 does not own the api key, so the read must be rejected
        self.client.force_authenticate(user=self.test_user_obj2)
        response = self.client.get(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class UpdateApiKeySecretTest(APITestCaseExtended):
    """
    Test to update an api key secret (POST)
    """

    def setUp(self):
        """Create one user; POST is expected to be rejected before any object access."""
        self.test_email = "test@example.com"
        self.test_email_bcrypt = "a"
        self.test_password = "myPassword"
        self.test_authkey = "c55066421a559f76d8ed5227622e9f95a0c67df15220e40d7bc98a8a598124fa15373ac553ef3ee27c7" \
                            "123d6be058e6d43cc71c1b666bdecaf33b734c8583a93"
        self.test_public_key = "5706a5648debec63e86714c8c489f08aee39477487d1b3f39b0bbb05dbd2c649"
        self.test_secret_key = "a7d028388e9d80f2679c236ebb2d0fedc5b7b0a28b393f6a20cc8f6be636aa71"
        self.test_secret_key_enc = "77cde8ff6a5bbead93588fdcd0d6346bb57224b55a49c0f8a22a807bf6414e4d82ff60711422" \
                                   "996e4a26de599982d531eef3098c9a531a05f75878ac0739571d6a242e6bf68c2c28eadf1011" \
                                   "571a48eb"
        self.test_secret_key_nonce = "f580cc9900ce7ae8b6f7d2bab4627e9e689dca0f13a53e3c"
        self.test_private_key = "d636f7cc20384475bdc30c3ede98f719ee09d1fd4709276103772dd9479f353c"
        self.test_private_key_enc = "abddebec9d20cecf7d1cab95ad6c6394db3826856bf21c2c6af9954e9816c2239f5df697e52" \
                                    "d60785eb1136803407b69729c38bb50eefdd2d24f2fa0f104990eee001866ba83704cf4f576" \
                                    "a74b9b2452"
        self.test_private_key_nonce = "4298a9ab3d9d5d8643dfd4445adc30301b565ab650497fb9"
        self.test_user_obj = models.User.objects.create(
            email=self.test_email,
            email_bcrypt=self.test_email_bcrypt,
            authkey=make_password(self.test_authkey),
            public_key=self.test_public_key,
            private_key=self.test_private_key_enc,
            private_key_nonce=self.test_private_key_nonce,
            secret_key=self.test_secret_key_enc,
            secret_key_nonce=self.test_secret_key_nonce,
            user_sauce='90272aaf01a2d525223f192aca069e7f5661b3a0f1b1a91f9b16d493fdf15295',
            is_email_active=True
        )

    def test_post_api_key_secret(self):
        """
        Tests POST request on api_key_secret
        """
        # POST is not a supported verb on this endpoint
        url = reverse('api_key_secret')
        data = {}
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
| 42.498762
| 115
| 0.677975
| 3,461
| 34,339
| 6.380815
| 0.058365
| 0.081507
| 0.036406
| 0.021056
| 0.937692
| 0.935202
| 0.9309
| 0.920214
| 0.915414
| 0.904999
| 0
| 0.181477
| 0.239378
| 34,339
| 807
| 116
| 42.551425
| 0.664038
| 0.056961
| 0
| 0.814079
| 0
| 0
| 0.262317
| 0.204744
| 0
| 0
| 0
| 0
| 0.052347
| 1
| 0.048736
| false
| 0.021661
| 0.018051
| 0
| 0.074007
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cabfb1c665694bf842958d8eb66087d281a1afa8
| 164
|
py
|
Python
|
blogapp/admin.py
|
uma-shankar-gupta/blog-website
|
ddcfc8d665634a0abcd3eca645ebbcfc78b562b0
|
[
"MIT"
] | null | null | null |
blogapp/admin.py
|
uma-shankar-gupta/blog-website
|
ddcfc8d665634a0abcd3eca645ebbcfc78b562b0
|
[
"MIT"
] | null | null | null |
blogapp/admin.py
|
uma-shankar-gupta/blog-website
|
ddcfc8d665634a0abcd3eca645ebbcfc78b562b0
|
[
"MIT"
] | null | null | null |
from django.contrib import admin

from .models import Post, Like, Comment

# Expose the blog models in the Django admin site (same registration
# order as before: Post, Comment, Like).
for blog_model in (Post, Comment, Like):
    admin.site.register(blog_model)
| 20.5
| 39
| 0.762195
| 23
| 164
| 5.434783
| 0.478261
| 0.216
| 0.408
| 0.384
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140244
| 164
| 8
| 40
| 20.5
| 0.886525
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
cacb9e0868176e50a30fc1f59df3908fde0c32ef
| 2,468
|
py
|
Python
|
octicons16px/gear.py
|
andrewp-as-is/octicons16px.py
|
1272dc9f290619d83bd881e87dbd723b0c48844c
|
[
"Unlicense"
] | 1
|
2021-01-28T06:47:39.000Z
|
2021-01-28T06:47:39.000Z
|
octicons16px/gear.py
|
andrewp-as-is/octicons16px.py
|
1272dc9f290619d83bd881e87dbd723b0c48844c
|
[
"Unlicense"
] | null | null | null |
octicons16px/gear.py
|
andrewp-as-is/octicons16px.py
|
1272dc9f290619d83bd881e87dbd723b0c48844c
|
[
"Unlicense"
] | null | null | null |
OCTICON_GEAR = """
<svg class="octicon octicon-gear" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16" width="16" height="16"><path fill-rule="evenodd" d="M7.429 1.525a6.593 6.593 0 011.142 0c.036.003.108.036.137.146l.289 1.105c.147.56.55.967.997 1.189.174.086.341.183.501.29.417.278.97.423 1.53.27l1.102-.303c.11-.03.175.016.195.046.219.31.41.641.573.989.014.031.022.11-.059.19l-.815.806c-.411.406-.562.957-.53 1.456a4.588 4.588 0 010 .582c-.032.499.119 1.05.53 1.456l.815.806c.08.08.073.159.059.19a6.494 6.494 0 01-.573.99c-.02.029-.086.074-.195.045l-1.103-.303c-.559-.153-1.112-.008-1.529.27-.16.107-.327.204-.5.29-.449.222-.851.628-.998 1.189l-.289 1.105c-.029.11-.101.143-.137.146a6.613 6.613 0 01-1.142 0c-.036-.003-.108-.037-.137-.146l-.289-1.105c-.147-.56-.55-.967-.997-1.189a4.502 4.502 0 01-.501-.29c-.417-.278-.97-.423-1.53-.27l-1.102.303c-.11.03-.175-.016-.195-.046a6.492 6.492 0 01-.573-.989c-.014-.031-.022-.11.059-.19l.815-.806c.411-.406.562-.957.53-1.456a4.587 4.587 0 010-.582c.032-.499-.119-1.05-.53-1.456l-.815-.806c-.08-.08-.073-.159-.059-.19a6.44 6.44 0 01.573-.99c.02-.029.086-.075.195-.045l1.103.303c.559.153 1.112.008 1.529-.27.16-.107.327-.204.5-.29.449-.222.851-.628.998-1.189l.289-1.105c.029-.11.101-.143.137-.146zM8 0c-.236 0-.47.01-.701.03-.743.065-1.29.615-1.458 1.261l-.29 1.106c-.017.066-.078.158-.211.224a5.994 5.994 0 00-.668.386c-.123.082-.233.09-.3.071L3.27 2.776c-.644-.177-1.392.02-1.82.63a7.977 7.977 0 00-.704 1.217c-.315.675-.111 1.422.363 1.891l.815.806c.05.048.098.147.088.294a6.084 6.084 0 000 .772c.01.147-.038.246-.088.294l-.815.806c-.474.469-.678 1.216-.363 1.891.2.428.436.835.704 1.218.428.609 1.176.806 1.82.63l1.103-.303c.066-.019.176-.011.299.071.213.143.436.272.668.386.133.066.194.158.212.224l.289 1.106c.169.646.715 1.196 1.458 1.26a8.094 8.094 0 001.402 0c.743-.064 1.29-.614 1.458-1.26l.29-1.106c.017-.066.078-.158.211-.224a5.98 5.98 0 00.668-.386c.123-.082.233-.09.3-.071l1.102.302c.644.177 1.392-.02 
1.82-.63.268-.382.505-.789.704-1.217.315-.675.111-1.422-.364-1.891l-.814-.806c-.05-.048-.098-.147-.088-.294a6.1 6.1 0 000-.772c-.01-.147.039-.246.088-.294l.814-.806c.475-.469.679-1.216.364-1.891a7.992 7.992 0 00-.704-1.218c-.428-.609-1.176-.806-1.82-.63l-1.103.303c-.066.019-.176.011-.299-.071a5.991 5.991 0 00-.668-.386c-.133-.066-.194-.158-.212-.224L10.16 1.29C9.99.645 9.444.095 8.701.031A8.094 8.094 0 008 0zm1.5 8a1.5 1.5 0 11-3 0 1.5 1.5 0 013 0zM11 8a3 3 0 11-6 0 3 3 0 016 0z"></path></svg>
"""
| 493.6
| 2,443
| 0.670583
| 620
| 2,468
| 2.667742
| 0.41129
| 0.025393
| 0.019347
| 0.018138
| 0.528416
| 0.461911
| 0.444982
| 0.308343
| 0.308343
| 0.249093
| 0
| 0.624626
| 0.052269
| 2,468
| 4
| 2,444
| 617
| 0.082514
| 0
| 0
| 0
| 0
| 0.333333
| 0.991082
| 0.701662
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1b083d72f137395785064bf5f54c6f259787ea52
| 264
|
py
|
Python
|
recipe/run_test.py
|
conda-forge/django-fsm-feedstock
|
c9b6de48691eee30f65cae270837a34f8381badc
|
[
"BSD-3-Clause"
] | null | null | null |
recipe/run_test.py
|
conda-forge/django-fsm-feedstock
|
c9b6de48691eee30f65cae270837a34f8381badc
|
[
"BSD-3-Clause"
] | 5
|
2018-07-08T10:38:55.000Z
|
2021-11-06T17:17:27.000Z
|
recipe/run_test.py
|
conda-forge/django-fsm-feedstock
|
c9b6de48691eee30f65cae270837a34f8381badc
|
[
"BSD-3-Clause"
] | 2
|
2018-01-26T23:28:49.000Z
|
2018-07-08T10:38:39.000Z
|
import django
from django.conf import settings

# Settings must be configured *before* any django_fsm module is imported;
# its models would otherwise fail at import time.
settings.configure(INSTALLED_APPS=['django_fsm', 'django.contrib.contenttypes', 'django.contrib.auth'])
django.setup()

# Package smoke test: a successful import of these modules is the whole
# check -- any ImportError fails the build.
import django_fsm
import django_fsm.management
import django_fsm.management.commands
| 29.333333
| 104
| 0.795455
| 33
| 264
| 6.212121
| 0.454545
| 0.234146
| 0.219512
| 0.243902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109848
| 264
| 8
| 105
| 33
| 0.87234
| 0
| 0
| 0
| 0
| 0
| 0.212121
| 0.102273
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.714286
| 0
| 0.714286
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
59ef45620dde8ef28ba649b847d90419976a7e49
| 20,523
|
py
|
Python
|
tackerclient/tests/unit/osc/v1/test_vnflcm_op_occs.py
|
openstack/python-tackerclient
|
b7f27c3dc6a8ec747d13698a3ced1dc5cc162389
|
[
"Apache-2.0"
] | 20
|
2015-10-18T02:56:36.000Z
|
2021-10-12T13:37:58.000Z
|
tackerclient/tests/unit/osc/v1/test_vnflcm_op_occs.py
|
openstack/python-tackerclient
|
b7f27c3dc6a8ec747d13698a3ced1dc5cc162389
|
[
"Apache-2.0"
] | null | null | null |
tackerclient/tests/unit/osc/v1/test_vnflcm_op_occs.py
|
openstack/python-tackerclient
|
b7f27c3dc6a8ec747d13698a3ced1dc5cc162389
|
[
"Apache-2.0"
] | 16
|
2016-03-18T08:37:28.000Z
|
2021-07-19T05:28:16.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from io import StringIO
import os
import sys
import ddt
from oslo_utils.fixture import uuidsentinel
from unittest import mock
from tackerclient.common import exceptions
from tackerclient.osc import utils as tacker_osc_utils
from tackerclient.osc.v1.vnflcm import vnflcm_op_occs
from tackerclient.tests.unit.osc import base
from tackerclient.tests.unit.osc.v1.fixture_data import client
from tackerclient.tests.unit.osc.v1 import vnflcm_op_occs_fakes
def _get_columns_vnflcm_op_occs(action='show'):
if action == 'fail':
return ['ID', 'Operation State', 'State Entered Time',
'Start Time', 'VNF Instance ID', 'Operation',
'Is Automatic Invocation', 'Is Cancel Pending',
'Error', 'Links']
elif action == 'list':
return ['ID', 'Operation State', 'VNF Instance ID',
'Operation']
else:
return ['ID', 'Operation State', 'State Entered Time',
'Start Time', 'VNF Instance ID', 'Grant ID',
'Operation', 'Is Automatic Invocation',
'Operation Parameters', 'Is Cancel Pending',
'Cancel Mode', 'Error', 'Resource Changes',
'Changed Info', 'Changed External Connectivity', 'Links']
class TestVnfLcm(base.FixturedTestCase):
    """Shared fixture base for the vnflcm op occ command tests."""

    # fixture that wires a fake tacker HTTP endpoint into the test case
    client_fixture_class = client.ClientFixture

    def setUp(self):
        super(TestVnfLcm, self).setUp()
        self.url = client.TACKER_URL
        self.header = {'content-type': 'application/json'}
        # the osc app/argument objects are irrelevant here: plain mocks suffice
        self.app = mock.Mock()
        self.app_args = mock.Mock()
        self.client_manager = self.cs
        self.app.client_manager.tackerclient = self.client_manager
@ddt.ddt
class TestCancelVnfLcmOp(TestVnfLcm):
    """Tests for the ``vnflcm op cancel`` command."""

    def setUp(self):
        super(TestCancelVnfLcmOp, self).setUp()
        self.cancel_vnf_lcm = vnflcm_op_occs.CancelVnfLcmOp(
            self.app, self.app_args, cmd_name='vnflcm op cancel')

    # run once per cancel mode
    @ddt.data('GRACEFUL', 'FORCEFUL')
    def test_take_action(self, cancel_mode):
        """take_action normal system test"""
        arglist = ['--cancel-mode', cancel_mode,
                   uuidsentinel.vnf_lcm_op_occ_id]
        verifylist = [('cancel_mode', cancel_mode),
                      ('vnf_lcm_op_occ_id', uuidsentinel.vnf_lcm_op_occ_id)]
        parsed_args = self.check_parser(
            self.cancel_vnf_lcm, arglist, verifylist)
        url = os.path.join(
            self.url,
            'vnflcm/v1/vnf_lcm_op_occs',
            uuidsentinel.vnf_lcm_op_occ_id,
            'cancel')
        self.requests_mock.register_uri(
            'POST', url, headers=self.header, json={})
        # capture stdout to verify the user-facing confirmation message
        sys.stdout = buffer = StringIO()
        self.cancel_vnf_lcm.take_action(parsed_args)
        actual_message = buffer.getvalue().strip()
        expected_message = (
            'Cancel request for LCM operation ' +
            uuidsentinel.vnf_lcm_op_occ_id +
            ' has been accepted')
        self.assertEqual(expected_message, actual_message)

    def test_terminate_no_options(self):
        # the op occ id positional argument is mandatory
        self.assertRaises(base.ParserException, self.check_parser,
                          self.cancel_vnf_lcm, [], [])

    def test_take_action_vnf_lcm_op_occ_id_not_found(self):
        """take_action abnormal system test: unknown op occ id (404)"""
        arglist = [uuidsentinel.vnf_lcm_op_occ_id]
        verifylist = [('vnf_lcm_op_occ_id', uuidsentinel.vnf_lcm_op_occ_id)]
        parsed_args = self.check_parser(
            self.cancel_vnf_lcm, arglist, verifylist)
        url = os.path.join(
            self.url,
            'vnflcm/v1/vnf_lcm_op_occs',
            uuidsentinel.vnf_lcm_op_occ_id,
            'cancel')
        self.requests_mock.register_uri(
            'POST', url, headers=self.header, status_code=404, json={})
        # the 404 from the server must surface as a client exception
        self.assertRaises(exceptions.TackerClientException,
                          self.cancel_vnf_lcm.take_action,
                          parsed_args)
class TestRollbackVnfLcmOp(TestVnfLcm):
    """Tests for the ``vnflcm op rollback`` command."""

    def setUp(self):
        super(TestRollbackVnfLcmOp, self).setUp()
        self.rollback_vnf_lcm = vnflcm_op_occs.RollbackVnfLcmOp(
            self.app, self.app_args, cmd_name='vnflcm op rollback')

    def test_take_action(self):
        """take_action normal system test"""
        arglist = [uuidsentinel.vnf_lcm_op_occ_id]
        verifylist = [('vnf_lcm_op_occ_id', uuidsentinel.vnf_lcm_op_occ_id)]
        parsed_args = self.check_parser(
            self.rollback_vnf_lcm, arglist, verifylist)
        url = os.path.join(
            self.url,
            'vnflcm/v1/vnf_lcm_op_occs',
            uuidsentinel.vnf_lcm_op_occ_id,
            'rollback')
        self.requests_mock.register_uri(
            'POST', url, headers=self.header, json={})
        # capture stdout to verify the user-facing confirmation message
        sys.stdout = buffer = StringIO()
        self.rollback_vnf_lcm.take_action(parsed_args)
        actual_message = buffer.getvalue().strip()
        expected_message = (
            'Rollback request for LCM operation ' +
            uuidsentinel.vnf_lcm_op_occ_id +
            ' has been accepted')
        self.assertEqual(expected_message, actual_message)

    def test_take_action_vnf_lcm_op_occ_id_not_found(self):
        """take_action abnormal system test: unknown op occ id (404)"""
        arglist = [uuidsentinel.vnf_lcm_op_occ_id]
        verifylist = [('vnf_lcm_op_occ_id', uuidsentinel.vnf_lcm_op_occ_id)]
        parsed_args = self.check_parser(
            self.rollback_vnf_lcm, arglist, verifylist)
        url = os.path.join(
            self.url,
            'vnflcm/v1/vnf_lcm_op_occs',
            uuidsentinel.vnf_lcm_op_occ_id,
            'rollback')
        self.requests_mock.register_uri(
            'POST', url, headers=self.header, status_code=404, json={})
        # the 404 from the server must surface as a client exception
        self.assertRaises(exceptions.TackerClientException,
                          self.rollback_vnf_lcm.take_action,
                          parsed_args)
class TestFailVnfLcmOp(TestVnfLcm):
    """Tests for the ``vnflcm op fail`` command."""

    def setUp(self):
        super(TestFailVnfLcmOp, self).setUp()
        self.fail_vnf_lcm = vnflcm_op_occs.FailVnfLcmOp(
            self.app, self.app_args, cmd_name='vnflcm op fail')

    def test_take_action(self):
        """Test of take_action()"""
        vnflcm_op_occ = vnflcm_op_occs_fakes.vnflcm_op_occ_response(
            action='fail')
        arg_list = [vnflcm_op_occ['id']]
        verify_list = [('vnf_lcm_op_occ_id', vnflcm_op_occ['id'])]
        # command param
        parsed_args = self.check_parser(
            self.fail_vnf_lcm, arg_list, verify_list)
        url = os.path.join(
            self.url,
            'vnflcm/v1/vnf_lcm_op_occs',
            vnflcm_op_occ['id'],
            'fail')
        self.requests_mock.register_uri(
            'POST', url, headers=self.header, json=vnflcm_op_occ)
        columns, data = (self.fail_vnf_lcm.take_action(parsed_args))
        # only the column *set* is asserted; values come from the fake response
        expected_columns = _get_columns_vnflcm_op_occs(action='fail')
        self.assertCountEqual(expected_columns, columns)

    def test_take_action_vnf_lcm_op_occ_id_not_found(self):
        """Test if vnf-lcm-op-occ-id is not found (404)"""
        arg_list = [uuidsentinel.vnf_lcm_op_occ_id]
        verify_list = [('vnf_lcm_op_occ_id', uuidsentinel.vnf_lcm_op_occ_id)]
        # command param
        parsed_args = self.check_parser(
            self.fail_vnf_lcm, arg_list, verify_list)
        url = os.path.join(
            self.url,
            'vnflcm/v1/vnf_lcm_op_occs',
            uuidsentinel.vnf_lcm_op_occ_id,
            'fail')
        self.requests_mock.register_uri(
            'POST', url, headers=self.header, status_code=404, json={})
        self.assertRaises(exceptions.TackerClientException,
                          self.fail_vnf_lcm.take_action,
                          parsed_args)

    def test_take_action_vnf_lcm_op_occ_state_is_conflict(self):
        """Test if vnf-lcm-op-occ state is conflict (409)"""
        arg_list = [uuidsentinel.vnf_lcm_op_occ_id]
        verify_list = [('vnf_lcm_op_occ_id', uuidsentinel.vnf_lcm_op_occ_id)]
        # command param
        parsed_args = self.check_parser(
            self.fail_vnf_lcm, arg_list, verify_list)
        url = os.path.join(
            self.url,
            'vnflcm/v1/vnf_lcm_op_occs',
            uuidsentinel.vnf_lcm_op_occ_id,
            'fail')
        self.requests_mock.register_uri(
            'POST', url, headers=self.header, status_code=409, json={})
        self.assertRaises(exceptions.TackerClientException,
                          self.fail_vnf_lcm.take_action,
                          parsed_args)

    def test_take_action_vnf_lcm_op_occ_internal_server_error(self):
        """Test if request is internal server error (500)"""
        arg_list = [uuidsentinel.vnf_lcm_op_occ_id]
        verify_list = [('vnf_lcm_op_occ_id', uuidsentinel.vnf_lcm_op_occ_id)]
        # command param
        parsed_args = self.check_parser(
            self.fail_vnf_lcm, arg_list, verify_list)
        url = os.path.join(
            self.url,
            'vnflcm/v1/vnf_lcm_op_occs',
            uuidsentinel.vnf_lcm_op_occ_id,
            'fail')
        self.requests_mock.register_uri(
            'POST', url, headers=self.header, status_code=500, json={})
        self.assertRaises(exceptions.TackerClientException,
                          self.fail_vnf_lcm.take_action,
                          parsed_args)

    def test_take_action_vnf_lcm_op_occ_missing_vnf_lcm_op_occ_id_argument(
            self):
        """Test if vnflcm_op_occ_id is not provided"""
        arg_list = []
        verify_list = [('vnf_lcm_op_occ_id', arg_list)]
        self.assertRaises(base.ParserException, self.check_parser,
                          self.fail_vnf_lcm, arg_list, verify_list)
class TestRetryVnfLcmOp(TestVnfLcm):
    """Tests for the ``vnflcm op retry`` command."""

    def setUp(self):
        super(TestRetryVnfLcmOp, self).setUp()
        self.retry_vnf_lcm = vnflcm_op_occs.RetryVnfLcmOp(
            self.app, self.app_args, cmd_name='vnflcm op retry')

    def test_take_action(self):
        """Test of take_action()"""
        arg_list = [uuidsentinel.vnf_lcm_op_occ_id]
        verify_list = [('vnf_lcm_op_occ_id', uuidsentinel.vnf_lcm_op_occ_id)]
        # command param
        parsed_args = self.check_parser(
            self.retry_vnf_lcm, arg_list, verify_list)
        url = os.path.join(
            self.url,
            'vnflcm/v1/vnf_lcm_op_occs',
            uuidsentinel.vnf_lcm_op_occ_id,
            'retry')
        self.requests_mock.register_uri(
            'POST', url, headers=self.header, json={})
        # capture stdout to verify the user-facing confirmation message
        sys.stdout = buffer = StringIO()
        self.retry_vnf_lcm.take_action(parsed_args)
        actual_message = buffer.getvalue().strip()
        expected_message = (
            'Retry request for LCM operation ' +
            uuidsentinel.vnf_lcm_op_occ_id +
            ' has been accepted')
        self.assertEqual(expected_message, actual_message)

    def test_take_action_vnf_lcm_op_occ_id_not_found(self):
        """Test if vnf-lcm-op-occ-id is not found (404)."""
        arglist = [uuidsentinel.vnf_lcm_op_occ_id]
        verifylist = [('vnf_lcm_op_occ_id', uuidsentinel.vnf_lcm_op_occ_id)]
        # command param
        parsed_args = self.check_parser(
            self.retry_vnf_lcm, arglist, verifylist)
        url = os.path.join(
            self.url,
            'vnflcm/v1/vnf_lcm_op_occs',
            uuidsentinel.vnf_lcm_op_occ_id,
            'retry')
        self.requests_mock.register_uri(
            'POST', url, headers=self.header, status_code=404, json={})
        self.assertRaises(exceptions.TackerClientException,
                          self.retry_vnf_lcm.take_action,
                          parsed_args)

    def test_take_action_vnf_lcm_op_occ_state_is_conflict(self):
        """Test if vnf-lcm-op-occ state is conflict (409)"""
        arg_list = [uuidsentinel.vnf_lcm_op_occ_id]
        verify_list = [('vnf_lcm_op_occ_id', uuidsentinel.vnf_lcm_op_occ_id)]
        # command param
        parsed_args = self.check_parser(
            self.retry_vnf_lcm, arg_list, verify_list)
        url = os.path.join(
            self.url,
            'vnflcm/v1/vnf_lcm_op_occs',
            uuidsentinel.vnf_lcm_op_occ_id,
            'retry')
        self.requests_mock.register_uri(
            'POST', url, headers=self.header, status_code=409, json={})
        self.assertRaises(exceptions.TackerClientException,
                          self.retry_vnf_lcm.take_action,
                          parsed_args)

    def test_take_action_vnf_lcm_op_occ_internal_server_error(self):
        """Test if request is internal server error (500)"""
        arg_list = [uuidsentinel.vnf_lcm_op_occ_id]
        verify_list = [('vnf_lcm_op_occ_id', uuidsentinel.vnf_lcm_op_occ_id)]
        # command param
        parsed_args = self.check_parser(
            self.retry_vnf_lcm, arg_list, verify_list)
        url = os.path.join(
            self.url,
            'vnflcm/v1/vnf_lcm_op_occs',
            uuidsentinel.vnf_lcm_op_occ_id,
            'retry')
        self.requests_mock.register_uri(
            'POST', url, headers=self.header, status_code=500, json={})
        self.assertRaises(exceptions.TackerClientException,
                          self.retry_vnf_lcm.take_action,
                          parsed_args)

    def test_take_action_vnf_lcm_op_occ_missing_vnf_lcm_op_occ_id_argument(
            self):
        """Test if vnflcm_op_occ_id is not provided"""
        arg_list = []
        verify_list = [('vnf_lcm_op_occ_id', arg_list)]
        self.assertRaises(base.ParserException, self.check_parser,
                          self.retry_vnf_lcm, arg_list, verify_list)
class TestListVnfLcmOp(TestVnfLcm):
    """Tests for the ``vnflcm op list`` command."""

    def setUp(self):
        super(TestListVnfLcmOp, self).setUp()
        self.list_vnflcm_op_occ = vnflcm_op_occs.ListVnfLcmOp(
            self.app, self.app_args, cmd_name='vnflcm op list')

    def test_take_action(self):
        """List without filters: all rows come back with the list columns."""
        vnflcm_op_occs_obj = vnflcm_op_occs_fakes.create_vnflcm_op_occs(
            count=3)
        parsed_args = self.check_parser(self.list_vnflcm_op_occ, [], [])
        self.requests_mock.register_uri(
            'GET', os.path.join(self.url,
                                'vnflcm/v1/vnf_lcm_op_occs'),
            json=vnflcm_op_occs_obj, headers=self.header)
        actual_columns, data = self.list_vnflcm_op_occ.take_action(parsed_args)
        # rebuild the expected rows from the same fakes the mock serves
        headers, columns = tacker_osc_utils.get_column_definitions(
            self.list_vnflcm_op_occ.get_attributes(), long_listing=True)
        expected_data = []
        for vnflcm_op_occ_obj_idx in vnflcm_op_occs_obj:
            expected_data.append(vnflcm_op_occs_fakes.get_vnflcm_op_occ_data(
                vnflcm_op_occ_obj_idx, columns=columns))
        self.assertCountEqual(_get_columns_vnflcm_op_occs(action='list'),
                              actual_columns)
        self.assertCountEqual(expected_data, list(data))

    def test_take_action_with_filter(self):
        """List with a valid --filter expression."""
        vnflcm_op_occs_obj = vnflcm_op_occs_fakes.create_vnflcm_op_occs(
            count=3)
        parsed_args = self.check_parser(
            self.list_vnflcm_op_occ,
            ["--filter", '(eq,operationState,STARTING)'],
            [('filter', '(eq,operationState,STARTING)')])
        self.requests_mock.register_uri(
            'GET', os.path.join(
                self.url,
                'vnflcm/v1/vnf_lcm_op_occs?'
                'filter=(eq,operationState,STARTING)'),
            json=vnflcm_op_occs_obj, headers=self.header)
        actual_columns, data = self.list_vnflcm_op_occ.take_action(parsed_args)
        headers, columns = tacker_osc_utils.get_column_definitions(
            self.list_vnflcm_op_occ.get_attributes(), long_listing=True)
        expected_data = []
        for vnflcm_op_occ_obj_idx in vnflcm_op_occs_obj:
            expected_data.append(vnflcm_op_occs_fakes.get_vnflcm_op_occ_data(
                vnflcm_op_occ_obj_idx, columns=columns))
        self.assertCountEqual(_get_columns_vnflcm_op_occs(action='list'),
                              actual_columns)
        self.assertListItemsEqual(expected_data, list(data))

    def test_take_action_with_incorrect_filter(self):
        """A malformed --filter expression must raise a client exception."""
        parsed_args = self.check_parser(
            self.list_vnflcm_op_occ,
            ["--filter", '(operationState)'],
            [('filter', '(operationState)')])
        url = os.path.join(
            self.url,
            'vnflcm/v1/vnf_lcm_op_occs?filter=(operationState)')
        # NOTE(review): the mock is registered for POST although the list
        # command issues a GET; presumably the unmatched GET is what ends
        # up raising TackerClientException -- confirm the intent.
        self.requests_mock.register_uri(
            'POST', url, headers=self.header, status_code=400, json={})
        self.assertRaises(exceptions.TackerClientException,
                          self.list_vnflcm_op_occ.take_action,
                          parsed_args)

    def test_take_action_internal_server_error(self):
        """A server-side 500 must raise a client exception."""
        parsed_args = self.check_parser(
            self.list_vnflcm_op_occ,
            ["--filter", '(eq,operationState,STARTING)'],
            [('filter', '(eq,operationState,STARTING)')])
        url = os.path.join(
            self.url,
            'vnflcm/v1/vnf_lcm_op_occs?'
            'filter=(eq,operationState,STARTING)')
        # NOTE(review): registered for POST while the command issues GET --
        # see the note in test_take_action_with_incorrect_filter.
        self.requests_mock.register_uri(
            'POST', url, headers=self.header, status_code=500, json={})
        self.assertRaises(exceptions.TackerClientException,
                          self.list_vnflcm_op_occ.take_action,
                          parsed_args)
class TestShowVnfLcmOp(TestVnfLcm):
    """Tests for the ``vnflcm op show`` command."""

    def setUp(self):
        super(TestShowVnfLcmOp, self).setUp()
        self.show_vnf_lcm_op_occs = vnflcm_op_occs.ShowVnfLcmOp(
            self.app, self.app_args, cmd_name='vnflcm op show')

    def test_take_action(self):
        """Test of take_action()"""
        vnflcm_op_occ = vnflcm_op_occs_fakes.vnflcm_op_occ_response()
        arglist = [vnflcm_op_occ['id']]
        verifylist = [('vnf_lcm_op_occ_id', vnflcm_op_occ['id'])]
        # command param
        parsed_args = self.check_parser(
            self.show_vnf_lcm_op_occs, arglist, verifylist)
        url = os.path.join(
            self.url,
            'vnflcm/v1/vnf_lcm_op_occs',
            vnflcm_op_occ['id'])
        self.requests_mock.register_uri(
            'GET', url, headers=self.header, json=vnflcm_op_occ)
        columns, data = (self.show_vnf_lcm_op_occs.take_action(parsed_args))
        # only the column *set* is asserted; values come from the fake response
        self.assertCountEqual(_get_columns_vnflcm_op_occs(),
                              columns)

    def test_take_action_vnf_lcm_op_occ_id_not_found(self):
        """Test if vnf-lcm-op-occ-id is not found (404)."""
        arglist = [uuidsentinel.vnf_lcm_op_occ_id]
        verifylist = [('vnf_lcm_op_occ_id', uuidsentinel.vnf_lcm_op_occ_id)]
        # command param
        parsed_args = self.check_parser(
            self.show_vnf_lcm_op_occs, arglist, verifylist)
        url = os.path.join(
            self.url,
            'vnflcm/v1/vnf_lcm_op_occs',
            uuidsentinel.vnf_lcm_op_occ_id)
        self.requests_mock.register_uri(
            'GET', url, headers=self.header, status_code=404, json={})
        self.assertRaises(exceptions.TackerClientException,
                          self.show_vnf_lcm_op_occs.take_action,
                          parsed_args)

    def test_take_action_internal_server_error(self):
        """Test for internal server error (500)."""
        arglist = [uuidsentinel.vnf_lcm_op_occ_id]
        verifylist = [('vnf_lcm_op_occ_id', uuidsentinel.vnf_lcm_op_occ_id)]
        # command param
        parsed_args = self.check_parser(
            self.show_vnf_lcm_op_occs, arglist, verifylist)
        url = os.path.join(
            self.url,
            'vnflcm/v1/vnf_lcm_op_occs',
            uuidsentinel.vnf_lcm_op_occ_id)
        self.requests_mock.register_uri(
            'GET', url, headers=self.header, status_code=500, json={})
        self.assertRaises(exceptions.TackerClientException,
                          self.show_vnf_lcm_op_occs.take_action,
                          parsed_args)
| 36.068541
| 79
| 0.629391
| 2,528
| 20,523
| 4.740111
| 0.091772
| 0.067095
| 0.068764
| 0.070683
| 0.836435
| 0.802637
| 0.784612
| 0.775515
| 0.761412
| 0.733539
| 0
| 0.0043
| 0.274765
| 20,523
| 568
| 80
| 36.132042
| 0.800793
| 0.063197
| 0
| 0.721519
| 0
| 0
| 0.099848
| 0.035724
| 0
| 0
| 0
| 0
| 0.060759
| 1
| 0.075949
| false
| 0
| 0.03038
| 0
| 0.134177
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
940c8e6b58bdbe9c2bee36414c5169236167a328
| 1,004
|
py
|
Python
|
core/kryptos/strategy/signals/utils.py
|
czr1803/kryptos
|
2e8e20b81c7486283ab39df053146048f5496474
|
[
"MIT"
] | 45
|
2019-01-27T13:47:51.000Z
|
2022-03-13T00:25:21.000Z
|
core/kryptos/strategy/signals/utils.py
|
czr1803/kryptos
|
2e8e20b81c7486283ab39df053146048f5496474
|
[
"MIT"
] | 64
|
2019-01-24T06:43:13.000Z
|
2020-03-09T17:14:52.000Z
|
core/kryptos/strategy/signals/utils.py
|
produvia/kryptos
|
2e8e20b81c7486283ab39df053146048f5496474
|
[
"MIT"
] | 4
|
2019-11-21T10:49:17.000Z
|
2021-09-30T03:33:00.000Z
|
import numpy as np
from logbook import Logger
log = Logger("SIGNALS")
def cross_above(series, trigger):
    """Return True when *series* crosses above *trigger* at the latest point.

    A cross above means the previous value was at or below the trigger and
    the newest value is strictly above it.

    Args:
        series: Indexable numeric sequence (needs at least two points).
        trigger: Indexable numeric sequence, or a scalar threshold
            (int or float) broadcast to a constant series.

    Returns:
        bool: True on a cross above; False otherwise, including when
        fewer than two data points are available.
    """
    # Broadcast a scalar threshold so the same index arithmetic works for
    # both cases.  Accepting float as well as int generalizes the original
    # int-only check (a float previously raised an uncaught TypeError).
    if isinstance(trigger, (int, float)):
        trigger = [trigger] * 3
    try:
        return series[-2] <= trigger[-2] and series[-1] > trigger[-1]
    except IndexError:
        # Not enough history to compare the last two points.
        log.warn("Not enough data to calculate cross above")
        return False
def cross_below(series, trigger):
    """Return True when *series* crosses below *trigger* at the latest point.

    A cross below means the previous value was at or above the trigger and
    the newest value is strictly below it.

    Args:
        series: Indexable numeric sequence (needs at least two points).
        trigger: Indexable numeric sequence, or a scalar threshold
            (int or float) broadcast to a constant series.

    Returns:
        bool: True on a cross below; False otherwise, including when
        fewer than two data points are available.
    """
    # Accept float as well as int for the scalar threshold, mirroring
    # cross_above.
    if isinstance(trigger, (int, float)):
        trigger = [trigger] * 3
    try:
        # Bug fix: compare the latest point against trigger[-1], not
        # trigger[-2] — the old index was only correct for constant
        # (scalar-broadcast) triggers.
        return series[-2] >= trigger[-2] and series[-1] < trigger[-1]
    except IndexError:
        # Bug fix: the message previously said "cross above".
        log.warn("Not enough data to calculate cross below")
        return False
def increasing(series, period=4):
    """Return True when the last *period* values of *series* are strictly increasing."""
    window = series[-period:]
    deltas = np.diff(window)
    return np.all(deltas > 0)
def decreasing(series, period=4):
    """Return True when the last *period* values of *series* are strictly decreasing."""
    window = series[-period:]
    deltas = np.diff(window)
    return np.all(deltas < 0)
def greater_than(series_1, series_2):
    """Return True when the latest value of *series_1* exceeds that of *series_2*."""
    latest_a = series_1[-1]
    latest_b = series_2[-1]
    return latest_a > latest_b
def less_than(series_1, series_2):
    """Return True when the latest value of *series_1* is below that of *series_2*."""
    latest_a = series_1[-1]
    latest_b = series_2[-1]
    return latest_a < latest_b
| 22.311111
| 69
| 0.63247
| 143
| 1,004
| 4.356643
| 0.307692
| 0.067416
| 0.051364
| 0.080257
| 0.81862
| 0.81862
| 0.81862
| 0.81862
| 0.81862
| 0.81862
| 0
| 0.036554
| 0.237052
| 1,004
| 44
| 70
| 22.818182
| 0.776762
| 0
| 0
| 0.482759
| 0
| 0
| 0.086653
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.206897
| false
| 0
| 0.068966
| 0.068966
| 0.551724
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
940f0e47e49b685832f682007d67acb82213a91d
| 329
|
py
|
Python
|
word_vectorizer/exceptions/not_understood_url_exception.py
|
RodSernaPerez/WordVectorizer
|
097b2ccfc284b39ad43f56047ee25e393b7525ec
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
word_vectorizer/exceptions/not_understood_url_exception.py
|
RodSernaPerez/WordVectorizer
|
097b2ccfc284b39ad43f56047ee25e393b7525ec
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
word_vectorizer/exceptions/not_understood_url_exception.py
|
RodSernaPerez/WordVectorizer
|
097b2ccfc284b39ad43f56047ee25e393b7525ec
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
"""NotUnderstoodURLException
Exception raised when the address to in the URL field of the asked model
could not be understood."""
class NotUnderstoodURLException(Exception):
    """Raised when the address in the URL field of the asked model cannot be understood."""
| 25.307692
| 76
| 0.762918
| 42
| 329
| 5.97619
| 0.452381
| 0.406375
| 0.318725
| 0.350598
| 0.828685
| 0.828685
| 0.828685
| 0.828685
| 0.828685
| 0.828685
| 0
| 0
| 0.18541
| 329
| 12
| 77
| 27.416667
| 0.936567
| 0.756839
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 11
|
941eb1bbb14bad9f840b3d683a083579ed9b41de
| 1,068
|
py
|
Python
|
core/apps/dash/forms.py
|
ryuga/Website
|
4c666a92220f258c72339604305ffb6a64385012
|
[
"MIT"
] | 9
|
2019-12-19T17:27:00.000Z
|
2021-01-30T20:29:39.000Z
|
core/apps/dash/forms.py
|
ryuga/Website
|
4c666a92220f258c72339604305ffb6a64385012
|
[
"MIT"
] | 20
|
2020-08-01T21:58:16.000Z
|
2021-11-01T08:49:22.000Z
|
core/apps/dash/forms.py
|
ryuga/Website
|
4c666a92220f258c72339604305ffb6a64385012
|
[
"MIT"
] | 2
|
2020-08-01T08:44:36.000Z
|
2020-10-08T22:08:06.000Z
|
from django import forms
class LoginForm(forms.Form):
    """Login form: bootstrap-styled username and password fields."""

    username = forms.CharField(widget=forms.TextInput(attrs={
        "placeholder": "Username",
        "class": "form-control",
    }))
    password = forms.CharField(widget=forms.PasswordInput(attrs={
        "placeholder": "Password",
        "class": "form-control",
    }))
class SignUpForm(forms.Form):
    """Sign-up form: username plus password and confirmation fields."""

    username = forms.CharField(widget=forms.TextInput(attrs={
        "placeholder": "Username",
        "class": "form-control",
    }))
    password1 = forms.CharField(widget=forms.PasswordInput(attrs={
        "placeholder": "Password",
        "class": "form-control",
    }))
    password2 = forms.CharField(widget=forms.PasswordInput(attrs={
        "placeholder": "Password check",
        "class": "form-control",
    }))
| 24.837209
| 48
| 0.465356
| 73
| 1,068
| 6.808219
| 0.273973
| 0.140845
| 0.201207
| 0.251509
| 0.804829
| 0.804829
| 0.804829
| 0.804829
| 0.68008
| 0.68008
| 0
| 0.003226
| 0.419476
| 1,068
| 42
| 49
| 25.428571
| 0.798387
| 0
| 0
| 0.684211
| 0
| 0
| 0.174157
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.236842
| 0.026316
| 0
| 0.210526
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
943c9bfd706a2015333b355679c3c3efd44cd2f2
| 604
|
py
|
Python
|
exercicios praticos/aula011.py
|
joseangelooliveira-br/Python3
|
c0ba39768706f84f26b0616b75dd8c7971145b0e
|
[
"MIT"
] | null | null | null |
exercicios praticos/aula011.py
|
joseangelooliveira-br/Python3
|
c0ba39768706f84f26b0616b75dd8c7971145b0e
|
[
"MIT"
] | null | null | null |
exercicios praticos/aula011.py
|
joseangelooliveira-br/Python3
|
c0ba39768706f84f26b0616b75dd8c7971145b0e
|
[
"MIT"
] | null | null | null |
# ANSI SGR escape sequences take the form \033[<style>;<fg>;<bg>m — the
# parameters are separated by ';' (ECMA-48).  The original used ':',
# which is not a valid separator, so terminals ignored the styling.
print('\033[1;30;47mOla mundo!\033[m')
print('\033[7;30;47mOla mundo!\033[m')
print('\033[4;30;47mOla mundo!\033[m')
print('\033[0;30;47mOla mundo!\033[m')
print('\033[3;30;47mOla mundo!\033[m')
print('\033[1;32;46mOla mundo!\033[m')
print('\033[1;33;45mOla mundo!\033[m')
print('\033[1;31;46mOla mundo!\033[m')
print('\033[7;31;46mOla mundo!\033[m')
a = 3
b = 5
# Colorize the interpolated values; \33[m resets the attributes.
print('Os valores \33[1;31m{}\33[m e \33[1;34m{}\33[m são inteiros.'.format(a, b))
print('Os valores {} e {} são inteiros.'.format(a, b))
print('Os valores {}{}{} e {}{}{} são inteiros.'.format('\33[1;31m', a, '\33[m', '\33[1;34m', b, '\33[m'))
| 40.266667
| 102
| 0.622517
| 123
| 604
| 3.056911
| 0.219512
| 0.191489
| 0.215426
| 0.297872
| 0.789894
| 0.739362
| 0.558511
| 0.226064
| 0.226064
| 0.226064
| 0
| 0.232014
| 0.07947
| 604
| 15
| 102
| 40.266667
| 0.444245
| 0
| 0
| 0
| 0
| 0.071429
| 0.695868
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.857143
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
9440a637d11b1d710e76ff115ef49ec5a8c81ecc
| 126
|
py
|
Python
|
am_i_the_asshole/__init__.py
|
mirandrom/am-i-the-asshole
|
e7e4f00aa193931d45e4012db5cc65d3679faa90
|
[
"MIT"
] | 1
|
2020-10-05T16:39:18.000Z
|
2020-10-05T16:39:18.000Z
|
am_i_the_asshole/__init__.py
|
amr-amr/am-i-the-asshole
|
e7e4f00aa193931d45e4012db5cc65d3679faa90
|
[
"MIT"
] | null | null | null |
am_i_the_asshole/__init__.py
|
amr-amr/am-i-the-asshole
|
e7e4f00aa193931d45e4012db5cc65d3679faa90
|
[
"MIT"
] | null | null | null |
from am_i_the_asshole.dataset_readers import *
from am_i_the_asshole.models import *
from am_i_the_asshole.predictors import *
| 42
| 46
| 0.865079
| 22
| 126
| 4.5
| 0.454545
| 0.181818
| 0.212121
| 0.30303
| 0.636364
| 0.464646
| 0
| 0
| 0
| 0
| 0
| 0
| 0.087302
| 126
| 3
| 47
| 42
| 0.86087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
84a448261f5610554c350c573733ea04f7765264
| 41
|
py
|
Python
|
kiwi-engine-selector/kiwi/heuristics/user_vote_number.py
|
bubblegumsoldier/kiwi
|
91701c1806dcfbc1b038fecf7c2cab8bb07a01d4
|
[
"MIT"
] | null | null | null |
kiwi-engine-selector/kiwi/heuristics/user_vote_number.py
|
bubblegumsoldier/kiwi
|
91701c1806dcfbc1b038fecf7c2cab8bb07a01d4
|
[
"MIT"
] | null | null | null |
kiwi-engine-selector/kiwi/heuristics/user_vote_number.py
|
bubblegumsoldier/kiwi
|
91701c1806dcfbc1b038fecf7c2cab8bb07a01d4
|
[
"MIT"
] | null | null | null |
def get_heuristic(**_ignored):
    """Baseline user-vote-number heuristic: always scores zero.

    Accepts and discards arbitrary keyword arguments so it matches the
    common heuristic call signature.
    """
    return 0
| 20.5
| 28
| 0.707317
| 6
| 41
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029412
| 0.170732
| 41
| 2
| 29
| 20.5
| 0.794118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.